From fadd16ce890a764146fa42b648be6240a7480400 Mon Sep 17 00:00:00 2001 From: Katarzyna Mitrus Date: Thu, 15 Oct 2020 12:42:21 +0200 Subject: [PATCH 01/35] ReorgYolo reference implementation (#2384) * Align ReorgYolo to the spec (vector strides -> int stride) * ReorgYolo ref impl * ReorgYolo evaluate method * ReorgYolo tests * Tests update * Style apply * Add some coments * Code refactor * Comment update * Style apply * Build fix, mark evaluate as override * Revert "Align ReorgYolo to the spec (vector strides -> int stride)" * Use int_executable instead of evaluate * Use char* instead of templates * Code refactor * Comment update * Code review comment * Add constructor aligned with spec * Update shape validation * Update attributes tests * Add type_prop tests * Update backend tests * Add single layer tests * Update the spec * Remove wrong transformation test --- docs/ops/detection/ReorgYolo_1.md | 6 +- ...tract_image_patches_to_reorg_yolo_test.cpp | 33 +----- .../single_layer_tests/reorg_yolo.cpp | 75 +++++++++++++ .../include/single_layer_tests/reorg_yolo.hpp | 32 ++++++ .../src/single_layer_tests/reorg_yolo.cpp | 46 ++++++++ ngraph/core/include/ngraph/op/reorg_yolo.hpp | 5 +- .../ngraph/runtime/reference/reorg_yolo.hpp | 37 +++++++ .../src/runtime/reference/reorg_yolo.cpp | 89 +++++++++++++++ ngraph/core/src/op/reorg_yolo.cpp | 26 ++++- ngraph/test/CMakeLists.txt | 2 + ngraph/test/attributes.cpp | 20 +++- ngraph/test/backend/reorg_yolo.in.cpp | 101 ++++++++++++++++++ .../runtime/interpreter/int_executable.hpp | 11 ++ .../runtime/interpreter/opset_int_tbl.hpp | 1 + ngraph/test/type_prop/reorg_yolo.cpp | 97 +++++++++++++++++ 15 files changed, 540 insertions(+), 41 deletions(-) create mode 100644 inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/reorg_yolo.cpp create mode 100644 inference-engine/tests/functional/plugin/shared/include/single_layer_tests/reorg_yolo.hpp create mode 100644 
inference-engine/tests/functional/plugin/shared/src/single_layer_tests/reorg_yolo.cpp create mode 100644 ngraph/core/reference/include/ngraph/runtime/reference/reorg_yolo.hpp create mode 100644 ngraph/core/reference/src/runtime/reference/reorg_yolo.cpp create mode 100644 ngraph/test/backend/reorg_yolo.in.cpp create mode 100644 ngraph/test/type_prop/reorg_yolo.cpp diff --git a/docs/ops/detection/ReorgYolo_1.md b/docs/ops/detection/ReorgYolo_1.md index 25c4669e8b9a56..4801e5f750fbe2 100644 --- a/docs/ops/detection/ReorgYolo_1.md +++ b/docs/ops/detection/ReorgYolo_1.md @@ -22,7 +22,7 @@ **Inputs**: -* **1**: 4D input tensor of any type and shape `[N, C, H, W]`. `H` and `W` should be divisible by `stride`. Required. +* **1**: 4D input tensor of any type and shape `[N, C, H, W]`. `H` and `W` should be divisible by `stride` and `C >= (stride*stride)`. **Required.** **Outputs**: @@ -31,7 +31,7 @@ **Example** ```xml - + @@ -50,4 +50,4 @@ -``` \ No newline at end of file +``` diff --git a/inference-engine/tests/functional/inference_engine/transformations/convert_extract_image_patches_to_reorg_yolo_test.cpp b/inference-engine/tests/functional/inference_engine/transformations/convert_extract_image_patches_to_reorg_yolo_test.cpp index f9347e4f7adc69..062de2a477291a 100644 --- a/inference-engine/tests/functional/inference_engine/transformations/convert_extract_image_patches_to_reorg_yolo_test.cpp +++ b/inference-engine/tests/functional/inference_engine/transformations/convert_extract_image_patches_to_reorg_yolo_test.cpp @@ -19,38 +19,7 @@ using namespace testing; -TEST(TransformationTests, ConvertExtractImagePatchesToReorgYoloTests1) { - std::shared_ptr f(nullptr), f_ref(nullptr); - { - auto input = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 10, 10}); - - auto sizes = ngraph::Shape{5, 5}; - auto strides = ngraph::Strides{5, 5}; - auto rates = ngraph::Shape{1, 1}; - ngraph::op::PadType auto_pad = ngraph::op::PadType::VALID; - - auto eip = std::make_shared(input, 
sizes, strides, rates, auto_pad); - - f = std::make_shared(ngraph::NodeVector{eip}, ngraph::ParameterVector{input}); - - ngraph::pass::Manager manager; - manager.register_pass(); - manager.register_pass(); - manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); - } - - { - auto input = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 10, 10}); - auto strides = ngraph::Strides{5, 5}; - auto reorg_yolo = std::make_shared(input, strides); - - f_ref = std::make_shared(ngraph::NodeVector{reorg_yolo}, ngraph::ParameterVector{input}); - } - - auto res = compare_functions(f, f_ref); - ASSERT_TRUE(res.first) << res.second; -} +// TODO: bug 39971, remove ConvertExtractImagePatchesToReorgYolo transformation TEST(TransformationTests, ConvertExtractImagePatchesToReorgYoloTestsNegative1) { std::shared_ptr f(nullptr), f_ref(nullptr); diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/reorg_yolo.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/reorg_yolo.cpp new file mode 100644 index 00000000000000..03aa80811edb20 --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/reorg_yolo.cpp @@ -0,0 +1,75 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "single_layer_tests/reorg_yolo.hpp" +#include "common_test_utils/test_constants.hpp" + +using namespace LayerTestsDefinitions; + +const std::vector inShapes_caffe_yolov2 = { + {1, 64, 26, 26}, +}; + +const std::vector inShapes = { + {1, 4, 4, 4}, + {1, 8, 4, 4}, + {1, 9, 3, 3}, + {1, 24, 34, 62}, + {2, 8, 4, 4}, +}; + +const std::vector strides = { + 2, 3 +}; + +const auto testCase_caffe_yolov2 = ::testing::Combine( + ::testing::ValuesIn(inShapes_caffe_yolov2), + ::testing::Values(strides[0]), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(CommonTestUtils::DEVICE_CPU) +); + +const auto 
testCase_smallest = ::testing::Combine( + ::testing::Values(inShapes[0]), + ::testing::Values(strides[0]), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(CommonTestUtils::DEVICE_CPU) +); + +const auto testCase_stride_2 = ::testing::Combine( + ::testing::Values(inShapes[1]), + ::testing::Values(strides[0]), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(CommonTestUtils::DEVICE_CPU) +); + +const auto testCase_stride_3 = ::testing::Combine( + ::testing::Values(inShapes[2]), + ::testing::Values(strides[1]), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(CommonTestUtils::DEVICE_CPU) +); + +const auto testCase_smaller_h = ::testing::Combine( + ::testing::Values(inShapes[4]), + ::testing::Values(strides[0]), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(CommonTestUtils::DEVICE_CPU) +); + +const auto testCase_batch_2 = ::testing::Combine( + ::testing::Values(inShapes[3]), + ::testing::Values(strides[0]), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(CommonTestUtils::DEVICE_CPU) +); + +INSTANTIATE_TEST_CASE_P(smoke_TestsReorgYolo_caffe_YoloV2, ReorgYoloLayerTest, testCase_caffe_yolov2, ReorgYoloLayerTest::getTestCaseName); +INSTANTIATE_TEST_CASE_P(smoke_TestsReorgYolo_stride_2_smallest, ReorgYoloLayerTest, testCase_smallest, ReorgYoloLayerTest::getTestCaseName); +INSTANTIATE_TEST_CASE_P(smoke_TestsReorgYolo_stride_2, ReorgYoloLayerTest, testCase_stride_2, ReorgYoloLayerTest::getTestCaseName); +INSTANTIATE_TEST_CASE_P(smoke_TestsReorgYolo_stride_3, ReorgYoloLayerTest, testCase_stride_3, ReorgYoloLayerTest::getTestCaseName); +INSTANTIATE_TEST_CASE_P(smoke_TestsReorgYolo_smaller_h, ReorgYoloLayerTest, testCase_smaller_h, ReorgYoloLayerTest::getTestCaseName); +INSTANTIATE_TEST_CASE_P(smoke_TestsReorgYolo_batch_2, ReorgYoloLayerTest, testCase_batch_2, ReorgYoloLayerTest::getTestCaseName); diff --git 
a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/reorg_yolo.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/reorg_yolo.hpp new file mode 100644 index 00000000000000..1eab6b806b2465 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/reorg_yolo.hpp @@ -0,0 +1,32 @@ +// Copyright (C) 2019 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "functional_test_utils/layer_test_utils.hpp" +#include "ngraph_functions/builders.hpp" +#include "ngraph_functions/utils/ngraph_helpers.hpp" + +namespace LayerTestsDefinitions { + +using ReorgYoloParamsTuple = typename std::tuple< + ngraph::Shape, // Input Shape + size_t, // stride + InferenceEngine::Precision, // Network precision + std::string>; // Device name + +class ReorgYoloLayerTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon { +public: + static std::string getTestCaseName(const testing::TestParamInfo &obj); + +protected: + void SetUp() override; +}; + +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/reorg_yolo.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/reorg_yolo.cpp new file mode 100644 index 00000000000000..716e271d0d5dd2 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/reorg_yolo.cpp @@ -0,0 +1,46 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "ie_core.hpp" + +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/blob_utils.hpp" +#include "functional_test_utils/precision_utils.hpp" +#include "functional_test_utils/plugin_cache.hpp" +#include "functional_test_utils/skip_tests_config.hpp" + +#include "single_layer_tests/reorg_yolo.hpp" + +namespace LayerTestsDefinitions { + 
+std::string ReorgYoloLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { + ngraph::Shape inputShape; + size_t stride; + InferenceEngine::Precision netPrecision; + std::string targetName; + std::tie(inputShape, stride, netPrecision, targetName) = obj.param; + std::ostringstream result; + result << "IS=" << inputShape << "_"; + result << "stride=" << stride << "_"; + result << "netPRC=" << netPrecision.name() << "_"; + result << "targetDevice=" << targetName << "_"; + return result.str(); +} + +void ReorgYoloLayerTest::SetUp() { + ngraph::Shape inputShape; + size_t stride; + InferenceEngine::Precision netPrecision; + std::tie(inputShape, stride, netPrecision, targetDevice) = this->GetParam(); + auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + auto param = std::make_shared(ngraph::element::f32, inputShape); + auto reorg_yolo = std::make_shared(param, stride); + function = std::make_shared(std::make_shared(reorg_yolo), ngraph::ParameterVector{param}, "ReorgYolo"); +} + +TEST_P(ReorgYoloLayerTest, CompareWithRefs) { + Run(); +}; + +} // namespace LayerTestsDefinitions diff --git a/ngraph/core/include/ngraph/op/reorg_yolo.hpp b/ngraph/core/include/ngraph/op/reorg_yolo.hpp index 75d4d56e023906..e9b20f605bff74 100644 --- a/ngraph/core/include/ngraph/op/reorg_yolo.hpp +++ b/ngraph/core/include/ngraph/op/reorg_yolo.hpp @@ -33,7 +33,10 @@ namespace ngraph /// \brief Constructs a ReorgYolo operation /// /// \param input Input - /// \param strides Stride to reorganize input by + /// \param stride Stride to reorganize input by + ReorgYolo(const Output& input, const size_t stride); + + // Constructor with `strides` for backward compatibility ReorgYolo(const Output& input, const Strides& strides); void validate_and_infer_types() override; diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/reorg_yolo.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/reorg_yolo.hpp new file mode 100644 index 
00000000000000..9de4e0147c9ed7 --- /dev/null +++ b/ngraph/core/reference/include/ngraph/runtime/reference/reorg_yolo.hpp @@ -0,0 +1,37 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +#pragma once + +#include +#include + +#include "ngraph/shape.hpp" + +namespace ngraph +{ + namespace runtime + { + namespace reference + { + void reorg_yolo(const char* arg, + char* out, + const Shape& in_shape, + int64_t stride, + const size_t elem_size); + } + } +} diff --git a/ngraph/core/reference/src/runtime/reference/reorg_yolo.cpp b/ngraph/core/reference/src/runtime/reference/reorg_yolo.cpp new file mode 100644 index 00000000000000..0ac2d79a122ad1 --- /dev/null +++ b/ngraph/core/reference/src/runtime/reference/reorg_yolo.cpp @@ -0,0 +1,89 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +#include +#include + +#include "ngraph/runtime/reference/reorg_yolo.hpp" +#include "ngraph/shape.hpp" + +using namespace ngraph; + +namespace ngraph +{ + namespace runtime + { + namespace reference + { + void reorg_yolo(const char* arg, + char* out, + const Shape& in_shape, + int64_t stride, + const size_t elem_size) + { + // [N, C, H, W] + size_t in_N = in_shape[0]; + size_t in_C = in_shape[1]; + size_t in_H = in_shape[2]; + size_t in_W = in_shape[3]; + + // Inference output shape logic: + // in_shape [N,C,H,W] -> out_shape [N, C*(stride*stride), H/stride, W/stride] + // ReorgYolo implementation calculates new indices like for backprop: + // in_shape [N,C,H,W] -> out_shape [N, C/(stride*stride), H*stride, W*stride] + + size_t impl_out_C = in_C / (stride * stride); + if (impl_out_C == 0) + { + throw ngraph_error( + "ReorgYolo. 
For [N, C, H, W] input shape, C >= (stride*stride) is " + "required."); + } + size_t impl_out_H = in_H * stride; + size_t impl_out_W = in_W * stride; + + for (size_t n = 0; n < in_N; ++n) + { + for (size_t c = 0; c < in_C; ++c) + { + for (size_t h = 0; h < in_H; ++h) + { + for (size_t w = 0; w < in_W; ++w) + { + size_t offset = c / impl_out_C; + size_t impl_c = c % impl_out_C; + size_t impl_h = h * stride + offset / stride; + size_t impl_w = w * stride + offset % stride; + + size_t arg_index = + ((n * impl_out_C + impl_c) * impl_out_H + impl_h) * impl_out_W + + impl_w; + size_t dest_index = ((n * in_C + c) * in_H + h) * in_W + w; + + arg_index *= elem_size; + dest_index *= elem_size; + + std::copy(arg + arg_index, + arg + (arg_index + elem_size), + out + dest_index); + } + } + } + } + } + } + } +} diff --git a/ngraph/core/src/op/reorg_yolo.cpp b/ngraph/core/src/op/reorg_yolo.cpp index f25c145aa3c42d..d9ede137e59f9e 100644 --- a/ngraph/core/src/op/reorg_yolo.cpp +++ b/ngraph/core/src/op/reorg_yolo.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/reorg_yolo.hpp" +#include "ngraph/runtime/reference/reorg_yolo.hpp" using namespace std; using namespace ngraph; @@ -28,14 +29,37 @@ op::ReorgYolo::ReorgYolo(const Output& input, const Strides& strides) constructor_validate_and_infer_types(); } +op::ReorgYolo::ReorgYolo(const Output& input, const size_t stride) + : Op({input}) + , m_strides(std::vector{stride, stride}) +{ + constructor_validate_and_infer_types(); +} + void op::ReorgYolo::validate_and_infer_types() { + NODE_VALIDATION_CHECK(this, !m_strides.empty(), "Stride attribute is required."); + auto input_et = get_input_element_type(0); if (get_input_partial_shape(0).is_static()) { auto input_shape = get_input_partial_shape(0).to_shape(); - Shape output_shape{input_shape[0], input_shape[1]}; + NODE_VALIDATION_CHECK( + this, input_shape.size() == 4, "[N, C, H, W] input shape is required."); + + 
NODE_VALIDATION_CHECK(this, + (input_shape[2] % m_strides[0]) == 0, + "For [N, C, H, W] input shape, H should be divisible by stride."); + + NODE_VALIDATION_CHECK(this, + (input_shape[3] % m_strides[0]) == 0, + "For [N, C, H, W] input shape, W should be divisible by stride."); + NODE_VALIDATION_CHECK(this, + input_shape[1] >= (m_strides[0] * m_strides[0]), + "For [N, C, H, W] input shape, C >= (stride*stride) is required."); + + Shape output_shape{input_shape[0], input_shape[1]}; for (size_t i = 2; i < input_shape.size(); i++) { output_shape.push_back(input_shape[i] / m_strides[0]); diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index 8969eb90cba6fb..6f46f1430d9103 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -157,6 +157,7 @@ set(SRC type_prop/read_value.cpp type_prop/reduce_l1.cpp type_prop/reduce_l2.cpp + type_prop/reorg_yolo.cpp type_prop/replace_slice.cpp type_prop/reshape.cpp type_prop/reverse.cpp @@ -325,6 +326,7 @@ set(MULTI_TEST_SRC backend/reduce_prod.in.cpp backend/reduce_sum.in.cpp backend/relu.in.cpp + backend/reorg_yolo.in.cpp backend/replace_slice.in.cpp backend/reshape.in.cpp backend/reverse_sequence.in.cpp diff --git a/ngraph/test/attributes.cpp b/ngraph/test/attributes.cpp index a96246601f832b..322c8605de7782 100644 --- a/ngraph/test/attributes.cpp +++ b/ngraph/test/attributes.cpp @@ -1323,14 +1323,26 @@ TEST(attributes, mvn_op) EXPECT_EQ(g_op->get_eps(), op->get_eps()); } -TEST(attributes, reorg_yolo_op) +TEST(attributes, reorg_yolo_op_stride) { FactoryRegistry::get().register_factory(); - const auto data = make_shared(element::i32, Shape{2, 3, 4, 5}); + const auto data = make_shared(element::i32, Shape{1, 64, 26, 26}); + + const auto op = make_shared(data, 2); + NodeBuilder builder(op); + const auto g_op = as_type_ptr(builder.create()); + + EXPECT_EQ(g_op->get_strides(), op->get_strides()); +} + +TEST(attributes, reorg_yolo_op_strides) +{ + FactoryRegistry::get().register_factory(); + const 
auto data = make_shared(element::i32, Shape{1, 64, 26, 26}); - const auto op = make_shared(data, Strides{2}); + const auto op = make_shared(data, Strides{2}); NodeBuilder builder(op); - const auto g_op = as_type_ptr(builder.create()); + const auto g_op = as_type_ptr(builder.create()); EXPECT_EQ(g_op->get_strides(), op->get_strides()); } diff --git a/ngraph/test/backend/reorg_yolo.in.cpp b/ngraph/test/backend/reorg_yolo.in.cpp new file mode 100644 index 00000000000000..0389a2c4b25cc4 --- /dev/null +++ b/ngraph/test/backend/reorg_yolo.in.cpp @@ -0,0 +1,101 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** + +#include +#include +#include +#include +#include +#include + +// clang-format off +#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS +#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS +#endif + +#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS +#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS +#endif +// clang-format on + +#include "gtest/gtest.h" +#include "ngraph/ngraph.hpp" +#include "util/engine/test_engines.hpp" +#include "util/test_case.hpp" +#include "util/test_control.hpp" +#include "util/type_prop.hpp" + +using namespace std; +using namespace ngraph; + +static string s_manifest = "${MANIFEST}"; +using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); + +NGRAPH_TEST(${BACKEND_NAME}, reorg_yolo_stride_2) +{ + // in_shape [N,C,H,W] + const auto in_shape = Shape{1, 8, 4, 4}; + auto p = make_shared(element::f32, in_shape); + size_t stride = 2; + auto reorg_yolo = make_shared(p, Strides{stride}); + auto fun = make_shared(OutputVector{reorg_yolo}, ParameterVector{p}); + + std::vector inputs(128); + std::iota(inputs.begin(), inputs.end(), 0); + std::vector expected_result{ + 0, 2, 4, 6, 16, 18, 20, 22, 32, 34, 36, 38, 48, 50, 52, 54, + 64, 66, 68, 70, 80, 82, 84, 86, 96, 98, 100, 102, 112, 114, 116, 118, + 1, 3, 5, 7, 17, 19, 21, 23, 33, 35, 37, 39, 49, 51, 53, 55, + 65, 67, 69, 71, 81, 83, 85, 87, 97, 99, 101, 103, 113, 115, 117, 119, + 8, 10, 12, 14, 24, 26, 28, 30, 40, 42, 44, 46, 56, 58, 60, 62, + 72, 74, 76, 78, 88, 90, 92, 94, 104, 106, 108, 110, 120, 122, 124, 126, + 9, 11, 13, 15, 25, 27, 29, 31, 41, 43, 45, 47, 57, 59, 61, 63, + 73, 75, 77, 79, 89, 91, 93, 95, 105, 107, 109, 111, 121, 123, 125, 127}; + // in_shape [N,C,H,W] -> out_shape [N, C*stride*stride, H/stride, W/stride] + Shape expected_shape = Shape{ + in_shape[0], in_shape[1] * stride * stride, in_shape[2] / stride, in_shape[3] / stride}; + + auto test_case = 
test::TestCase(fun); + test_case.add_input(inputs); + test_case.add_expected_output(expected_shape, expected_result); + test_case.run(); +} + +NGRAPH_TEST(${BACKEND_NAME}, reorg_yolo_stride_3) +{ + // in_shape [N,C,H,W] + const auto in_shape = Shape{1, 9, 3, 3}; + auto p = make_shared(element::f32, in_shape); + size_t stride = 3; + auto reorg_yolo = make_shared(p, Strides{stride}); + auto fun = make_shared(OutputVector{reorg_yolo}, ParameterVector{p}); + + std::vector inputs(81); + std::iota(inputs.begin(), inputs.end(), 0); + std::vector expected_result{ + 0, 3, 6, 27, 30, 33, 54, 57, 60, 1, 4, 7, 28, 31, 34, 55, 58, 61, 2, 5, 8, + 29, 32, 35, 56, 59, 62, 9, 12, 15, 36, 39, 42, 63, 66, 69, 10, 13, 16, 37, 40, 43, + 64, 67, 70, 11, 14, 17, 38, 41, 44, 65, 68, 71, 18, 21, 24, 45, 48, 51, 72, 75, 78, + 19, 22, 25, 46, 49, 52, 73, 76, 79, 20, 23, 26, 47, 50, 53, 74, 77, 80}; + // in_shape [N,C,H,W] -> out_shape [N, C*stride*stride, H/stride, W/stride] + Shape expected_shape = Shape{ + in_shape[0], in_shape[1] * stride * stride, in_shape[2] / stride, in_shape[3] / stride}; + + auto test_case = test::TestCase(fun); + test_case.add_input(inputs); + test_case.add_expected_output(expected_shape, expected_result); + test_case.run(); +} diff --git a/ngraph/test/runtime/interpreter/int_executable.hpp b/ngraph/test/runtime/interpreter/int_executable.hpp index a1306684f3d088..0070aaab1dd75f 100644 --- a/ngraph/test/runtime/interpreter/int_executable.hpp +++ b/ngraph/test/runtime/interpreter/int_executable.hpp @@ -78,6 +78,7 @@ #include "ngraph/runtime/reference/product.hpp" #include "ngraph/runtime/reference/quantize.hpp" #include "ngraph/runtime/reference/relu.hpp" +#include "ngraph/runtime/reference/reorg_yolo.hpp" #include "ngraph/runtime/reference/replace_slice.hpp" #include "ngraph/runtime/reference/reshape.hpp" #include "ngraph/runtime/reference/result.hpp" @@ -932,6 +933,16 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ 
pbox->get_attrs()); break; } + case OP_TYPEID::ReorgYolo_v0: + { + const op::v0::ReorgYolo* reorg_yolo = static_cast(&node); + runtime::reference::reorg_yolo(args[0]->get_data_ptr(), + out[0]->get_data_ptr(), + args[0]->get_shape(), + reorg_yolo->get_strides().at(0), + args[0]->get_element_type().size()); + break; + } case OP_TYPEID::Quantize: { const op::Quantize* quantize = static_cast(&node); diff --git a/ngraph/test/runtime/interpreter/opset_int_tbl.hpp b/ngraph/test/runtime/interpreter/opset_int_tbl.hpp index b25b37a400487c..de33cda40beaa4 100644 --- a/ngraph/test/runtime/interpreter/opset_int_tbl.hpp +++ b/ngraph/test/runtime/interpreter/opset_int_tbl.hpp @@ -21,6 +21,7 @@ #define ID_SUFFIX(NAME) NAME##_v0 NGRAPH_OP(CTCGreedyDecoder, ngraph::op::v0) NGRAPH_OP(DetectionOutput, op::v0) +NGRAPH_OP(ReorgYolo, op::v0) NGRAPH_OP(RNNCell, op::v0) #undef ID_SUFFIX diff --git a/ngraph/test/type_prop/reorg_yolo.cpp b/ngraph/test/type_prop/reorg_yolo.cpp new file mode 100644 index 00000000000000..c132d1fc9ed230 --- /dev/null +++ b/ngraph/test/type_prop/reorg_yolo.cpp @@ -0,0 +1,97 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** + +#include "gtest/gtest.h" +#include "ngraph/ngraph.hpp" +#include "util/type_prop.hpp" + +using namespace std; +using namespace ngraph; + +TEST(type_prop, reorg_yolo_stride_2) +{ + const auto in_shape = Shape{1, 64, 26, 26}; + size_t stride = 2; + auto data_param = make_shared(element::f32, in_shape); + auto reorg_yolo = make_shared(data_param, stride); + + // in_shape [N,C,H,W] -> out_shape [N, C*stride*stride, H/stride, W/stride] + Shape expected_shape = Shape{1, 256, 13, 13}; + + EXPECT_EQ(reorg_yolo->get_output_shape(0), expected_shape); +} + +TEST(type_prop, reorg_yolo_stride_2_batch_2) +{ + const auto in_shape = Shape{2, 64, 26, 26}; + size_t stride = 2; + auto data_param = make_shared(element::f32, in_shape); + auto reorg_yolo = make_shared(data_param, stride); + + // in_shape [N,C,H,W] -> out_shape [N, C*stride*stride, H/stride, W/stride] + Shape expected_shape = Shape{2, 256, 13, 13}; + + EXPECT_EQ(reorg_yolo->get_output_shape(0), expected_shape); +} + +TEST(type_prop, reorg_yolo_stride_2_smaller_H) +{ + const auto in_shape = Shape{1, 24, 34, 62}; + size_t stride = 2; + auto data_param = make_shared(element::f32, in_shape); + auto reorg_yolo = make_shared(data_param, stride); + + // in_shape [N,C,H,W] -> out_shape [N, C*stride*stride, H/stride, W/stride] + Shape expected_shape = Shape{1, 96, 17, 31}; + EXPECT_EQ(reorg_yolo->get_output_shape(0), expected_shape); +} + +TEST(type_prop, reorg_yolo_stride_3) +{ + const auto in_shape = Shape{1, 9, 3, 3}; + size_t stride = 3; + auto data_param = make_shared(element::f32, in_shape); + auto reorg_yolo = make_shared(data_param, stride); + + // in_shape [N,C,H,W] -> out_shape [N, C*stride*stride, H/stride, W/stride] + Shape expected_shape = Shape{ + in_shape[0], in_shape[1] * stride * stride, in_shape[2] / stride, in_shape[3] / stride}; + + EXPECT_EQ(reorg_yolo->get_output_shape(0), expected_shape); +} + +TEST(type_prop, 
reorg_yolo_catch_small_shape_stride) +{ + const auto in_shape = Shape{1, 1, 4, 4}; + size_t stride = 2; + auto data_param = make_shared(element::f32, in_shape); + try + { + // Throw error test: For [N, C, H, W] input shape, C >= (stride*stride) is required. + auto reorg_yolo = make_shared(data_param, stride); + + // Should have thrown, so fail if it didn't + FAIL() << "Incompatible stride was not detected."; + } + catch (const ngraph_error& error) + { + EXPECT_HAS_SUBSTRING(error.what(), std::string("stride")); + } + catch (...) + { + FAIL() << "Stride size check failed for unexpected reason."; + } +} From dda71338dd15f77a46e85ab8f6c2ea0f5c26e25a Mon Sep 17 00:00:00 2001 From: Mateusz Tabaka Date: Thu, 15 Oct 2020 15:47:20 +0200 Subject: [PATCH 02/35] Update test_shufflenetv2 tolerance values (#2674) * Update test_shufflenetv2 tolerance values * Update model path * Update models xfails * Update mounting volumes to container Co-authored-by: rblaczko --- .ci/openvino-onnx/Jenkinsfile | 4 +++- ngraph/python/tests/test_onnx/test_zoo_models.py | 4 +--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.ci/openvino-onnx/Jenkinsfile b/.ci/openvino-onnx/Jenkinsfile index d0e3f4932869e4..62721999d001ee 100644 --- a/.ci/openvino-onnx/Jenkinsfile +++ b/.ci/openvino-onnx/Jenkinsfile @@ -69,7 +69,9 @@ def buildDockerImage() { def runTests() { sh """ docker run --name ${DOCKER_CONTAINER_NAME} \ - --volume ${HOME}/ONNX_CI/onnx-models/.onnx:/root/.onnx ${DOCKER_IMAGE_TAG} + --volume ${HOME}/ONNX_CI/onnx-models-15-Oct/.onnx/model_zoo:/root/.onnx/model_zoo \ + --volume ${HOME}/ONNX_CI/onnx-models/.onnx/model_zoo/MSFT:/root/.onnx/model_zoo/MSFT \ + ${DOCKER_IMAGE_TAG} """ } diff --git a/ngraph/python/tests/test_onnx/test_zoo_models.py b/ngraph/python/tests/test_onnx/test_zoo_models.py index d5de6f26d0ab1a..b6a702e90cd3ef 100644 --- a/ngraph/python/tests/test_onnx/test_zoo_models.py +++ b/ngraph/python/tests/test_onnx/test_zoo_models.py @@ -81,6 +81,7 @@ "rain-princess-9": 
{"atol": 0.001, "rtol": 0.001}, "udnie-8": {"atol": 0.001, "rtol": 0.001}, "udnie-9": {"atol": 0.001, "rtol": 0.001}, + "test_shufflenetv2": {"atol": 1e-05, "rtol": 0.001}, } zoo_models = [] @@ -111,11 +112,9 @@ import_xfail_list = [ # ONNX Model Zoo (xfail_issue_38701, "test_onnx_model_zoo_text_machine_comprehension_bidirectional_attention_flow_model_bidaf_9_bidaf_bidaf_cpu"), - (xfail_issue_39682, "test_onnx_model_zoo_vision_classification_mnist_model_mnist_1_mnist_model_cpu"), (xfail_issue_37687, "test_onnx_model_zoo_vision_object_detection_segmentation_ssd_mobilenetv1_model_ssd_mobilenet_v1_10_ssd_mobilenet_v1_ssd_mobilenet_v1_cpu"), (xfail_issue_37687, "test_onnx_model_zoo_vision_object_detection_segmentation_yolov3_model_yolov3_10_yolov3_yolov3_cpu"), (xfail_issue_37687, "test_onnx_model_zoo_vision_object_detection_segmentation_tiny_yolov3_model_tiny_yolov3_11_yolov3_tiny_cpu"), - (xfail_issue_39683, "test_onnx_model_zoo_vision_object_detection_segmentation_tiny_yolov2_model_tinyyolov2_1_tiny_yolov2_model_cpu"), (xfail_issue_38726, "test_onnx_model_zoo_text_machine_comprehension_t5_model_t5_decoder_with_lm_head_12_t5_decoder_with_lm_head_cpu"), # Model MSFT @@ -145,7 +144,6 @@ (xfail_issue_35926, "test_onnx_model_zoo_text_machine_comprehension_roberta_model_roberta_base_11_roberta_base_11_roberta_base_11_cpu"), (xfail_issue_35926, "test_onnx_model_zoo_text_machine_comprehension_bert_squad_model_bertsquad_8_download_sample_8_bertsquad8_cpu"), (xfail_issue_35926, "test_onnx_model_zoo_text_machine_comprehension_gpt_2_model_gpt2_lm_head_10_GPT_2_LM_HEAD_model_cpu"), - (xfail_issue_36537, "test_onnx_model_zoo_vision_classification_shufflenet_model_shufflenet_v2_10_model_test_shufflenetv2_model_cpu"), (xfail_issue_36537, "test_onnx_model_zoo_vision_classification_efficientnet_lite4_model_efficientnet_lite4_11_efficientnet_lite4_efficientnet_lite4_cpu"), (xfail_issue_39685, 
"test_onnx_model_zoo_text_machine_comprehension_roberta_model_roberta_sequence_classification_9_roberta_sequence_classification_9_roberta_sequence_classification_9_cpu"), (xfail_issue_39669, "test_onnx_model_zoo_text_machine_comprehension_t5_model_t5_encoder_12_t5_encoder_cpu"), From 7cead20209b3a4290eb059de90b21a9a2df2837d Mon Sep 17 00:00:00 2001 From: Tomasz Socha Date: Thu, 15 Oct 2020 19:04:43 +0200 Subject: [PATCH 03/35] [ONNX] Replace global poolings with reduce operations (#2650) --- .../onnx_import/utils/pooling_factory.hpp | 12 ------- .../src/op/global_average_pool.cpp | 36 +++++++++++++++++-- .../onnx_import/src/op/global_max_pool.cpp | 36 +++++++++++++++++-- .../onnx_import/src/utils/pooling_factory.cpp | 20 ----------- 4 files changed, 66 insertions(+), 38 deletions(-) diff --git a/ngraph/frontend/onnx_import/include/onnx_import/utils/pooling_factory.hpp b/ngraph/frontend/onnx_import/include/onnx_import/utils/pooling_factory.hpp index c4636d4428d1a7..e882d85812e93c 100644 --- a/ngraph/frontend/onnx_import/include/onnx_import/utils/pooling_factory.hpp +++ b/ngraph/frontend/onnx_import/include/onnx_import/utils/pooling_factory.hpp @@ -86,18 +86,6 @@ namespace ngraph virtual ~LocalPoolingFactory() = default; }; - /// - /// \brief Factory class which generates sub-graphs for ONNX 'global' pooling - /// operators. 
- /// \note In a 'global' pooling operation, the kernel shape is calculated - /// based on spatial dims - class GlobalPoolingFactory : public PoolingFactory - { - public: - explicit GlobalPoolingFactory(const Node& node); - virtual ~GlobalPoolingFactory() = default; - }; - } // namespace pooling } // namespace onnx_import } // namespace ngraph diff --git a/ngraph/frontend/onnx_import/src/op/global_average_pool.cpp b/ngraph/frontend/onnx_import/src/op/global_average_pool.cpp index 7ea27bce89c3ef..30b6d4b317f6b0 100644 --- a/ngraph/frontend/onnx_import/src/op/global_average_pool.cpp +++ b/ngraph/frontend/onnx_import/src/op/global_average_pool.cpp @@ -14,10 +14,12 @@ // limitations under the License. //***************************************************************************** +#include +#include + #include "global_average_pool.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/avg_pool.hpp" -#include "onnx_import/utils/pooling_factory.hpp" +#include "onnx_import/default_opset.hpp" namespace ngraph { @@ -29,7 +31,35 @@ namespace ngraph { OutputVector global_average_pool(const Node& node) { - return pooling::GlobalPoolingFactory(node).make_avg_pool(); + auto data = node.get_ng_inputs()[0]; + auto data_rank = data.get_partial_shape().rank(); + + NGRAPH_CHECK(data_rank.is_static(), + "The input data tensor's rank has to be known (static)"); + + auto data_rank_value = data_rank.get_length(); + + NGRAPH_CHECK(data_rank_value > 2, + "The input data tensor's rank has to be greater than 2." + "Provided data rank is: ", + data_rank_value); + + // Generate axes for reduce operation which contain all spatial dims indexes. 
+ // Examples: + // Input shape: [N, C, H, W] + // Input spatial dimensions are H and W + // Expected spatial dims indexes: [2, 3] + // + // Input shape: [N, C, H, W, D] + // Input spatial dimensions are H, W and D + // Expected spatial dims indexes: [2, 3, 4] + uint64_t data_spatial_rank = data_rank_value - 2; + auto reduce_axes_vector = std::vector(data_spatial_rank); + std::iota(reduce_axes_vector.begin(), reduce_axes_vector.end(), 2); + auto reduce_axes = default_opset::Constant::create( + element::i64, Shape{data_spatial_rank}, reduce_axes_vector); + + return {std::make_shared(data, reduce_axes, true)}; } } // namespace set_1 diff --git a/ngraph/frontend/onnx_import/src/op/global_max_pool.cpp b/ngraph/frontend/onnx_import/src/op/global_max_pool.cpp index 79dee54f8c239c..53af9d601142c3 100644 --- a/ngraph/frontend/onnx_import/src/op/global_max_pool.cpp +++ b/ngraph/frontend/onnx_import/src/op/global_max_pool.cpp @@ -14,10 +14,12 @@ // limitations under the License. //***************************************************************************** +#include +#include + #include "global_max_pool.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/max_pool.hpp" -#include "onnx_import/utils/pooling_factory.hpp" +#include "onnx_import/default_opset.hpp" namespace ngraph { @@ -29,7 +31,35 @@ namespace ngraph { OutputVector global_max_pool(const Node& node) { - return pooling::GlobalPoolingFactory(node).make_max_pool(); + auto data = node.get_ng_inputs()[0]; + auto data_rank = data.get_partial_shape().rank(); + + NGRAPH_CHECK(data_rank.is_static(), + "The input data tensor's rank has to be known (static)"); + + auto data_rank_value = data_rank.get_length(); + + NGRAPH_CHECK(data_rank_value > 2, + "The input data tensor's rank has to be greater than 2." + "Provided data rank is: ", + data_rank_value); + + // Generate axes for reduce operation which contain all spatial dims indexes. 
+ // Examples: + // Input shape: [N, C, H, W] + // Input spatial dimensions are H and W + // Expected spatial dims indexes: [2, 3] + // + // Input shape: [N, C, H, W, D] + // Input spatial dimensions are H, W and D + // Expected spatial dims indexes: [2, 3, 4] + uint64_t data_spatial_rank = data_rank_value - 2; + auto reduce_axes_vector = std::vector(data_spatial_rank); + std::iota(reduce_axes_vector.begin(), reduce_axes_vector.end(), 2); + auto reduce_axes = default_opset::Constant::create( + element::i64, Shape{data_spatial_rank}, reduce_axes_vector); + + return {std::make_shared(data, reduce_axes, true)}; } } // namespace set_1 diff --git a/ngraph/frontend/onnx_import/src/utils/pooling_factory.cpp b/ngraph/frontend/onnx_import/src/utils/pooling_factory.cpp index 766120dd2cd20a..09b83ae41f8407 100644 --- a/ngraph/frontend/onnx_import/src/utils/pooling_factory.cpp +++ b/ngraph/frontend/onnx_import/src/utils/pooling_factory.cpp @@ -73,26 +73,6 @@ namespace ngraph // Kernel shape is required m_kernel_shape = node.get_attribute_value>("kernel_shape"); } - - GlobalPoolingFactory::GlobalPoolingFactory(const Node& node) - : PoolingFactory(node) - { - const auto data_shape = node.get_ng_inputs().at(0).get_partial_shape(); - const auto data_rank = data_shape.rank(); - CHECK_VALID_NODE( - node, data_rank.is_static(), "Data rank must be static for global pooling ops"); - Shape kernel_shape; - for (auto i = 2; i < data_rank.get_length(); ++i) - { - CHECK_VALID_NODE(node, - data_shape[i].is_static(), - "All spatial dimensions must be known for global pooling ops"); - kernel_shape.emplace_back(data_shape[i].get_length()); - } - - // Set shape to all but {N,C} axes. 
- m_kernel_shape = kernel_shape; - } } // namespace pooling } // namespace onnx_import } // namespace ngraph From 6ae28bdd62438d2beb68388cd32e47177af34cbd Mon Sep 17 00:00:00 2001 From: Evgenya Stepyreva Date: Thu, 15 Oct 2020 21:39:42 +0300 Subject: [PATCH 04/35] Broadcast visit_attributes fix (#2649) * Broadcast visit_attributes fix * Broadcast: Python API adjustment * Revert back deserialization change --- ngraph/core/src/op/broadcast.cpp | 4 ++-- ngraph/python/src/ngraph/opset3/ops.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ngraph/core/src/op/broadcast.cpp b/ngraph/core/src/op/broadcast.cpp index 6df504f504eac3..c5992ca7ac934d 100644 --- a/ngraph/core/src/op/broadcast.cpp +++ b/ngraph/core/src/op/broadcast.cpp @@ -205,7 +205,7 @@ shared_ptr op::v3::Broadcast::clone_with_new_inputs(const OutputVector& ne bool op::v3::Broadcast::visit_attributes(AttributeVisitor& visitor) { - visitor.on_attribute("broadcast_spec", m_mode); + visitor.on_attribute("mode", m_mode); return true; } @@ -286,7 +286,7 @@ shared_ptr op::v1::Broadcast::clone_with_new_inputs(const OutputVector& ne bool op::v1::Broadcast::visit_attributes(AttributeVisitor& visitor) { - visitor.on_attribute("broadcast_spec", m_broadcast_spec); + visitor.on_attribute("mode", m_broadcast_spec); return true; } diff --git a/ngraph/python/src/ngraph/opset3/ops.py b/ngraph/python/src/ngraph/opset3/ops.py index 1621a3320a11a2..cb3f1a34092c46 100644 --- a/ngraph/python/src/ngraph/opset3/ops.py +++ b/ngraph/python/src/ngraph/opset3/ops.py @@ -97,7 +97,7 @@ def broadcast( if broadcast_spec.upper() == "EXPLICIT": inputs.append(as_node(axes_mapping)) return _get_node_factory_opset3().create( - "Broadcast", inputs, {"broadcast_spec": broadcast_spec.upper()} + "Broadcast", inputs, {"mode": broadcast_spec.upper()} ) From db85069713edad5134da1d99b6405f926fb0e7df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Karzy=C5=84ski?= <4430709+postrational@users.noreply.github.com> Date: Thu, 15 
Oct 2020 22:15:33 +0200 Subject: [PATCH 05/35] Update nGraph Python API build instructions (#2610) --- ngraph/python/BUILDING.md | 193 +++++++++++++++++++++++++++----------- 1 file changed, 140 insertions(+), 53 deletions(-) diff --git a/ngraph/python/BUILDING.md b/ngraph/python/BUILDING.md index 3b1b9441bb6b43..98ad3be60f1e0d 100644 --- a/ngraph/python/BUILDING.md +++ b/ngraph/python/BUILDING.md @@ -1,76 +1,158 @@ # Building the Python API for nGraph -## Building nGraph Python Wheels +You can build the nGraph Python API from sources by following instructions in this document. A Python wheel is a +portable package which will allow you to install nGraph in your Python distribution, or dedicated virtual environment. -If you want to try a newer version of nGraph's Python API than is available -from PyPI, you can build the latest version from source code. This process is -very similar to what is outlined in our [ngraph_build] instructions with two -important differences: +## Build nGraph Python Wheels on Linux or MacOS -1. You must specify: `-DNGRAPH_PYTHON_BUILD_ENABLE=ON` and `-DNGRAPH_ONNX_IMPORT_ENABLE=ON` - when running `cmake`. +### Prerequisites -2. Instead of running `make`, use the command `make python_wheel`. +In order to build the nGraph Python wheel, you will need to install a few packages. - `$ cmake ../ -DNGRAPH_PYTHON_BUILD_ENABLE=ON -DNGRAPH_ONNX_IMPORT_ENABLE=ON` +On Ubuntu 20.04 LTS you can use the following instructions to install the required packages, including Python and Cython. - `$ make python_wheel` + apt install git wget build-essential cmake + apt install python3 python3-dev python3-pip python3-virtualenv python-is-python3 -After this procedure completes, the `ngraph/build/python/dist` directory should -contain the Python packages of the version you cloned. 
For example, if you -checked out and built `0.21` for Python 3.7, you might see something like: +You can see a full working example on an Ubuntu environment used in our continuous integration environment in this +[Dockerfile](https://github.com/openvinotoolkit/openvino/blob/master/.ci/openvino-onnx/Dockerfile). - $ ls python/dist/ - ngraph-core-0.21.0rc0.tar.gz - ngraph_core-0.21.0rc0-cp37-cp37m-linux_x86_64.whl +On MacOS you can use [Homebrew](https://brew.sh) to install required packages: -## Building nGraph Python Wheels on Windows + brew install cmake + brew install automake + brew install libtool + brew install python3 -The build process on Windows consists of 3 steps: +Install Cython in the Python installation, or virtualenv which you are planning to use: + + pip3 install cython + +### Configure, build and install OpenVINO + +The following section will illustrate how to download, build and install OpenVINO in a workspace directory specified +by the `${MY_OPENVINO_BASEDIR}` variable. Let's start by setting this variable to a directory of your choice: + + export MY_OPENVINO_BASEDIR=/path/to/my/workspace + +Now we can clone OpenVINO, configure it using `cmake` and build using `make`. Please note that we're disabling +the building of a few modules by setting the `ENABLE_*` flag to `OFF`. In order to build the OpenVINO Python APIs +set the mentioned flags to `ON`. Note the `CMAKE_INSTALL_PREFIX`, which defaults to `/usr/local/` if not set. + + cd "${MY_OPENVINO_BASEDIR}" + git clone --recursive https://github.com/openvinotoolkit/openvino.git + mkdir openvino/build + cd openvino/build + + cmake .. 
\ + -DENABLE_CLDNN=OFF \ + -DENABLE_OPENCV=OFF \ + -DENABLE_VPU=OFF \ + -DENABLE_PYTHON=ON \ + -DNGRAPH_PYTHON_BUILD_ENABLE=ON \ + -DNGRAPH_ONNX_IMPORT_ENABLE=ON \ + -DCMAKE_INSTALL_PREFIX="${MY_OPENVINO_BASEDIR}/openvino_dist" + + make -j 4 + make install + +If you would like to use a specific version of Python, or use a virtual environment you can set the `PYTHON_EXECUTABLE` +variable. Examples: + +``` +-DPYTHON_EXECUTABLE=/path/to/venv/bin/python +-DPYTHON_EXECUTABLE=$(which python3.8) +``` + +### Build nGraph Python wheel + +When OpenVINO is built and installed, we can build the Python wheel by issuing the following command: + + make python_wheel + +Once completed, the wheel package should be located under the following path: + + $ ls "${MY_OPENVINO_BASEDIR}/openvino/ngraph/python/dist/" + ngraph_core-0.0.0-cp38-cp38-linux_x86_64.whl + +You can now install the wheel in your Python environment: + + cd "${MY_OPENVINO_BASEDIR}/openvino/ngraph/python/dist/" + pip3 install ngraph_core-0.0.0-cp38-cp38-linux_x86_64.whl + +#### What does `make python_wheel` do? + +The `python_wheel` target automates a few steps, required to build the wheel package. You can recreate the process +manually by issuing the following commands: + + cd "${MY_OPENVINO_BASEDIR}/openvino/ngraph/python" + git clone --branch v2.5.0 https://github.com/pybind/pybind11.git + export NGRAPH_CPP_BUILD_PATH="${MY_OPENVINO_BASEDIR}/openvino_dist" + python3 setup.py bdist_wheel + + +## Build nGraph Python Wheels on Windows + +### Prerequisites + +In order to build OpenVINO and the nGraph Python wheel on Windows, you will need to install Visual Studio and Python. + +Once Python is installed, you will also need to install Cython using `pip install cython`. + +### Configure, build and install OpenVINO + +Configure the build with a `cmake` invocation similar to the following. Note that you'll need to set the `-G` and +`-DCMAKE_CXX_COMPILER` to match the version and location of your Visual Studio installation. 
+ +``` +cmake .. ^ + -G"Visual Studio 16 2019" ^ + -DCMAKE_BUILD_TYPE=Release ^ + -DCMAKE_INSTALL_PREFIX="C:\temporary_install_dir" ^ + -DENABLE_CLDNN=OFF ^ + -DENABLE_OPENCV=OFF ^ + -DENABLE_VPU=OFF ^ + -DNGRAPH_PYTHON_BUILD_ENABLE=ON ^ + -DNGRAPH_ONNX_IMPORT_ENABLE=ON ^ + -DENABLE_PYTHON=ON ^ + -DCMAKE_CXX_COMPILER="C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.27.29110\bin\Hostx64\x64" + +``` -1. Configure the build with the following `cmake` invocation: -~~~~ -cmake .. - -G"Visual Studio 15 2017 Win64" - -DCMAKE_BUILD_TYPE=Release - -DCMAKE_INSTALL_PREFIX="C:\temporary_install_dir" - -DNGRAPH_PYTHON_BUILD_ENABLE=TRUE - -DNGRAPH_ONNX_IMPORT_ENABLE=TRUE - -DCMAKE_CXX_COMPILER="C:\Program Files (x86)\Microsoft Visual Studio\2017\Professional\VC\Tools\MSVC\14.16.27023\bin\Hostx64\x64" -~~~~ There are a couple of things to notice here. One is that the full path to the x64 version of MSVC compiler has to be specified. This is because DNNL requires a 64-bit version and cmake may fail to detect it correctly. + The other equally important thing to note is that the temporary directory where the build is to be installed can be specified. If the installation directory is not specified, the default location is `C:\Program Files\OpenVINO`. This examples uses `C:\temporary_install_dir` however, a subdirectory of `openvino\build` works as well. -The final Python wheel will contain the contents of this temporary directory so it's very important to set it. +The final Python wheel will contain the contents of this temporary directory so it's important to set it. 
-To specify an exact Python version, use the following options: -~~~~ --DPYTHON_EXECUTABLE="C:\Program Files\Python37\python.exe" --DPYTHON_LIBRARY="C:\Program Files\Python37\libs\python37.lib" --DPYTHON_INCLUDE_DIR="C:\Program Files\Python37\include" -~~~~ +If you want to specify an exact Python version, use the following options: +``` +-DPYTHON_EXECUTABLE="C:\Program Files\Python37\python.exe" ^ +-DPYTHON_LIBRARY="C:\Program Files\Python37\libs\python37.lib" ^ +-DPYTHON_INCLUDE_DIR="C:\Program Files\Python37\include" ^ +``` -2. Build the `install` target: +In order to build and install OpenVINO, build the `install` target: - `cmake --build . --target install --config Release -j 8` + cmake --build . --target install --config Release -j 8 -In this step nGraph will be built and installed to the temporary directory specified above. You can +In this step OpenVINO will be built and installed to the directory specified above. You can adjust the number of threads used in the building process to your machine's capabilities. -3. Build the Python wheel itself: +Build the Python wheel package: - `cmake --build . --target python_wheel --config Release -j 8` + cmake --build . --target python_wheel --config Release -j 8 The final wheel should be located in `ngraph\python\dist` directory. -4. Configure the environment for the Inference Engine Python API: + dir openvino\ngraph\python\dist\ + 10/09/2020 04:06 PM 4,010,943 ngraph_core-0.0.0-cp38-cp38-win_amd64.whl - `call \bin\setupvars.bat` -**NOTE**: Skip this step if you want to use the nGraph Wheel by itself. This step is required for most usage scenarios, like running unit tests. +## Run tests ### Using a virtualenv (optional) @@ -80,21 +162,26 @@ You may wish to use a virutualenv for your installation. $ source venv/bin/activate (venv) $ -### Installing the wheel +### Install the nGraph wheel and other requirements -You may wish to use a virutualenv for your installation. 
+ (venv) $ cd "${MY_OPENVINO_BASEDIR}/openvino/ngraph/python" + (venv) $ pip3 install -r requirements.txt + (venv) $ pip3 install -r requirements_test.txt + (venv) $ pip3 install dist/ngraph_core-0.0.0-cp38-cp38-linux_x86_64.whl + +### Run tests - (venv) $ pip install ngraph/build/python/dist/ngraph_core-0.21.0rc0-cp37-cp37m-linux_x86_64.whl +You should now be able to run tests. -## Running tests +You may need to run the `setupvars` script from OpenVINO to set paths to OpenVINO components. -Unit tests require additional packages be installed: + source ${MY_OPENVINO_BASEDIR}/openvino/scripts/setupvars/setupvars.sh - (venv) $ cd ngraph/python - (venv) $ pip install -r test_requirements.txt +The minimum requirement is to set the `PYTHONPATH` to include the Inference Engine Python API: -Then run tests: + export PYTHONPATH="${MY_OPENVINO_BASEDIR}/openvino/bin/intel64/Release/lib/python_api/python3.8/":${PYTHONPATH} + pytest tests/ - (venv) $ pytest tests +Now you can run tests using `pytest`: -[ngraph_build]: http://ngraph.nervanasys.com/docs/latest/buildlb.html + pytest tests From c9b16a79f57957ef494c51c2e25c7985fbf3e45a Mon Sep 17 00:00:00 2001 From: Gabriele Galiero Casay Date: Thu, 15 Oct 2020 22:30:12 +0200 Subject: [PATCH 06/35] Reference Implementation for RegionYolo operator (#2474) --- .../single_layer_tests/region_yolo.cpp | 85 +++++++++ .../single_layer_tests/region_yolo.hpp | 38 ++++ .../src/single_layer_tests/region_yolo.cpp | 63 +++++++ ngraph/core/include/ngraph/op/region_yolo.hpp | 6 +- .../ngraph/runtime/reference/region_yolo.hpp | 175 ++++++++++++++++++ ngraph/core/src/op/region_yolo.cpp | 6 + ngraph/test/CMakeLists.txt | 1 + ngraph/test/attributes.cpp | 2 +- ngraph/test/backend/region_yolo.in.cpp | 86 +++++++++ ngraph/test/files/region_in_yolov2_caffe.data | Bin 0 -> 84500 bytes ngraph/test/files/region_in_yolov3_mxnet.data | Bin 0 -> 307200 bytes .../test/files/region_out_yolov2_caffe.data | Bin 0 -> 84500 bytes 
.../test/files/region_out_yolov3_mxnet.data | Bin 0 -> 307200 bytes ngraph/test/runtime/ie/unit_test.manifest | 2 + .../runtime/interpreter/int_executable.hpp | 14 ++ .../runtime/interpreter/opset_int_tbl.hpp | 1 + 16 files changed, 475 insertions(+), 4 deletions(-) create mode 100644 inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/region_yolo.cpp create mode 100644 inference-engine/tests/functional/plugin/shared/include/single_layer_tests/region_yolo.hpp create mode 100644 inference-engine/tests/functional/plugin/shared/src/single_layer_tests/region_yolo.cpp create mode 100644 ngraph/core/reference/include/ngraph/runtime/reference/region_yolo.hpp create mode 100644 ngraph/test/backend/region_yolo.in.cpp create mode 100644 ngraph/test/files/region_in_yolov2_caffe.data create mode 100644 ngraph/test/files/region_in_yolov3_mxnet.data create mode 100644 ngraph/test/files/region_out_yolov2_caffe.data create mode 100644 ngraph/test/files/region_out_yolov3_mxnet.data diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/region_yolo.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/region_yolo.cpp new file mode 100644 index 00000000000000..eb2e2807ffe8e1 --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/region_yolo.cpp @@ -0,0 +1,85 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "single_layer_tests/region_yolo.hpp" +#include "common_test_utils/test_constants.hpp" + +using namespace LayerTestsDefinitions; + +const std::vector inShapes_caffe = { + {1, 125, 13, 13} +}; + +const std::vector inShapes_mxnet = { + {1, 75, 52, 52}, + {1, 75, 32, 32}, + {1, 75, 26, 26}, + {1, 75, 16, 16}, + {1, 75, 13, 13}, + {1, 75, 8, 8} +}; + +const std::vector inShapes_v3 = { + {1, 255, 52, 52}, + {1, 255, 26, 26}, + {1, 255, 13, 13} +}; + +const 
std::vector> masks = { + {0, 1, 2}, + {3, 4, 5}, + {6, 7, 8} +}; + +const std::vector do_softmax = {true, false}; +const std::vector classes = {80, 20}; +const std::vector num_regions = {5, 9}; +const size_t coords = 4; +const int start_axis = 1; +const int end_axis = 3; + +const auto testCase_yolov3 = ::testing::Combine( + ::testing::ValuesIn(inShapes_v3), + ::testing::Values(classes[0]), + ::testing::Values(coords), + ::testing::Values(num_regions[1]), + ::testing::Values(do_softmax[1]), + ::testing::Values(masks[2]), + ::testing::Values(start_axis), + ::testing::Values(end_axis), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(CommonTestUtils::DEVICE_CPU) +); + +const auto testCase_yolov3_mxnet = ::testing::Combine( + ::testing::ValuesIn(inShapes_mxnet), + ::testing::Values(classes[1]), + ::testing::Values(coords), + ::testing::Values(num_regions[1]), + ::testing::Values(do_softmax[1]), + ::testing::Values(masks[1]), + ::testing::Values(start_axis), + ::testing::Values(end_axis), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(CommonTestUtils::DEVICE_CPU) +); + +const auto testCase_yolov2_caffe = ::testing::Combine( + ::testing::ValuesIn(inShapes_caffe), + ::testing::Values(classes[1]), + ::testing::Values(coords), + ::testing::Values(num_regions[0]), + ::testing::Values(do_softmax[0]), + ::testing::Values(masks[0]), + ::testing::Values(start_axis), + ::testing::Values(end_axis), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(CommonTestUtils::DEVICE_CPU) +); + +INSTANTIATE_TEST_CASE_P(smoke_TestsRegionYolov3, RegionYoloLayerTest, testCase_yolov3, RegionYoloLayerTest::getTestCaseName); +INSTANTIATE_TEST_CASE_P(smoke_TestsRegionYoloMxnet, RegionYoloLayerTest, testCase_yolov3_mxnet, RegionYoloLayerTest::getTestCaseName); +INSTANTIATE_TEST_CASE_P(smoke_TestsRegionYoloCaffe, RegionYoloLayerTest, testCase_yolov2_caffe, RegionYoloLayerTest::getTestCaseName); diff --git 
a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/region_yolo.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/region_yolo.hpp new file mode 100644 index 00000000000000..c8d74f6003f534 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/region_yolo.hpp @@ -0,0 +1,38 @@ +// Copyright (C) 2019 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "functional_test_utils/layer_test_utils.hpp" +#include "ngraph_functions/builders.hpp" +#include "ngraph_functions/utils/ngraph_helpers.hpp" + +namespace LayerTestsDefinitions { + +using regionYoloParamsTuple = std::tuple< + ngraph::Shape, // Input Shape + size_t, // classes + size_t, // coordinates + size_t, // num regions + bool, // do softmax + std::vector, // mask + int, // start axis + int, // end axis + InferenceEngine::Precision, // Network precision + std::string>; // Device name + +class RegionYoloLayerTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon { +public: + static std::string getTestCaseName(const testing::TestParamInfo &obj); + +protected: + void SetUp() override; +}; + +} // namespace LayerTestsDefinitions \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/region_yolo.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/region_yolo.cpp new file mode 100644 index 00000000000000..968909418bcfb2 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/region_yolo.cpp @@ -0,0 +1,63 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "ie_core.hpp" + +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/blob_utils.hpp" +#include "functional_test_utils/precision_utils.hpp" +#include 
"functional_test_utils/plugin_cache.hpp" +#include "functional_test_utils/skip_tests_config.hpp" + +#include "single_layer_tests/region_yolo.hpp" + +namespace LayerTestsDefinitions { + +std::string RegionYoloLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { + ngraph::Shape inputShape; + size_t classes; + size_t coords; + size_t num_regions; + bool do_softmax; + std::vector mask; + int start_axis; + int end_axis; + InferenceEngine::Precision netPrecision; + std::string targetName; + std::tie(inputShape, classes, coords, num_regions, do_softmax , mask, start_axis, end_axis, netPrecision, targetName) = obj.param; + std::ostringstream result; + result << "IS=" << inputShape << "_"; + result << "classes=" << classes << "_"; + result << "coords=" << coords << "_"; + result << "num=" << num_regions << "_"; + result << "doSoftmax=" << do_softmax << "_"; + result << "axis=" << start_axis << "_"; + result << "endAxis=" << end_axis << "_"; + result << "netPRC=" << netPrecision.name() << "_"; + result << "targetDevice=" << targetName << "_"; + return result.str(); +} + +void RegionYoloLayerTest::SetUp() { + ngraph::Shape inputShape; + size_t classes; + size_t coords; + size_t num_regions; + bool do_softmax; + std::vector mask; + int start_axis; + int end_axis; + InferenceEngine::Precision netPrecision; + std::tie(inputShape, classes, coords, num_regions, do_softmax, mask, start_axis, end_axis, netPrecision, targetDevice) = this->GetParam(); + auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + auto param = std::make_shared(ngraph::element::f32, inputShape); + auto region_yolo = std::make_shared(param, coords, classes, num_regions, do_softmax, mask, start_axis, end_axis); + function = std::make_shared(std::make_shared(region_yolo), ngraph::ParameterVector{param}, "RegionYolo"); +} + +TEST_P(RegionYoloLayerTest, CompareWithRefs) { + Run(); +}; + +} // namespace LayerTestsDefinitions \ No newline at end of file diff --git 
a/ngraph/core/include/ngraph/op/region_yolo.hpp b/ngraph/core/include/ngraph/op/region_yolo.hpp index 8dfdbb8e66db30..b7d9181a96894d 100644 --- a/ngraph/core/include/ngraph/op/region_yolo.hpp +++ b/ngraph/core/include/ngraph/op/region_yolo.hpp @@ -79,7 +79,7 @@ namespace ngraph int m_axis; int m_end_axis; }; - } + } // namespace v0 using v0::RegionYolo; - } -} + } // namespace op +} // namespace ngraph diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/region_yolo.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/region_yolo.hpp new file mode 100644 index 00000000000000..2ca3f324e4f937 --- /dev/null +++ b/ngraph/core/reference/include/ngraph/runtime/reference/region_yolo.hpp @@ -0,0 +1,175 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** + +#pragma once + +#include +#include + +#include "ngraph/shape.hpp" + +namespace ngraph +{ + namespace runtime + { + namespace reference + { + static inline int entry_index(int width, + int height, + int coords, + int classes, + int outputs, + int batch, + int location, + int entry) + { + int n = location / (width * height); + int loc = location % (width * height); + return batch * outputs + n * width * height * (coords + classes + 1) + + entry * width * height + loc; + } + + template + static inline T sigmoid(float x) + { + return static_cast(1.f / (1.f + std::exp(-x))); + } + template + static inline void softmax_generic( + const T* src_data, T* dst_data, int batches, int channels, int height, int width) + { + const int area = height * width; + for (unsigned int batch_idx = 0; batch_idx < batches; batch_idx++) + { + const int offset = batch_idx * channels * area; + for (unsigned int i = 0; i < height * width; i++) + { + T max = src_data[batch_idx * channels * area + i]; + for (unsigned int channel_idx = 0; channel_idx < channels; channel_idx++) + { + T val = src_data[offset + channel_idx * area + i]; + max = std::max(max, val); + } + + T sum = 0; + for (unsigned int channel_idx = 0; channel_idx < channels; channel_idx++) + { + dst_data[offset + channel_idx * area + i] = + std::exp(src_data[offset + channel_idx * area + i] - max); + sum += dst_data[offset + channel_idx * area + i]; + } + + for (unsigned int channel_idx = 0; channel_idx < channels; channel_idx++) + { + dst_data[offset + channel_idx * area + i] /= sum; + } + } + } + } + + template + void region_yolo(const T* input, + T* output, + const Shape& input_shape, + const int coords, + const int classes, + const int regions, + const bool do_softmax, + const std::vector& mask) + { + NGRAPH_CHECK(input_shape.size() == 4); + + const int batches = input_shape[0]; + const int channels = input_shape[1]; + const int height = 
input_shape[2]; + const int width = input_shape[3]; + + const auto mask_size = mask.size(); + + std::copy(input, input + shape_size(input_shape), output); + + int num_regions = 0; + int end_index = 0; + + if (do_softmax) + { + // Region layer (Yolo v2) + num_regions = regions; + end_index = width * height; + } + else + { + // Yolo layer (Yolo v3) + num_regions = mask_size; + end_index = width * height * (classes + 1); + } + + const int inputs_size = width * height * num_regions * (classes + coords + 1); + + for (unsigned int batch_idx = 0; batch_idx < batches; batch_idx++) + { + for (unsigned int n = 0; n < num_regions; n++) + { + int index = entry_index(width, + height, + coords, + classes, + inputs_size, + batch_idx, + n * width * height, + 0); + std::transform(output + index, + output + index + 2 * width * height, + output + index, + [](T elem) { return sigmoid(elem); }); + + index = entry_index(width, + height, + coords, + classes, + inputs_size, + batch_idx, + n * width * height, + coords); + std::transform(output + index, + output + index + end_index, + output + index, + [](T elem) { return sigmoid(elem); }); + } + } + + if (do_softmax) + { + int index = + entry_index(width, height, coords, classes, inputs_size, 0, 0, coords + 1); + int batch_offset = inputs_size / regions; + for (unsigned int batch_idx = 0; batch_idx < batches * regions; batch_idx++) + { + softmax_generic(input + index + batch_idx * batch_offset, + output + index + batch_idx * batch_offset, + 1, + classes, + height, + width); + } + } + } + + } // namespace reference + + } // namespace runtime + +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/core/src/op/region_yolo.cpp b/ngraph/core/src/op/region_yolo.cpp index f260acec7f695e..4eed7f59904bee 100644 --- a/ngraph/core/src/op/region_yolo.cpp +++ b/ngraph/core/src/op/region_yolo.cpp @@ -60,6 +60,12 @@ bool ngraph::op::v0::RegionYolo::visit_attributes(AttributeVisitor& visitor) void 
op::RegionYolo::validate_and_infer_types() { auto input_et = get_input_element_type(0); + + NODE_VALIDATION_CHECK(this, + input_et.is_real(), + "Type of input is expected to be a floating point type. Got: ", + input_et); + if (get_input_partial_shape(0).is_static()) { Shape input_shape = get_input_partial_shape(0).to_shape(); diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index 6f46f1430d9103..6e3a9f5312c2c1 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -325,6 +325,7 @@ set(MULTI_TEST_SRC backend/reduce_min.in.cpp backend/reduce_prod.in.cpp backend/reduce_sum.in.cpp + backend/region_yolo.in.cpp backend/relu.in.cpp backend/reorg_yolo.in.cpp backend/replace_slice.in.cpp diff --git a/ngraph/test/attributes.cpp b/ngraph/test/attributes.cpp index 322c8605de7782..64a5a60451ddfa 100644 --- a/ngraph/test/attributes.cpp +++ b/ngraph/test/attributes.cpp @@ -787,7 +787,7 @@ TEST(attributes, reduce_sum_op) TEST(attributes, region_yolo_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i64, Shape{1, 255, 26, 26}); + auto data = make_shared(element::f32, Shape{1, 255, 26, 26}); size_t num_coords = 4; size_t num_classes = 1; diff --git a/ngraph/test/backend/region_yolo.in.cpp b/ngraph/test/backend/region_yolo.in.cpp new file mode 100644 index 00000000000000..8d520c4929acc1 --- /dev/null +++ b/ngraph/test/backend/region_yolo.in.cpp @@ -0,0 +1,86 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +#include + +#include "gtest/gtest.h" +#include "ngraph/ngraph.hpp" +#include "util/engine/test_engines.hpp" +#include "util/test_case.hpp" +#include "util/test_control.hpp" + +NGRAPH_SUPPRESS_DEPRECATED_START + +using namespace std; +using namespace ngraph; + +static string s_manifest = "${MANIFEST}"; +using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); + +NGRAPH_TEST(${BACKEND_NAME}, region_yolo_v2_caffe) +{ + const size_t num = 5; + const size_t coords = 4; + const size_t classes = 20; + const size_t batch = 1; + const size_t channels = 125; + const size_t width = 13; + const size_t height = 13; + const size_t count = width * height * channels; + const std::vector mask{0, 1, 2}; + + Shape input_shape{batch, channels, height, width}; + Shape output_shape{batch, channels * height * width}; + + auto A = make_shared(element::f32, input_shape); + auto R = make_shared(A, coords, classes, num, true, mask, 1, 3); + auto f = make_shared(R, ParameterVector{A}); + + auto test_case = test::TestCase(f); + + test_case.add_input_from_file(input_shape, TEST_FILES, "region_in_yolov2_caffe.data"); + test_case.add_expected_output_from_file( + output_shape, TEST_FILES, "region_out_yolov2_caffe.data"); + test_case.run_with_tolerance_as_fp(1.0e-4f); +} + +NGRAPH_TEST(${BACKEND_NAME}, region_yolo_v3_mxnet) +{ + const size_t num = 9; + const size_t coords = 4; + const size_t classes = 20; + const size_t batch = 1; + const size_t channels = 75; + const size_t width = 32; + const 
size_t height = 32; + const std::vector mask{0, 1, 2}; + + Shape shape{batch, channels, height, width}; + const auto count = shape_size(shape); + + const auto A = make_shared(element::f32, shape); + const auto R = make_shared(A, coords, classes, num, false, mask, 1, 3); + const auto f = make_shared(R, ParameterVector{A}); + + EXPECT_EQ(R->get_output_shape(0), shape); + + auto test_case = test::TestCase(f); + + test_case.add_input_from_file(shape, TEST_FILES, "region_in_yolov3_mxnet.data"); + test_case.add_expected_output_from_file( + shape, TEST_FILES, "region_out_yolov3_mxnet.data"); + test_case.run_with_tolerance_as_fp(1.0e-4f); +} diff --git a/ngraph/test/files/region_in_yolov2_caffe.data b/ngraph/test/files/region_in_yolov2_caffe.data new file mode 100644 index 0000000000000000000000000000000000000000..3111300e54df57b5b74de7d31ae7e8149b46ad66 GIT binary patch literal 84500 zcmW)nhdbBb8^*~>_9)-TUKLtKh0i(1N<&4cD5I2&rb@f4tO%j0C=%LI5})%t3Z)?p zMN~!$NgCSxT)%(doa=qA^E~hSdENJM{gx~2uX#_TkIC8Usz=z$TS*F^ZECL&3`-KM zF|;F9y4y%;xJu7o@?~i#wba(67Zz?8 zP(4YQYhwZftxtKUg&g&ISyj=N*h?Ot6`>=(%p%>dCxKPvB7xj>DPo}QB%B|5o#)8s zaf{^9(Cui;7ayNQgY(z$sCap@5=)3mt2vz+HwkvypWyKxM}!-PkLNQdhw;Eb(U{EOA+OV)1|Y}>Sy=eiaMPG66P8pA}e7?VQYN=Q+s z+5$4+)+fIGkPTmYT3t|j+!-PR&vLiDe&mYvKim0hR+HEHy}aIVBxuf^N{l8N)Ax7h zkWEo_{9l>@b@g39n{DNw{FD~d&z7xNlkk#{bd3_KFW4#+{yJH4G~0o+TJ@1RGRp}+ zH3Rm2Ur7QbXHv5t*LY&OHcX$nj$_(${(I9dp>Te!P&h@OKU?sj!gS0zfzE*#B0Ap? 
zay&D+W5-gVq*x{|xqphws<{cLDo^EO+H?8qhtGMg&nWQQ)5O>H&f^v{ZOPaG6Rx_t z(e}&YDZ+{+--T9pEv)w+C8T~qlfb^PMz|<+4o{Zc!|l4g_}EtyxZm-Uwu(CAdExyG z{&ZOy{~7s(Zv|5@m7M@L>a`(qOfgTmyjLiHUX{-uwB{v0SMt-#e7VWA;k19GKdoP7 zMMY2SCpQ!#h*oSru^C)NpKL3klY(04=H_fl)oduNxk037We|9Bll+m9rB_OO1S=+N zBBI*ANrcKls@OV6C#dPO+s`!E++$tT)_=R7zD;7#; zWOJcKEa+Jb!=}emv9DJW$&;7hZ+QTuN_{GNEI#tHpZ;a2{u5uH4jLZ{y1#gD|=_>fGy@>0Z93ac9Y`B!R z1$jEtNw6~D6(`E2-2Gb)kx@QFp6ohXkt5j2FP&-QM>8|PQz-{3`;x(bSP&>avj+Ww zXFN>1g5SRWh+CIr5y$59WEz<8qAQ1adg)bRtLhCtb96LZpE@7X9(%)&nJeLeZwS=7 zKH#dR+T7XmJ)husiHG)`65c9XAbg_gN_z4$$sW5&wrk(M7nEQui zCX#nvgcxi*##07WgbFWClP@t^L{0p!b;IgNzF1kEN1cr)RT3qncgtyFT@ypZE=LHk zsYN(>y^!BrKT4=JX%6{#;vKONt0y|kqDkK1U)xs!4#Ius4-4P7kFPMO>k}0Ftsc!x^6V)^I^< z5pNSeEOc60OSbOWKq{*%iM`bzd3H^VzST0ItqVud*9Y&CIqjQ;d5X)y@Z2oeys2HN zzV86JX1JY1nI;G(T^&jOvp7Z;*2R+R{=02=2aT^_>)!}3*2VBbAAh*->^OohotCsh+x%(H~jbe0a7#Yf$OT4vZvt=4$9(q~O@7z& z3yH6&A&ZZ;3a;+RBJZgTy*|B~4C|guwl(CD^nb?$QcIn{)@Uario5Vx_BY83T^AY> zHHJz^{v)m>(lq8(8=1F1jrf~{k(QWJGTK6v6!|WK;xE&{{Id+vo+wMdUb3L8ook6& zQW=@Ptyyq7Xa*nK9>&!brg0~&CShm&e>~Q5CH(%X0%JeUBa=>w(u8Go2UKVmp+?4PWs&;Vdx`DQL_zWU z?L6}1Yho_+qg}qoNYpr4Zu@FC8Pkv{*m&$P|L{4Sli{L58_8zDS?_2vc*v0acAUw( z=2+3rYYV8u!S_TkHjOM*EFdMCi^vB1F5yX4NmBJ<3o)6KCpcHfZKI8*2z2bC$(rkF z)c;>Ly&395Ck>xMU-lW(rS)QTK+lY5wTuwrbP*fZwBfcvj(!z+(tUz4t47jUa|@{J zKnztg4WvKHQ|XPa4C?A1K|jpBO~hyU@_#+qLRqKIikRe%3f$USzF$I-l3QuiAjE}k z>XoKzcdww$bGFlOFTAL<^nYZ9>U@5z>XLBUEeSzdXush8i(`U!hkFU1ltkYS%%z1p zZ$o!QNH|Bg+)}0n))9h? 
z*20P)-5Y|5j~@#INjQ&L8o{kqlX;EjMJ{t{FL%w$E%)D2K{h9+(Iu<&NT0)f{{5c< z)NX9#3cc@m#LFvuk?2*Vg9QJ8pFg&s+1<(UU&fzw`pn7+{+ z>!Aj$YCnr6 z*H@z!A$awhEAHMm3+MGe!+#I^Q98K~8^>_W9)1;9u#Xt}w+*l3L+pMoR$1Er5jP4O zaeUcrJbZB*-f?QeYn^w{>ziEVVMmF|BWn&~@L(=>w!Xp5e)GGBfEh?1;|EqlVQlio?tsi~Pid1S&sK?g+CrCGbMhBTD9DH>MQ$I$c zF!dGQRUBOzQL0fHqB6Wvxj?aUbIgd!sSAGK-gnQiab!KZx8Fm1iyF*dSAu?zk6`cA z2gs{cD<^0xR~8CIDn-sqRc4T%C`^8VF1u^6=)oNvm2w{g9hy)W)PmW$H}L%1O8hnQ z7uG(MtV{}*sXVq&ta7|n3+A7^hURitP*qrt0fu)`eCW=czWN=_>>Kgo`j>dO?>U-9 zH{z!wT{t|W3wKYe#ufF&SZj6`SH8TAN`V}G{U4x1>RX&`(Ty#ZiZIbp68@;3B5S^9 z(9-;OG{jbvnVcNPI_wo$`3P<1X=~4uF17GZnJf+p^Pa}b!`3!v_6-N_) zpP-3*RayVIX)JG}1!IyHZ0vOh=54W%*($E3#4JZpwqJ`5KjBLEzM2RRArq9}ln7H3 z3aRbY$8?YTS~72>4Mh3w=0_%!Q+b6=pkAs23+2qHGnce|JO6$Yn(Azh){= z#o4OpnN039Xdwfu9^(z?fln3LS#=rrK-7qj9gh99f zxAMC{)wcu^thMmCGzV5MgAHCvIOdrIX2iA%rw_B?-Vw@>^jiwW`$yu+ZDME`B#Qkf zKfxNGUa;I!2bvSaF(p$GQzn-~@y&-kPNJk-K2w#KZ+GFR|CtD)!@9Y>W+c@9DuCj? zFQBvbJWQ9Vf+^`M;bL|in6~fZ(+)f&UY?>{w^~&gbN_&#*5oobJ+=d476!rgvPS4y zZV7YJVj;0Z4T{Pxaj7h2t}LI)S2ei8ridy&ygv*U{yqr{I-i1zc|OEmx&UWCj-e{^ zv}v)_NZM5E!@vCug1(Cj;EBmEzWbURzuiZ{U`iUeC1k?ZJ|{>BmV|{%bLeufyVQ76 z8x2j2rHj-x=)X2YI{M&ts%dh8mOss9j#eKMFqcv4_DtLHe)8S^%4`9!j2itt$PZ~+RW+M zgqu|LVLpABc!}6}mCz>R4J=#p5DWYCnD(C5rc%l-?CED;rjWgg9ey@~t^InJmNHIR z?-*uy$dxS|nZ_o!ma>O#*Vyx+8cF%0QvTR#KTUnwLBHt^(n|9P9+R?%-#VT{XWyO7 z=Ir-p@2lO}IO(zMcw7tB7<+>oN+rUJ&;9&$=m4=B|E%JnAQ$%DPXqs6ebyJ5!NMQs zviLWN?AgK^dg#JqeyQgX$j%pnt7s~WJ!cQ2cHe`i_r5}A`blVmaZETlpQV2|#cCvD z*_&;d^u-iU=$oGhYx?4#E7}?~*R{YGQ&Wt%poHochv4APQff0rnQ7P$XO3l=bj_Uq z;QS~^8>K?%1d~3J_*241hFmtgBWB`7tlKMsp0mvNeD^KFj5ag*QCbIZ( zG9UIh3OqKq!i#PTgc2VNd+LSHu8qUO&)%zHQ+z1&q77VSHM3i;=dlHq5mDFk5Zr0!C~*|qaRw# z^rRT;a<-!WYdfz>}qhh+~J*XrY4oA&amn)W7*zLr!X zk2N+x*M<=ocUcL2*Hyx<`2ae1C(^C@O3YKwo<-g?W(KPzS=#6ylrOHM%XeDRsq=cc z-uVZRGU^M|P4a<|Pb$EFWzlmblbM#oGG={GlI_&qGBo?orBc`Lk()`iMEm0k!S8ok zLo;wNJp5=t+LOfupAMd&tD|k1zkxHm8}N?$=Y1iq%3Z?I*Asceu6El6uVqQCr_YT>$#X7 zF7BoB_vDy)|9jeU?HE1Nc$7jyFEy1K%XTE%u<9NocJe<-_TcDidb60*=o|6$=+!wa 
za;!IV{JxGQ70hC%|LCy3-xHbVAzv2fF^_q@)nzry)R^ogRhH-WkGjW4(h;GLspiyL z>fUL?E+5xtCa3Sw^}m3EOC^17d5Atp+(a*DUn4ft()jV0yM(`|WC;eMo9ONp6KQ93 z0S(PROmfUcN&5Caa!h%s?!K-D@#C)`BhVP{3&&x=j=!+Nwi`sBRFX|E{oqsL7`T5& z1}5v=AYLbWsjxwljZ6-wI;nCvMt2oP#JJ$WMrZ8H*1?|DmZW^5H%vXTAJXT^;lHQn z;e2Hzjo&w&$^KJdH61(10`ES^nx%`e^E7eQ-oFsy@P===7!2=(@@Sjpj%HR<@YIu~ zWaoY{R-n~RTa6>=qT{thAzvA+S{K7&1qJY%u@zQa5y9v8!?E_98(LILVd~85f?auK z)YIcM^+pM5zrByEpH~IH#8_|?4y|vv69iv0am#@NILXNYrAYO{5Xto*9LwCR^dswVr6YaWTF< zXNr~f2-)pN=(mAF+HP@`zR_Gl+awp!w3qrc=gB0J+r5y7_lkozqY$#)7w(ylhS}Q} zaN}R*ROz%IUo~MX7v`9flt?L(lBh|FCV#2O+c2N^txTo4??O_YOt@fGN$h_vOl^W^13*(rZ=UI9<%s}uf z>kK^=2`avFzjeIQH&d1k;&zkQJP8Vq;PcQSx4bZS~tq1)ug%p9|v5ddgrmN+!9uE=L}OBJkEMeVwt@1G?wfAla5)T%0!NTq_18y(weFox=U?6`_qn$ zdbhI0agEGtMF~6l?*h|4kj6AVIkJ2DCQOluvtK79Sn_K<#+p@`&&LQ>SCY=&MW(U6 zYoeG%i8I5)a~Uj=XJ-EfXvzfvyXP;;e0IyThVN2rjhZw|3-e>A7WuHWqy?mc>} zAe8Q;Kj_6H^6btULzZ0O%$A(DWv8|~u;wmfwnC|cF8Vx)2|VYp*zwz0WP~|W?U!OR z17@*vVu5V>s95$o$)2tIY|aeNFxF`*&H4>r(F+EWtW9Miv-6qF3ZopD?s~!s;uf(f zf2K2O_nB<3k1-Rsl3?b#C+W591JvFW9tAP5p1Zcge4Y46N`FqU*eyAW!=y75?Z~kKj z(|?D+s9sTUX{zBHlJ9V{TialgTnzkHw&EX(#!%13T$-mY!HVWgV}UZJOw9QJO&e`M z&xLzKnBW?CT5W@zcinLPxd<*Z-^c^6AEG*rh1O`CHeqQ?bQ zif_T)oqHfP=?SqmLP^o`oMZMG3Maev5( zY=+~(bGS@!GZ8m8#^itm>{s1_pLVXnqt8N+IF7+laSUXCDd0`g2%~~>A>2b6e;(81 zg7vO+n#WDB`4oV!3w^Qug%eg8X<`4aBuH-1C96a0pkdx6kluU+u2`hPl<}9is`pPa z@@FPUPuIsg1;bGI+6WJ)tKydfry!(qK;qT8zxZr&mJn9yJM)d`d^NoS{&4t1%2k-D{Zf7B3-+NGeu7DD4 z`Y74yhr5mkQD;aWbG=V|z8;CZz4-1oDg4ws> zn7?cqX3{{Eo@s_nUk3Tz>sz4kU?|wXUki_1jUh?b1A=0`Vfh&fYWpF9-FX z6;N_0&NgZIGoIveO)#P39+_f$gibK3q!V-`naxZy_NH()yFY&(yXB+7jOO>yPDM*P ze|;F;Gb)b`9j(f_s{WwjI z|K2KeKWECCJ`q+{=fTo~IeR4jj0x0q?3VrsvRn4t-)?loRJ*U`YIZ%gx0z5)hguHl zkCe*=EFv_Hc|1{M%73I;Ny>Y{3#CxHy5Jl;cJ(d$*4)p+?SxEk&P=k2FPOK#Hu-=}0lXAhZPD@}jrM$#ur2dUSC zWH9QXIQ8OKoN06q_N0k})TRTZ>M7+xk6l2~wgm2~UKO0(lusKRYN)}IV?xoHZ=vqb zMEo8sKV*5hK)>_^8a`UP!oe#8ss^5dC2}}hWXbquDrwgi5SLMF=`Lx1x_s0Mx}d|JrbvZS>n*PI^MA(F 
z`EV1d7nS8X+ZCYc@IDZ{f5n5mI=R_;9eVkbon{3dUYA>?7a!UhxWZbucZZTRxFAKDpBMDIaST)bZiRzn@Q zPo9F=hnwI^$u7Z!VeVjFeH^3;V*#QsLh@}<)R$C8y@ge9IrTmyO;d$&o(Dm0>r?Ky zd^BsqjjUet5go9T{bb33Jlx$Tb#O`gXlv{kY0 zhPzDt^IYa0>PBDcI+Ag!Riye>6rbPS#A}Z`Q)R0{VZ&$-c(P#(HSRypj;4s(MFihx z&YqK4>(7s5yhIyn%;5BVMe3^U>ZiKVbRABa!5O#aR6Xr28pMAVKq_7_)eT-_)?$nl3D^ zBZn7R3Ju1$S3FP|&U;>~aqUGT$u!a{%zO3+>bxW|%Pa>z>#XCqCLbc>UM>RA!RpH{+oBXd|Jfx|q;My0q=n=jc*9Lf^Z0aJE z)U(Db)%J+f<&o{kr!!|yrdgjvxE3i8q-6zyhWH5lk~0pa>%6gKp)YzB?#0KYn^5D1 zIllNPhMlfTtnq9UU3zb(V5_d1?V}e)z!dI5lI=wNRi1!4#_{+;E)X-@web4(8&LE@ z56TCl*`2>T*vNKimT)+Z&Z{}kgWQ*c>XL3K*}n_Pg9G?tGFff4(#`;HRl=-$~kGzEf~u=@_{7c^!9ll_Y-7C+WurA82|Mu!SG% znZ@WUEQ&a?0FNFrBUFY4ypv$Pdao$@HBrHbNvz}IR5q$%ISbsdo|P3`W`T+i*bDny z)-ic3J(F4iiGiaa{mU>GS@VZFf1Asqzg=e|(n^?SX)W`dae=wHXHXJ9o6Y`d!UByi zR@iEaLP$^+z1}f{l?7Na!?6dMh}}OHVbIT3+!|m?kq_7uc7|_D*-Ou;9pbmHq(bwQ z`<%b(pb0dY)moosCFM1&_rg2&t*n7fPI$(Q-&|oX>icL@whHrhJtx$=wjb`r%>j?X zhg30n4_mn4GFu_I#p+xxv02Xw+1dw}nfc39Ce@ilRhpjCKly}~X{y7ANff$$ylK)D zIVNZ2#bo{luvdGnS+SljbDFNjHf)z*Kl0?M$4wRLp7WJ-Irj+j{?jG5d;{n^)r)k2 zdjlOeG;2(IQcA0SWY9L-7;5vzhqh~9A@k?Wt#}`<3-D(@l8HF_qf ziMYb`zY;JaeJcN@x`^wTz2WygC20P!L~<%Qo^J_@0-<0T+}IutWjc>xd*d+l^{j`5 z%37eZvdwmP%^BM!VF907A_kg;mV#l8-}rdRNU#%W;F0Su{$J+^qNGqx{VABZJ3_*D zafm;C6buHh@XYL9IR4`qr zWXNBG7lCo`{mw>OwAhdRQya-lQm(@0Y)jPh*F!VM5x7W94#$-kW5OLn?Amb-oG(5B z;nLfXVj`gVQSxlgi0|Z{K!9eg>FB=mB<@&r2%`_KMw4b&eEU{_+s~~8*Cpa8*?1G) zH%x$seHX~cL%zb4ZPPH?I0korOvQ7tS@?`4p~$)|7^dNXOB_5v*5WHX5S7ETSx-PJ zUJZA`jhk7^$rahARn_72X)Q_&Xa**_Ni&JS^c-p`QJQV%w}{dw2- z`TRmpNy<$-9rcH8k*D=0Ajs+6qaiM>Gjr zCc?vRDe&q)w`_w3l)30G8}Mu{g|%WAz#*%Xc>g>}+z-YQu`+wA?&nA`zl?0VS7(#G zZZy%KY(k6!R|#TVcJc7fuE6HHgYfPfm{#}(4s5y%B|SHX?okJ{+q{96X|3QA*9C7M ze}}Ta(%5xe9^*>&QB+C~&pNB(xJ(1|j(-V;XWoF&^FO>XTmubktvd4DeBxGhBNl>f}(=GU}94< z#1>}Jq+%l~U(!Jjt~Dbade`Af>m6{>Ne20V0$9H*9zNgx#!JS}ARmU-PqpT}a~{&t z5eiJEV;IY^kB5%EK3Lb}goW;BpnP}~NK6TbiCyyC`;j*-*f^x$Q)bir$trBKYY>}j zkwdLZC*p_7)o5`w7+04j0LiN(t4ycU`%&f8`Lh~(@K=GAoReV=X$Po7l@x1t&L`vh 
z713v4IBtAW528^{yjHK4cDHM>5N~q^^^6V7Tg-mzTe5dA(#pU8TtY9@5!kG#ib)<< zfj|7rd!l3LuOUwGK1quyXQ;5#9!iWXnZza;>a)N__2jB!0X^Q7PF^_ffT>T+VffFa zp-f~e`DWNaW{JpBmqTl)n&l(A65>b?MKda5-%~z$nkzAADIxmn za!6m16wTS7Np&`Er$O1!E@%hswmgZqn1!q^#p z5O@3n)cM_m4ViLregyb$ts9*3AJF{m1L8m6i~fpIU6LPbj%?Aw$M?zb+1%UKz$ zYBs`KrL)l`bv8ySn&J4Q-SBVaQGTze6SP{#WB;TP_|r)mH!iP+1t-5lqqh!9)Ole4 z;99i2=Z4v6foJ9%hC4et1Un=yf!}HX<7t23l)eK7%6@~9!7(swX&bPQqtNWME~=RP zgi{N|P~&ze#K+H~yIRM?op1Aa;en4_A}

iw}ZgSsZ-o>Hxu>*C2fqpJ*%x2n_IaFPy7H0JuqSrk+{Ji=PXg(c*A2-fKwODTwu2@Ee z&AXU$XBW#l@`XK}Tg}W?0?pkZz=rp$FhI^0b3&d$=fkVeG~WnKRQ{tcU#qi{5x1GI zh?bpv|1i6hkW?0Q?lsYg8jF)m1!&o!gg#&N;n>S0;m6K{@czbD?v;-8$CL|fRiunv zFMZ60ZCK3gyA`PNmO*e{GYZT94)y!XlJxAMXnHO60yp)tg2L&)1TUW|vS+=A*@WOw zHmc$ZP2y>M&x<>-E%O|>I_tu#A`3Ec)Hu4Df3+!fN`RFcUEu1{;UvVTiZ*O4p}OS> zB&q(YF#K6Qf8hR=i`)z0OaCYm+dmfc=Bp?{!T2-Kur>mU-u~bzp{K}-$iJjRe=_Zh zJxFiFm(VqWOVrQg2rXKUL!e4{tmT&;Q*8^`Lr?j90;$`6f8Nb37ibx5isW z6nA8q{lLQ z>R#W#!xW%HY8{-t9uH5FZ!-YPTko>X1cBxevo}3IB|MC zo#l9*UQ)eG^>s>!N0|&}xLKmWNEY?ie6IK=DNc_&ydqnKyQq)JA>x}FB^>jylI#nJ zqY_I?>F~(aM88cQt+qSkCN&c*UnT~*(z-DIv?OZ2sfW&>DtMZtf}4g9}}c1zx!th~n=SqGS3fT((LLZ*QN2|DL~t*BxTG zW6nPa3v}dkBGi^xgkC>&@YX4D+}>k_xjlCv=zA1||J1@C zZ^E#qV;#2wybMGv6w-Vu;o<_M-b<#5GE6ph1F z(571#^O!#V=V647lP&P@QfJf+F+p3~QaHQkA`JBI0!;FcZ-3Gdoq=i(8l=cj`Z8am3Jhcisgf zVFOH+7!D!oNy6$2ry**ZETri;fVS-`FxV!9%_TEn*Q{va%!nCu!4hk_?w2V2C8|qP zPaUJRFG^{oNg)5R{2tB!lt_gycJTe~dx=_ZHP86>h}g%gv3I*Sv1WrLwk$uBoxTyz z984%%A>BfrwZ5Y^^Uc}Ij(@cIOAq;^S^%XcPhpL-D%Uevo z;g;D$kl#KMj}1J8vG&X0O7$(M&yqx~l`0swX&NrGn~pIiYABLn z`lI7BEvyetf!Y35wqLBBhWN`(p49Xm?u6Up@)iet_qGa*Z9hOLpN=M-#<(wjKB|~m z<9g9S*e1Fg`1vM&PyGXLI(ZQyrfXnJh&iHr3k(j%!5;acIylw<8*Yl@DZ3#i;`9n! 
z{*}PdFBOnn&;*GKMKF2Xe^}^bfx>;_c=`5Jxa4*aVt=oP(PkAeYhyJ8ONn5@6a{Q* z8jGu%r=ZkMU2N!3!ySgI=$CjK&iR}Ju`|~}z2OFo7;_&k$~}j`c`A6}xfv?zQ?ysM zM_DgREIDqB7iY<%^1~9i)+WnZEWXp7R<~&0@D6%6b&wu@b(@a1PoN=mDBl=VMC%t{ zrDijV=wMF`&6)6lDpjel?jOV1B@-7~+_s#%e?$}SrEu#>@~+20=%*yoQsso`T;*0(R7UA-U9_K8I^Dg85Su5vybQG16a-az(t z;4~X79rAQur!tB10wy$@%+?NivMuBV8@s!jsZOeA8zgVDUwuNh#o#aNKmUjA8QH{C z+8(oWqRFgcNd;@~t7OmCU1nGAykH+TmNL(gADB$HxZTOt28KDUOr`%m3mccmCJT$1 z{d_&6*dTY^r2S&K_jeMGkD&iha!P;WoC(emRR`(mKQ#72V>Si*XSs|=# z*b!E}@66EmEo1gJdW*iJW1G+?unq*?fEXpFi4si=gniocqluvV-&k1o=fe6-U@=WhqAt3 zgK5RLOv-oV(H{!gv^X%H-kdRyKF%?t<}yIn+?vSbf|OX-&NuYwq1iMgXg1xVc86AK z{uyEgN2rrWDv3TWPb@rNl5mIHv}tD%X>3}~mR@ycs}zRvc=rvM$L+an$~t{E(^Q9^ zuFe+LZW?;`kv>4}-P~AepEpZc5-gY`nZwMcPGyB5l5D}=R7S@ZG0_Ib-ew=+$CgOo z(iwZfKdyuvd-a(f%YH(W!zc6M@xjb7{XaG?_XBN`p2;%4xigQ)?rhD4a++4>1iwA* z!pfXnD3PzVtw~udJnAM!Uxx_T=egmGEevMq$IEGG#Q~}zzK~fy+{8{VU(G^Z4`Vfn zhE(QT8ny4fK=P(OAzB}enMYe3JDi!yl&r+qzs}3lR(T>b{;+|`@m#iz9%bpjV%fC^ zYHVRd40X)>K<|Z*XY)5mvR0qVbhqaXn$)7mPTI|2=?*ch#JP|?dwrG7$j)b~s?u!5 zD8EKv97!FEb?*%l^=bZ*!^oERSzH2 z6)H}2MYjv}II^FFHZxita+WUcd_b)vd+FxmP4wM?`*fnm5xU$)oa!q&)4cq7w0Ph& zNxCIYJB_WWh*LIQwYi;+{qu|dJ~T*o3}qqztW#pT1*)t(>OL(mJV~V*Hqg!9iRAcF zanR%OJj>^d+X~?Eu>x4_6aZ~f-F)(Y zp+tYMjx2Wi!V@g>;6hv)JWomInTE#&9|F>dLvWw%f|ZY<`L`Yx)r;YjvPAfsq7Ji; zn}XD!5cca_g&dPZFv`{m7Twwo+BQ-UXD-}uPwqBK6Bo;?4|u*Je2MGghylnHzA>&rE$S$-IuerKRP+l{ZD zDq-*00(fYy10L@>$@&9o^mWNY=t&)i#4{b=DW+idP`3E`qG;^=o{nzu8*sD7Slo8> zJ`CEg2GvcIpyqOBd8>IZj9j`DeSPwAMfpX%z2+FEH>|=@C!?_Xh!0Aj5w5-Gj9$T} zIB2Jefs0;1u5bbRD5c@!(M3bNCkea$#p17))i};P6^%`H;n|-~*x$Av19W#`Shq7y zic`dk^CqB_Lo$AiD#Pb27ZbkjN9W7{^sAkNxOX;od(Xn=f*9PlDhyYwvBup)**%TV zD5s>n-H}!A5^>A`Hk2k z!V_r?&_+!0$dyq@%}>FpSS?g3S%8;*&%uQuQ_!|s24^YUhJv+`AiqKsx*}&o-LPYD ze9=vqHk7FiPdX2S=>x!@Yok@G0iHM~jdG=zVaM~6kbZYP9D2P3JgN>t*pD@kHnWf$ z4J+gq3@5@ni38B`SqO6t5n^8*gK-D-08ako7xrfIis=?Sq3|m!RMLE5xV2g@fVwkRqoJ zW_7=K##lF~E4vQ12K5j@+93Yf1$fqN1E~Xt!Og3ZZ{L*--TXCFWTk?3bDr>~QM%24 
z8dW@YRv;`0s{pwbqFD0y930M>10_w0xHdNyuAMJ}`3lN-Ua<-~_qOrD-{w57rG!UL zm;*gO3gPGTkMMTaJ_ws@0gt6Dac{6HhN%97ZE_Rw1?uD8ED@agwj1zFE=)802p)c) z;i6|Zs6GgSom<>VhwBblbxm*R4W1GnJh~q5*u>+SyWzOrU=e=t*2MY7QW*TF5=IoO zfQ+6WeNcUn&NeKfFOBE(UD?xdU+oEWp8)7S;yf-)jK+-2N!V1;2kN!b(Cm1GeqUq2 z7Vc`M`*+B($+bm%#fxz$_ctC72VBF8Ur%87t2ww-ybnYN*1j!2EXv z)2Z4plpC7`RZ)s~&UiY?Hm@4uqCPk^MGjww^zd}86C^e*j!N$^VV3j#*qw+w!U^HW zxoC|(?mM#>7k#qEWnV4uz~FC~)nE#%g(@V~tb%+lxkx*o+A?*0A2!}>5mdV_0VN+r ztaG$P4H;`JTB(Zp#g#BGWhP8obc!F!`&1$NG?i|-Y|a#j75j4|5}ZcuhMc%6C|jX` z^@YRnl=*8oYncTS4~D~*^(*-7YYi1kWwoeCe;b`&!s%V(vk+kz4L#ddz%;j`FjwpV z$Zr%Gs*w`{(ojXpA1Tlew#xKk(`5RvVGP}GF_$#|a)gGu5ZLP=5C0A2q>m1chJGV^ z_@WvJ;|zmg^a68;sZjxHJPbt6&j8Qg8IYv$P&jLcDB0p#K;3>kqe~!@rU+%JtLGhI zx3&T7!#@5smUFvdVtk-z4jho)33hea{LzzSSn1or1sMwTl9>jzIJu0x|Fwndgb1v2 zUr_M8BaX5WE;#Mc41Dc+6;zY1gLAYR?j7O>8+?v{V$V1jQTt3-FE~b@O^{;aP1NWX zDH(LGj6?0|nRslxC2px3fJ3?7_-MN;p56Qbc3%&Gm>(wO^RX{fRV9?!y_w8zzN#&M z;G%%DtrubE4Fik{9*ghCZpRO<%dpsR9RAfj2cEX>e9rh9^6B{@nm_Cb?Vn*zx7lw3 zkpo{~xR(s7SQz3V{go(<}^;-d`3BSR^;u|o3 zMLFbdUVya$yD|7(EIyIV#DAW_Xe{cEBIl*iOx_>lPpyVOFXiyjg!y={d;qpLe}fIv zcH=CuLs%4-JH(BTqv5i|p^969ZaKYhBJ8TL~P|q3W3ZR}9R?UsHtWhSh+bSVT-5SNZrs9p~n)o!%0qZyUV}1}p zUl|p&x~q#eS9GyvnGU+veuj?g-O&DSG;SSm!uLze(X2oj^LPG+w7qZOU~wVHtl0sQ zQbBNy4?xH^Q(RL%2Oln-fp3=&NmxH;jLe>jGq0&*-k3r#?>PZE@_ukN zDT8aKd*jmuN8o{UD2q!7AZ{^+ge}}mLW^&M!LDJrr*I)IS$F`G4I?o2wmS;y15ke3 zN^~ss199PMwq(-;=GMB8+6Cmm*}Fr&^*|jA>OFy5-Xk%3hAK`m8uGK3b^ni|^YE+j zedBo1UfQKJBs5Tpw9b9s$JVrkkd>L8veO_mXh~9JG$kad&V5}+MniT|%2rmPudJfq z^ZN(R>r~J4bU)YU{dr$tQlS7JKK-C?eZNwj%@^s2uN4v(pE($IGy{hmnS*grQm7fU z53I92g}HMH^-1f;(7qpApJK()+G5-1T&Dmm>Zi7KLQiosA2pjzi!-3^m44uCcLAnMKgo~fw~^n+%E$-za60_jAhu$f ztH9EIrmk_EZg-SnbN<*c@y-KWQfU6hA+Kgg3MAnhd!gBpPK1bx7V~)O_zDbFt+f%72BRN zgvAEWp+hF7Q4;){Gj6p2z`E`UocygY8i@i$?3hq$F z@QGZvAW3wo;DuYihO@l1rc|=^ixx31T2FM=>yZmiC89;IWl4=}qv+VP(fn!bOkU@c zLJs%~&Vp&9$zCrX{^J258A_$(-P;t>YMDTytj?07htHDP>-$pEIzyt7H zVv;)Nt-L2-5=NfIKziWHumD&qUyMB0b#_>ukThOdIiHk;cyJCIDUvb 
zJ)9{?|ECO3|3!=D73tH=eY5FBoAq>5eGNr>5bYgAC9Hd{qI~MH<-Vlp-JRU z4{(otrAUTP)~2W4Tatb1Q{iZ%3{Q~LrA6-|h*5euH!-f~M;_NmsvivC;Q&(E#LJSuYptn#nLRmduEGZ-pLerqkcgb^MeZSeuT}Khc0?kl zb)=#zFreb#opSze)_Q(s|4$NkIE9o@H8OE`qPuhZ9R4`ChG)*G<>8>1^{vx-T_~KAbR%>g?H1Ro8!~T9Q$$w$6nuy06Td ztmA3brM1*zTB#_A{^L;>Hc5(}j-!drgXqNk%|y=5g}iX3#MEp#)eNeoyr6`->pdYc zjFJm&LN4vK8a9vk!|g`?s8|N+WP6bdZ4<7!n#cd~ont@3{mrtN_2vU?l%9c)qbvD~ zQM$HU|g#O-UVJ<=`oYsQnul#TSWY|#!ProRRzh174fIO9-53t2y*|; z-K(Bg;J_)89S@vD`>YnY55BdZ|3YhclHfCPl~zFW};*ha}skt|AA=B$24y>&Z8t4P;E_81laDr)Woi zcVa&=njG8^Phu7%i*~<4{{FrO{L|dbzYQ=TUtW0)|?b6 z&Alwixqpkt4BZNYGFL;vJ}IA-If`fr-0|8DGp;^Nfoz#ES7aF7N92|KR>a%Bh}?Gl zmW)t7&2tvUL7(Y?kh3PAdwvWip(Q&iPX2gZ?o<~;%&p$IZ?HS;uD;uzh~_LH+$cq4 zG`5JRPcMM14V&TZ+c#W!eJ~lWtVq@cnDgA9Zsf+ELuAk@Gt&R80U7lxoJ^$#?ignhxsH+E!13l?AT9LL(&=7N06yi+}U)PZFR* z(+jrfMf3i{N0R5yvqYhvN=0KyGYPP$AuA-~$dcqv(fyA9+#~J#ibCVh@X#TNFfU4A zC|$?#D-Q>dA2W<3O7&h9@tt$XxA)q_{?TXmekb+F)mnM-IXFWU;OfgGqaq=*P!8_v zDv9EDogsSp?&L_c1sVQx2)WZXQ?%2jpyF^^kSOndwrHMAi%6r|lsjk50`14+_&4)m zWXttR@+0mB`7Ej@yJfzTFux{}m3@~y?cYeME_@`z+*-(D1#dEY^a)<5(N$qv^`1Y| z(1J@h)ZyEQzMwL$56ETcz%?grc=JRR-bd)e=CcDJ^Y&{#d69=?txG65>d+~fr*f6w z^)2VpM?w7SmBW1fi&MP%b{=;*U&ZxjzUN1eedph8ABqO~!qmmV;IttKhTGUftE~#`?YhGsJPsgw%P$kFU+c*JVVn6)rN3NR|1FRH zV-D|hj3MH(I_#P~3XbOt0hOUD(4)44WA;6}q&H0Qnx3H@n7EE?}sOjKXUht$XO4b$5t zYvYHyU%t~MnW6YnVxF;Flc=0EC$l|C{jgGUv}6Whx|c;-n#z2ePBgb% zh@^{*qmU;Qrb z@h5;US2X6=_GC($Bm0mkAzz8o;$pJlmksf>z2LrB(8&1ZACV+zI&jSL<|X#i_zNE& zKDjiC&$c$^t7L24uUSe-#D^(l`?1yvRf92ncGV(YFm4K8jMaQ!;W>WqR4gw)U&F(G zm-CMHaK2V?l4MF*I0=89BboK~Dj#K1$>Wz7aF>6r+@)j?TrjeN<`K3qBz-t|EHQ(` zPYN)V=W@TZVXg%`PV+mb?Lal$4JQ8^3FiYC#M=AAgpCWKWbb3EX!yyGmp5%vpB`_q|OM<}9}` z)HVrqjf?PKR3#>Cl$TC6xQXKyhU1TXz{md0m@Z$7t^2BR>y@<_*e3%c{~W?Nh0}Ox zMh5QuSBTMpk8#QOT(n-NjR)f|;NPgBQXAS&n%Z21<>?lwH;FjyvMm>vVEQjVeYh?~09 z#8XaMXDB1}TJ#eYA~^=GJ&qak5^$gAPt-0|m1gZzk?QHFN*|=^N~iDri>8OZqSwTK z820ctru_bm1tqN*^Rp2ju4+Njtts7oS4sL=)Qig3WTiV#b)!+pO-yJ!haUdf*g85N 
z5Bpq03;kQTDOmWsUHgtXTNI$l<`5rsx`kghN#zeF)sV5WvxR&i$vD|Nd>@4W}v_*vj^|6HRhyye+T+fi)ixasW6oQW*O zdkmXD!j35)D5OgJRA|JxJ2Yj}W;$B2jc7EKP?5l2HM#jQ|0TQGVcQKXyKMoJ3vp%r z`dP7$D`UyR@LS}4_cHpe+L0Dd6@%xJ3~o0ejgAmmu&6v&HtDbmi;Ih+PugRsMtCLd z|7{LTjx8tK3oJ=WZve>8{tOWg*PvPABI=@NXq&?mI{#6X%_xCpOJDyH_NcKjssoqdw&?M76ZRdP zfiF$s@%%Vj%zyj_h;BIqm<&fH!t zUWmZ>YG2H1)4zoP_-&qj2qn z+35Dy71abz=F@d+9QvO&Hb%EY#jyq`R%nORHJZ39+Z>-|Eyl@HwqQ-7KPnp#{3#=k z8w)l-!n-`!6&GiX;_eWb3qg?iU_6+%?Ss!F z4?xg9O~@E5hWdGiVC0d{4=US3U7#8<9q&qSG@aqUyH4`3ubh8wv4!4_mEa#g5Y7w! z5Z~u%pwlk&e&;q4b^oj6Qe8Do-#ms5y&Xlz^t(*H_g^iU*Xavq9bzTJy2jG~Vvo~~ zGhw`CtDfjonhaC-8_D8&max{)2o}{jisg9Urt;4o(M;`jx_GG)%N*#!Mt8ZfPbbUC z?|1UFduk{3-0H~WES9oC&QYvBrJPu)fK(Wp9Ytcx?s7p4i@xpg0csvVZ6d$n5B{dAEUZp z`<}jNd$~Yg0PsQ^kaxY(1#6P?)G7bGm$B1-U-iZzx^kg6H zbeu~6({CeTK5F#r`~qre_KEf`?V=Y>Z{f4!0ZFrDg+&qO!9_148_&>zU(~EvSt|D+PC&`sx*TK?oHddNO z;T$u95=2il#Pj{5l<~2{Se!h= z8>b9WMZexK_!JQhLEaRtOH)y~^Cj2`Io4^V28p)%1bXAT0o|`O1pGzuaHU@`eA?-&~K=%1(;PJ6F+-k-MP0XEgM@$mP08xAqJWFja>KGMR@)|$gV={QDLJ!E>7!# zErIXpuuw6ZJW`eYOL<5B{Mf(^|Efwn(sxVJb0_e~`zt{-;5j6Y*1$!_jIcP>9F5ND z(^u_9)bp-0U0b6L=C}(I>l4Ax{UGFs(?IEC2`o8R1}*g$Va$ejuybvMA1B)RP`N#H z(SJ#_s?49bwrRqyar3|}Xd~p!Oowt834G~11ItEDhfak9TcYiYbGnO z9SbD%?lea_rm2}sUy((woD!3YQ|~0DF)`d!zs&90B_ThXF`uy>9ae2CrQJ=%wBhkj z`mf!NtynpRZ7|ek>v}HG5|6#K(jb#s^W(Je>U#FnC6U$DhO#Y-6My`_UoGwm#Np?o~_KLS1j>T5ZSj;=au3p43XpLF^uP>Yo`Ct3IHsMt`LiN@LhFA9vP#-~o+(rvVRR z?n09=zx>?e4(mQ;@>gyb$YIBa^qZ@Mq}M-!BLi20dFdJ2;;PLal5)C#kuErC=0ilB z3Z@zRLh`sT{HUQw(h{?e$~F$f0*y%2@tcLh;fs644V?scvyP|&d>4%Ri6L|HVA?$*J+^M=Zf%tnhO_)yF!s|f5<)^ z3rCdFA$9j8UM}~DJHP4Xa^F>9t@=kkze*EAmc@f@+a-v7dmL_1PvO43BKfNl$@N$L z;xfDC$#jyyAJ*lGUVIOdNV0ADar=IdIxPd%nw5d~^%VH>W;=L0&!y8B##5_=7@GP? 
zg@&Ooxh!0hS~JIqp55<|=$}8wt6mHO%|D~S?76^+IK}ZBj`8kKBE7lgFDbwKB!x5y zv*4Jm2dUn~BQ)Q20WDsUOd9M8_@Ehm;OT-oUc0uQ@33D8L(boV4W4zd#BiaYAGpZx z8#wWAMDROUZ-AjK0z=v9kNqu<;K-)kX#YY3Q_ZaT_>BG#I&&hg{9Q;UkJO~s#~vXz z$2BCIW|c}b4NidT8h@PLla6=Fl5xs|Vd&mxI;4Ba8w2qvD5V?+`E*k1#~u5ZGw14ppq)F8ANVE{XaIMcW1OzFBIJ)*5sme56WUz7SnLSG~3 z6^(p7mlUd1fKByaWR)Y(@NYP;4K-mVx=Y!A!wp&4jBNVxg+E*lX@>Dx8t_bq(!dcQ6ZDsRNc`vy9!^g! zj^OU=mO|YU2hesqPVPRwL24YDh?d4?lJBb{={VfY^L?`5qGKdntUgRcQ64bnYc)*u z{0(iN79E>i7XkaOx!5u*QUa>q%#>U3ZvPWE(r=S;Yo?ILJ;3-5L8C+VttuM1H1CAFg*Z zxYn`p8m`<3SqTo9FsTwApC62>&y4VB?RA)X z*AQh-8lzpCIy%>z;MH~qd^7$U*iSAN%`krsgYWjjtk!REOio*H)i~7sD6AEH&Z#4WEh8d=~z3vPBbp7c>{yVcWkU*tALk zzr1V)=doGf9hA-Ol2Sm)y8s-7K2BpF6CCd1f{VTFF)w{6M#Z*4mIPtiihp49)*25O zTjApEEzq5Q2SR`S2i}*4;)*##vGK1hez>NE`i&=H*QG)pXi&;MRWiYP-B1(@PMw_P z+E_e%DV%+M3dDzGa7vv4c5PS0IdavIvBnGbA8X;4%!?|#h8FNy8HJ!Q$P&{tpToXc zPxu07H~8g}3JLWHgKb*CQsDRN&F(`$-cxvFe;t-A5PB?=t_pMUJoq6W2KCD?QI)Sq z?@SOpsVgGs0Kt!S^8F`zCrgg$Ug^s|ztmv;Z1mXh8*;3C&Ij7K&XgK^DzgbPMr_O! zccvLPit*6-?7CbW!*Tmqyypg%G3f{^-%!NTqc5?Vmq*x>wvY7k#cFz>IDuB99y2@R z$Ra0*SmWkX>_M@VS#DPo+bm(?`X`=Z|5_`t->ElD`K~uxG$w(z9~wm)|9P;5Lm0y# zEf)7Zk=;Ar%G$^TvH!D!;_3SjimQjt5zkcbW)sH*uo}G-D&0ATy<1qpURoSuQB$|E zyX|Exv$2ZV&9xFc@G$Xy5+(KyogvOn>LXTM7tb2HhS7713C!dDHob$K<*3R}$nWyI4jaogF1=wod0f--u}ra%6wQNR-S zVR&+$8P30|#3h=8*@k=5ne<2{eGz?(uJX^N@rM*x_JcgZ?_-bCAD+aUyCZS#_35}p zwp?%sD3PVxwCJXBUx)bF1#(rpCY0HZ1-sO{ zptdUlVl`w@ZK^B$)6oPvc?pE+wZQ!pRh+hT1ZpmE#G#QID7C)H(;n9{2I zG`Vsl`DD74l3yc4d)JNv8M_s*v#uI0`+OIvym4oa_pHTJp9PBVyNwpl@2X;L-(}e) zg_Hc>%UH>sD-+5c`iv)fO(%KYr8tnAwO1q>J(9Ux&=mjK?JxGZ=O%vc`jYhsTzPfT zI#Sga$fxV-bjc?*Dq4Mun;pu7*uSY@e|#^w(6W#H)cMOgAHQHmzvr|0UNZFl;S_KS zKxl~*I$@=+!K_dnlRcGD>rNgtpBe_U-jq?dmM!d>RLs^I%F`K1x8ZfL7Jj~{i9d3b zQ1OH?=L*rsZORQ`s*ni5gExcn^%_!ow1YIj0+W(zVME(>IHD$aie@%}jg1Vh zb-fD7Uy@1Xwpti%`4V(regd(!&H!vsApTVQ#?OXyo<#13D#W4|}F z(NfEQbm1EvS}UCO%dNDq=frwk8ow1|#LF=`=Q%_V*u{s8+6FK3*093e`mOa#JrO~mU=+%b#Y{t)UW@>qq{YW{??2jB_<1>?4R+JBWmmv>)cRpCY 
zm_YngP1(3}1KFFlgKXHYr|iL=SB#$e#>BwcrwzIETG~N&&vh4*ew#*rlyM&RGn=M5 zcTpc1XExJY!uFaei3jeL6W5h$i~qVkVpnqwL^po_qC1-}(b36=>45%I=~3$xddokD zijrKIvk|bG@{erH-B0Z8N*OU4US~e7TWF?tBVFP9i!3pW zhHAZeNfQUyGF5}Etf+1(OI$jKMzGanNXTAZ=QsfJ`^NCcYwt^b7;WT77aMWf+*hKJ z`&2T-;}F?iyPQ5<6GOKLancYxfjpjPA?l|d!%v@k>%ODwy2QUBiZ{oY!|0WRz&dat zZ}?m!ve&Sp({H;|hqic8>xokpRi`g<$G0`yf8ZvO+-nthX%-Cm2WsKak-PBm`##vF zBzWvw3}{YTpy4$2+Zotau zaHtJW0+|3?^oDWxyU+rE#=Znw6(N_oq=gk{tqlzr$2PY*UhV4O1M zKTCyKmKAih=XT~*F3+xsHo?yIQ_*PbQrv3jgMXaA!AeOYES~fTzE!HAa-<3xS2l2; zR0kH`u!jxPQeuw;cH4Tu6}%wVgo|AtHXSds+1pEp=q+Y%vqrPhN# z@o-rcRJ2-v#n%Kje^~_f76qW)DsOx}eFRP#@{kAG--0UXXISTc98TYE;*SldL67c6 zpds6!@Us)l+d2r=JiZ1K6U@--x*{svQsKUv7J$N{EU>a(15;Mj^Rr9Jcz5S;ZZce- zD~r!dqVL6XhlVw<;l)G9kvjtp+REJh;SGLu#}HokT#wwncS>U3Z6f(}px8b4-E8u; ztW5N7f)=+eGJtm(2VleRASkSqfwCjbeB4?E*kd$6Vt3Yp7%o&L1?I99Jfw;1P5j73 z2SnV<`AvDZ_gB7foH`iWSMuF+Vp?0Z=X6O`USmB1c;)V~0~ z-p0U^x?AA2I}cQHbhyWezI?W2q{!s(Xy|Y}0@D}N!PDr^Fab<4)_EBI*)tMn39 zEp+lXFXyYPW2y6s3uI4HFGSy+gBC`1sPc6ncIG*tj=MG1_g3k=(6q+!uyKF4)j<)4-5L4;S~dWoKa+iva1{+tA8{tJW@bom6y@qini?i-(@Ur zs)UX!6?}C)McDf+1~bpTfU6+~KwjAznj(|v%1%eNTh)p^&{ARdtd&`GvI|=_-J4dr z_@niRXtbR56Kclyr~S*cSy`GZJ8N^09qT#9dUIzo$1fsQBRJ0t`~IN$Lr231or$l!5?`CO-QGQX&8 za3y_Ra*NiTZ=>CNUa?$xGDwQq9vbw7RyK{w%pmUp%U%vXQswHLWu= z@nsWnDbpo;n+K4u7XG4(8ch?KM1_eCpHGhpuK zpRh1V2Xj+iK$=21o?{T-?Ep)LB*AkZ4IJ1#2)nr{ zUWt{%`H4-!b)O02!gs(r|3lDx=1wKK;~}l%0rY*Of?c(yXeRV`4_8^@q`GZ5 z!1oqR{QVh3b(FQ~3=o%a9kI7kCEK;`1`UzTga`{gym!(ZN0gUCQIi3_>bn4c9(={U z9tfSgFC)br1JcADrjcR`SzECr(viLYri`80Q!y%e6s8Bt!+K+ZD<3idkICL64b_RP zO0r74sIypH)_7FBs&2G+^LI1Ge!F4y*=0CKT?>__ydz<|>q%oxGwfE`QjyyjD!4{R ziTCLp5Gy7Np1?{~v386j^WSkDMh133)W_L;!|N7ml5m=?@JWW|vs(0Lz6(23*(AKY z+K6wh)faC$gsfw&J2T&zMqx}URZ#qoj(_-r=6M#=dLg*)+8sxI939v!qZ4et#&h;6 ziu0uuVpfgM8U?anG0JQZSy7vybl ze1QuN@Abu@0~cfI;Kk@|z|dl;0&2Nr0d3Vp84V@8ZzPMTZHn{E9C4xGqg-&$2~S1N zL6bKDXxuvs-F2N%F7yv*Zu$Tgi+5u{RuJYz5S&pn3|$7;;nb=|nBCk9Yafrr5vudh zZlNz88?BDPTOwdupA?Y$a}YQF4o7pf75MXq5^n6e1asAoLTPV2{M3965nJ9s$M0q^ 
z`yu4i2im#fWnZZ6n1!eBkH@#xt5MBjEq2eIjjkg%qjLRttV#03(C03gvBnxrz0Gje zw=(#AqXu4{>x3y|w6SWUHwL8b!kZ)>*ZQ8pt+o4bZ}3zsUtxmbF<$7@e>1A;SmV9R zxnR068#Gqlg!yN5(6wg*rdhe-%5&pUdO#5~8yjK&P$gX9I0NPD0`aqCG8#U93m50^ z1snfm@b~K=Vb_W*hOSD3TN9I@Ca#VQ85An4Tw4rAWy1S+mnMpW8=$f24CH5sz-ZtM z$$Z-s7}b`?)k}NHRVs#+YsQehkCf;Z6BVdl^#V$IS|MBB2UxcV&Wj(y?c|%V&wK+c zpS+RhEx68QPby&OS4C7CFN2f3^zoU$12%8&kCw~p!MC>(x*~6aYuFvwm}<@MNc+)> z)eey6;EHb-ZNP~4%W-bxGz=!=F|E)G2gOoc(a#e5DlCNDH*;V?w*?Q{Jxufsv|!e~ zPw-XFN{BN?;p7t|@U7YiG`ehx-&(aXTcn4<)1C=V{Ccn>$06~(P}GV^gvk9pFs)Vx zkIdJ>it#!aH(VXw~dJ z^b0=bu7tIj9ioG`j`K&WRB%c{J*e#e3c1-@*s^OTR@@$s2IYo$rLF-kCpg2T+ziQ= zzhQJkl$5HhjiQzFUP94%OL)C>l<-;YF!!!E&d3+xnYF3VV^m6hn3}MY#ygpxN)+o_ zID*~oETSe}CwZOPMgBKvCcTw+fCRN#n7nHV2G2bMcht+No{%ZND~x2y$@gih zr!EbuFCri9qq&-^BDGqpK!5dqg~^T6&_8MrN;VyaS$1~($oUv5H?57{xiOHoE9>#s zPqbkbSqV;SXF}x0cu4U52Cl)HXj2ytBlerP8&9eLqdF}-6QGOhpLts$^YI5S%gY=D(+$GLt{o1|>Du(Ip6DweMF z!9nulQAX(U&KoTfes%*Elq}2yQX^^HZB3|YDu#UJAN<5^15%`Y0vr!Mg{`-h@bbHM zIQ;fB{2X}MJ$D1m{L3O&IIfuk;XVhU^z})el5PZ9xwF8&JRC}QZUM84-auBq z<(9sR@VL$dCX~G3#|GWvOOq=3jMhuM=35~>Q1P2g5b}Ur3&Bl3V=-NlKZmG(>g8*< zIPm=7A~Lq|GU2mK$l5#&y5XH7yEe{=`P^-$AC88JE*)D1o%Oq+C`1YxPBJLHBzP+N z{s1qihC#3n=2@-a&*Q}G{^4Nu=hk+%YeN{FzAcSB-gJ_F{p!p2o_hqB`Wr*RX+Z9!_Iw8XW3dOC1WGMkZZb8?0V`B_zvr=jr#zTgypAa*9FpW5kkHO&wQTIXORQwyQ+87D@qQa}ot@G)9E80*gUB_Sa`-RYm&m88-tnChD7G)lExBhU6|P+!J+mlpYHa}rmFjpBxbxOog)si8`H0`?%#Wu zQ@!AC@f5s|IeXb7@g(+PUol<1`8b(pT~AonS<$w_+ak%7tF*;y7kgJ9%f{RJvyzNh z<~^Z|{pn3$)oSZl+!9yTc&8uxwmy^^_c~E66Db|L)RdWj4Q1)0-Pz5(cj?sjY8s{D z#Ef?gW4HB;+3}aEY~8>~Y|0%UmU3ziyRmCNo7zs;zxWw!|ED2r)T={+_hFBysY;c9 zk9)`qnhx;`(Pnh$wL26N-_j80S9IK{mvpJH^P+hFYZ}({jjq~zmd3@tpaw1L=yK22 ziY;k(_=fUAiNg&CzWwHY{#oUVo5Wq0Zm#W5TN|}#oyia?)ypQ26&};B3H#|2KL@&7 zemLFd>_bcC=h5t?Uex=O7Wt?1T5{6#wIm@gkUlPZK>Y%yQrG`tn8%(_=DKJmvkzLw z=Fbaf9iGhCc5PdUtg%2MXMUC0Y| z3}lKWdr9mtDWvN4!JZyh@8|eumlBgOG(JE6He0N$9L)W zD@H8$$uKr?MXe>g^ayX4aXku!<_|s_|Qug&X+3Dk<~(n&h`y0cPgg7;SO}^ z(%Cd2!I;h;8%FQ0N)xHN7xSJ89w00Ci1<{Vqu;z%(BoleXvq0G>gw7+AAW46`y%es 
zTZdBU`cO+Mag67!6Z9o2!HFch*M@E{RHN?aPLQ?VzLLKI+XU`I*txW;hn`{8bZvJa zUG5W2jqSbZ)Ft!E-h6Hs?Rl7(SXG<;F`Na=-FB+|RU*kNWnPn|AdDg)u)R+I4>9Zp%Q~2@x3AXBf6VRKtij!ufXeVZ8J*6;XdPt`oY+ zf$Nr__q}8sJ}nP5)Gwm+^)_5qJP^;l72I8mb#TYYXE5lQFK)S+j%KU&W9<0dXeK!B zR)2B8&*jVTS~5jjH4O}we+%>L8)3E8V6@wyjdKr9$8FLSEL=Pl7bYtRPD@ohy&?{* zR8!&T!4!!0ssO)sQ@k|hK8y+JhxhJ`#to56G4@O*J}VLCTn#~z@@;mYCR@nOXUpP+ z^TW{A$q6NEqOo(lE4C1Q3?4fG&7X|NXD#t~Q_~k`9$yX3)h_UTYZQ##sgJoEeQ;TD zC?@PWhB8l9;Y*v5SSi-PF@kH$W4bZsPt1fDH z!*F7XExOh>L&Wo|Fw8Mka26NB+@Jg5Myd(?S)~kNhTEZHq$930+=H}1iUZ9y z{9tMiJoNkmdF4-`qhA~3jQ#>&@GWSCOX2<4Jh<6)4hj}!!^%fT;6vXMn0{9m?RUO} zgB4LQ|N3_L+z|(C;8m#n{e-LVk<=_GpZ?b!M0JWZsmdTn+E8+dU%LnJ!{Z^?wYtE{ zot#L2n1OF_B{a{KhYec~k4!;)RhKYjb(G{R7 zp)bbeT>%FuXK6-=BfKtZPhb`(8^5qbTve4q(#5*%OKldB-5S_|$RGQ;A>!dz(A zO3dmo!pI*XFeA>9t92_t{i7*h9G?m8vy(tD)zzOy;{EyPi_Y zG-fojl(sbHw@h#x3;y661HOS^7Q$y&b@AJ}G=RT*dA@=wjU2R%+J;@Ec7e7mU!RvzE<<6KMSMioL%#&m+nv! zxFT0%;1W(m!NPtz#cv{KgDg?&l@fm0;}{s3G{BSk8t`423Oc>hz^<~By9JJhs7;ed zZb}-NxzCE;??VI@u9K|za+K%E?t;4a^I%P>8a&vM!GHJkgZCL5;7;6fIPqn%$Uj85 zMoX?sq)D>Wx~-UQ>$prmReDgH$HjE;>3!6*`X>3YU>3JTkh^g*+!Pt2G`kFz<}L;Q zHft)`eu)}aX|l?}vCQ&TJZtIDV09x_i(bsU05+EA;o#KEFj`X!y-TXVU3(j>U7sg; zDC`#8(~-iHmX4+B3P0)6Qdzb$oY0NC`-8%hbl4gA9|WwoK(FCySS!8>UqU6I9i9Wt zo1H-YqdL?oPJmX|O;G2&Oz=xhhcTN%Al#2l&K|3kym0l(eRzC%EGDj6jafxsphCWjKPvuM;SeS- zu;ViL&BPSbwXZ`*@ohLEWYxniI%1CH66`IEz>+KeI4x8W9b2zK;5T`mRO$kn?-X$3 zN(Xc}sf95k-@u11J9Jq#83WFEqSdS6SoECXOhX3@iBw11s;e+?ST&^0?vKS+T?B8$ z2y__S3CryIVTY25@QnQg>MH~%&+G@_IpaPYNxlqlCJ+4TFG0nJ7x2MY8KFuU=gj{G zAzPn;jd~aKPI?GG@=n2S!AGUybs9DX9fNHBL!j>*24;T+myYWL=r7j|mmE95!L{Ap zC*ws$wy+zxQ&ojdt1TlTs`}Jib_zWdvxgo%UP)I@DW;;t1p54_6Ajr~LvD_YrQ@IG zS8UnVAS!a`6>Zl$Dfzr~H8DAuPfWf}qq<*W>FfNlbXa^l=}apTna0YIj$|*k`lciG zJTXA!q|-`)Xl$F!C%7j=_KH8?w&xlIoxBN+*99#9$TU`T?+eKLYe1)^7k7MB z1HO180f%g?KygOzY176w@W|3(<;EAG!ej|ZdWf*!vG>5h^E(z$0P48 z$=uFG_Ac@uX+h+AK5aqmUy=W=#k74N)YqC?vl6ODh-n)uq`r6;vp+pKxO?!``e}6*3bF-0t?;pXgYlbM; zyctQRj{)@8oe5#X-L$u#svVs=la+?+u(rGcD($W@m3?;Lo9o 
zSVG$>LvMqETSdA5_wxjg3isL&Hd$VMA9mveOL}YpbH1HQ^kWQbJ?SNrRKyN~lrev&t0~ zpdP3U5GqSU7FTd%Hviy)n#$1XqN_+X+g|X_K@IkF=Rn)2nP76@Ae;?M2b(8iP~99*_LU-8p+&Fk|K>yw_$b-WhuZtzYFu*K23Ppoy!G#yvX!(i(?qA0cr|32bRCY~*T}=<^{tgj1+xeO{7RI6S zePS3wUH!An&`4MEc;~_nC(M8$uR`reY(0 z&trMA9+fVCBUpW62`aoa6_Kr*kaJFjZR_tzTxh*!UH3Gg`*zKtr!)_tOEW}qanTJl zwkH!^8?iw7ZOhTx22%mKaYpc^cD|r^?HECSO|#&a{(dfd%6i*(f_j1cjjw_$+FHnZ z!x38xD+4OGbv`u?K1I*`*g?O9np5+w$7$POI_;4bqeB%dIWfh3oXo&Z!J!+ckd|FG zYTz@xhhFbMxmKf**Iz5NCGra5%TM&};}xW6bskOBkL4z+kG3%=`_5gNry?lPUWk%> zZIR@OWhh%o32krFL+Mxb@V#FM_qQA2%PM84YF73&PcKcR@&2FSSyC&S`CN$NjrO4> zf6gM~wv|Y8PXxEE>%E|8WGSjwy^MaeXQx$2;me6sYb<}#M3T?I&LFX#7xMdCIRC4)R>UV28Em}PvMU4J~nq@Q4*T&rGq|pAT~V)U3Jq(Zqhh~lQfBa zpb30Nc;WU`I!Psq7G5@`pQeANF%#oqs5>8)ygCJ~$(d03Fc((diG;>`N^o=hIG&MZ z3L6$0!IZn>;6t;Jb~Y-&X_5@O5ovJ5`YK4yXRvkqd3Z3x8YK1`L*KRa&@(*?a*iE? z3vP+9fM+0y#ZHEPe*LI)_XbJj_0T<#2$9-JuygE2h}5@*%CpPC8il~gS4j}slEic3 zcEEzA1k`W8rY}wiK%is_D<98<+Rn)^Pe&S-Yu}~0eVsJ&^bq~;y%cy%8>Xi-`)EZ4 z&%l%aNw1yYeayej;gu=RE?goG&g(^?rFM`;Q)MunG9F?YbYSZDG4Ln9pQ?LT(D0k* z=o0>aqkP~k*Q&3;EjeZ+kSkpw@Uav_%1e#W3@n@Om{Zi+Mo<&42Qg z^i~@4U?shZzjHo+oj8M=lQ_RgHP=>{rU~|ef*|D4W?Lz(U0jxP4!u1f3OTnXfU%(p ze4d|3i$yhPdi}*~6W;x_Ee)TI_^uWj_unbOEHQotLFXT*Bdnv+(VCze$UA4+RiXIV zMj9fX$4!kLWoxcmDNuI5A}HKeZ0r8XikowFC4F`yk)E9VjE<`}FH;R9nFKCpJK;mu}1dS^(9V*v^Jz+6$qs1BoaoX^G?Gn1wPJ-JV zpo$`w9zz$ux*(Sa$GCgFTW#HjZwo%$n@_twOoeqpnV=d$pzoFftoV>d9hODWEd_CP zVQcrJi|uJ>wW}PB$#vsWDpLiCJIB&0o$2tkJqi4VETG0|EZq7LLkqmG(~7l5)KtR) zEqM@!!tT7b3BQ{p2ynV1Fs=}zKZi|WLiiSF`0pz!$8DlX2|3h5QI3ZAbO|!1 zO+*F{lm!b>q~K$+s9?<4#Z<=K2>ROB!I-=zy4kXqW^UL{bqr3??HguLw^`M7i@6(u ztbs9t|9)N*M2noOYwyXSpWaRc&qIsg)&3Z|RXUgIIuuZ!b;qe@ojYA=oXVvwDYTsv zGluiDSFW3Jtc)9&pGiaaX~2OkM&L8Xg37e6rSs1w(LYt0^y3_TdM3P)+f#m)dw>sf z?@16hIO;i9tKmooh4*O6+Yi+Kc^Jxj7Kg4boQvoib5#G!9BounLl2%b3Ooyn1QQ=E z6p-oBHjeIN1fQif(A=4>DBhz*Ab^8{mW&+&+kLJA-`6JuTFFOkx22fWi8gJhGqTL& zt~*t7dvp(SY@HZbhOc*ZR$a?~-^Jy;%Hl>nPUJTIHKZZQgvuXTL3e7T(2diN z(9oOfsO{W4TuQcu;KQg{NK|2&K*`>OYdLX++cGDCOHCX{54q2$XGEQ-Urz)*JvM_{ 
z{>`EeXU9_gd1nL;RtBinQxeq`rE`zd?WlpS1D&vVAx(OjOnr|;(Y32nsGKmB-k2Ch zzw<02)0ML5S-2c3DWHOTrQ%d1dKbNFBTJ=kZ=wz5Rn+p$RjSh;PvN68E#(tnE-D^^ z=8XsqM1=^pjZ5ZMNTk!RnK!70dK#_duX&nk8kI7Np+_GG=&t0=9FlF|?nQLk{%f;E z^6j+(*(sxF`kH2XsC$rZ8DC8YmhYiU^z5kWVs+~Lv5Ctby3Dz}oJec#2Xe>fJ{K%@ ztP_MB&gMRB%cgD;<+OC;TDoeCJ=L5#hl0EsHTv7fMb&k2>)e)8XK`h^;L}mT1#x#< z>nHoTmF_C^*(4FVzMzIXI_^71l!e@adnsJKdKH(st&6*T!kX&$jc{cu+XQ8YvIVy% z%&`q2;heJcpiN4%x2>DSSeumNb+)m+qXj!fYHj__-?9m7>#Ca~*>C&bxj{ki*kELq z=Za)PR8g@|3l%rmq7qF<S-{aD_9x^k%h)R} zv|Bw|XxTJDm|$)$T-0YIw0mVJMDxZARc`4CS60f3<|WCVpbqdY-XlKG@Lup`Q09 zd}BEt%0koLiNe3>n!-B;YC_jsQQ_#gFT8C*N*HFQBW%0$j-_XvWrlu5%sBQI+uAV9 zQq+gp4rK}9@kNTl1>7W|iL9YeQf8F!M(`izk@}Bk)3-C9wkBryv4VY@)4&=W#D&T> z-`GFX8s>Z8DeLPO7qTK%p^B5FaDt13&~>YbFnih?_DB8~6Rct^Y$;`14IZ+qN~hV4 zm;g5SPdZDV*UZM>9%Mhi$Oxxg9wV#_6%kHT=wMwJHH6oBzQgG|@7YJWXf{gCnJIPf zJl|DESpC%dY-RHcmXatT+@`N0+<*sJdFBV6VLC$?zIK|h3KWGNXHuEfnpoDMb%u%E zY+^d0n!nKB@g}kR&DE;{zyR6Z}E;oH+uabVU{P?#_qwX3zpK*;ToxRJR z-@DIDmp)~k@m;LUtDE`P{APmN^)NVa6mC7XgAub9T53BTbyhVaW5*8k$kha=<*VcQ zt&dQ#!g92ASugsS-Y+;}y#OAMUJe_b=D~KYaddT~2#!Xr$XnzvTGPz$rQGB3$Cbx$ zbx$d7m8-yoqw?$IU_Q0~{hVgy9)z0siEvYWKU&e@fu3fr$L{}XFy+~TQ+8<*uf{>l zc)zk}nF*wHCZiPd9J*393Ff$7=)&qi9MSg%hl-mKeRDIi^640opm+t#ZoxQS z$%p5!yg@r+M+xK{6q%l%61&<^NUH|R@E=`$GPiLr$<2)BeavP=LFy3R7^{G!W}k+K z_E$KCzy~z)xF(ynZz_Ax)JHW^&tv6io+}vEg*UW}A>r9=IQoDkx?EyL^TTs^7fuKi zEr-wQ)OVApnsW~OB@I9MU~(x6lYEr zQ<>_{qhPqC5^UF~F#7^=c0&9jTopM2+R2HqW?EC-MzyP4fld}Uz1a*kI^ry@MZk18 zBX<9_9Q$}$nmyhS0ruVEu%|^4w7=WKbQDOFB_ipIUUBAQ{t;T6rm!8qF_WA)pUwI( zhpC1Rf`|4Gsvhn@pU?S3Zi0u#QlZzhX%|_e;!Mj>BO4#%~|LK z-a(dX0b{?-FOu2c0{{R}7e zLCuB@%}{51Y_7o#p5rwY`@_rYs<7v+F&#Z2AFap;68KISYdcou5^b_60&$)N9J5h} zp%R|^ZB`1K{dU6*`OnS>O1@Ks+ZEZ)OtCL z-}Hno|6+%imvmtz*+yc4CfM1ZpJ~^ip<`DaEN0j8vYr_C>?Xsp{CxZUZfk$D((TAzV`Z?{89Lw+AI z95A-Chfjj9H0^dM=i1RuE!_h^cH9||K64W~*PQ~F)pF?J;%xeK&oq$UmjcszT)}=^ z1O%oXf{U9EfUej!Sa@|lT;=OmId6N|>iL2m{+5ak-L|a@TVepMUfXyUoGW}gY6n}= zU19vH|6rEeTu7G~16}@m=t+Hpy5JsfwB2YQu2J5O4R3_w!pGHE9Cu*3cU{=Rst!+G 
za0J_~pMdXcJVOdU*5J`&b8*tQpLoj^G3?Ph9!It7AT%qSOE~I<=g}=VaN<&YDcS^I ziYcIFJWsoYm}?0&{kPFZDS5v!>H(X4`F<%q}>f$kU z=1zGyg2qD4%Cfridef*EnkgumxeLtW96)P{Bu%&*i_@EqV57t1u0tO@vAxmu+jIL8)yFc^yzzt; zISGRJ9E`6H(UX37!*hpSuYyN^6DT`$g8Az%9#h`~+4b#ElcY%Z`>jFWL@e>DZS%15 zYfGHH^C*I7S?I9c0f)2>f#KICc=+%WsFd^XyL=fsgSz;7xe_TJwZq+~Lh$C3i?Gy~ zMgotG@hzW+)7!``ra-+fG^XW{VWraAN@L=FTB$an_`$OP>U!Xc7PL zU%2%52OJsHf-fdt!t&vlv1aXUeE)eU?oN4*!L=A?FEJq1u9Jzca3Zbpix9!u!g)}Jc0Dy@5HZt zJaJvCF20=Hi^>c0aYX0^+^Z3f--kX#N?yVE<2oraQG|D&*qM>J_;Sp zF}JlG9Z)B_$Ovz%JBF`UECk|p3@%+ahLTx#@I@yVEM^voS3T^+@ne*TPHPo6Fu=4GVd8pak6SoPaK_D8>nC#-u>L72mwO0Q*Ll;-}T$u*uHzcwNL_L10}C7x^X% zTrF3_Xc<39Zc^bHTh;h%%^m!1{CvFnu?2pp6@|aQ3&vSC2u;m-P8II+jL>c4z&KbN z9xe_BSFr=Ov)c9XKYrH5phz6wjvPidjYY^(zKNDstcS&a?Lfou4ZRe)nNEt70=Tsc zT6S-NJ?qTi$bc5QGJYyI({OB*1Y;aRgz8cD$h84x2^)E z*Snwzr``z6L?5C+KZHF0RnU+t2f!-!0Q59*G^f5B^{kqOqfHC(tTl)5-(@YF)Z%${ zY2lHy|32a{wg$(UKj65>L`D*S!6l=sRBetZS+-u2NU56tDUj+nq;c6I}dg&nFAv!xtv3BEw@cZ%j%)1Y;{(k8`bst?p@gm6w~s}|UjY<;5&9D}Ik@*0 zDK40RDxYiN+N2{;R=16{w$!lstBPIaC+ z)v{5jE4gwLRR{b)A{q5)b_GJh+D5K=_5x~uu98l#vVf9d0$P%8G~#_BjThNWg>`@F zmHL}-$hix)oGyS*s$(F;Lns(@$SiT6GT##<_C(IlhGw93UDu8~;iym1^J zj5yNHs8x7A{e;U!CzGmo>qzKd8zLVdPh@OviX2aC_QW(9p6eJJw4vH>SJm#z$-f!Y4 z2sVCC->=i5ELq5zNEX@7Qyf4petU36#w(bfT@9)Sb7*E$AYDCM5i7MH!F5e*kRVg9 z?&hs|oR5nx^%_)zt9`PN+P4SY-=- zilr91qqg3KFw^8ao%;3e?BH|J(7*j3JzLFtw*&LQ^HeLHpLGRg2VWJ?oY1;2ANL|x zjae|F&<@9**xWE=@8Q43eF=7z(0$Ev`Pw`);VSOgGLVls> zd*)gG3U>Xdg|93VURK+J^{Pl1S;P0xJF8%Y(NB0|+5!e$WpKTx0KQBf#fJ7xWVXfk zVN8Y%%qq}?6lX=qMa}mMg#e0J$ou;io~lg7RI~@X1#mqI`~U zYnF=B-u4!{@ArLbB&P+V**&@=CzswYD64y>92BgKYT)a=8K}=Y20w4T zj-Iydg@aq)LvKI|xba!b)s|1uG0PJCent~M=7pq|sFI$waX7@z0ByXXkB$t-;G!k5n2$igB>V;3 z$LxjpJ$k4+RhvvTNF?G;c|<#NGtr8cAP2|$;stlUqL#E6?4kM%zdBWhe`zen`jCo` zz4?hxkIN(B?_LlMzaFBu{~B5JRzNIrhw!|?+qm6cj?6slOH}v7@j1;kq%+2Y{N61| z%>Vm_XP9myXO^8OM%j!EO-UfLE@}|XVhz&YJCBh5$>jGrTk;{*lH~eha`j~m{Q^{2t)%ZL=&KP3SNwT~gQ1^&b|UygixbrA0u+K0!5>Jrr_-S|)BXtMu{ 
zGfDMcPj)AXkpq<(=)A;n>Jd_pe=Wx3fyPvlv8e=0G-;y1rS5oz)*8ItWEeXdT9G$n z?a9RR<#?9gE|e|ePhDNJN|Nc@dw|wXfHi6{_ z!l7dMCuH6kgkQFckXfGJ@v37@STj5i2X#%yer%x5=JFA4M13_(^{#|8j=xtdlt6Ck z9z0^1j*XUy5&OM^SZd>GYV@nkrCHo*SJdysay3ZXb6@?CmQ2e+g z0K3SR;kP`a@}T)OI?HPc6iA8xUvc^g+HYZQ8J*LbOrWePw-Av$0p}D z;WIr4u>G<#oCnXD&;DpmZ#}7ju)t2pP_G6hzE@VfECeR~<pk*7`gm{D%%fG>nV|N=iN;ZD;HzmLrY-5v=AgpYT&$TEdA-_MwCa7BQAZ#RD#oC zuPuhb_SSoN#QU9(w%i9PZV3GDz5uoH_3%KP_a&x`;6Swm@;UY|HfkRSE_(eC({vV0 zey75fW9@aF%i07L)BEU|!<*oD(SP9nY9kgam_vef4v~`?Nn}Q)EQv2Tfm{8&acXF^ zZShlkw1N5H=$m(ul6wlhps5PC&;7=8ZT-k9lQ^Qh{}dTWOeG7}rjdj;M{+aoD1PH6 zL@T>$(Z2g?-04-iV7GZLBrom8@6LOYTjEK?TQ!#iwkDJ3>z0yUvyHgZxfrxOXE0&D zA*-~PV&5)(g`rbzFpaMTB;EALWyu}nq;Cq5OPx=`cEn-wq94w#TfuIg-^$`km$TM* zJ{!n$eAm3;WTX`b=MDfh~ztJ{E54dpZ2}m#g4)F)O!A;^I7_60rqdiZlT2doTDO^B{W*EVnbNw9E z|BJp)cmfBPo3qLuV@5hmnci*8dga8Khx1jqyNQEkCS9;u@)tO)vSR)kBJ97Zr>N6p zem=FU2cC>=h1Fx_S?D4yHqGn}44&l~5odMTFF!M;WUtFST^!k#E;)APB|lr_eIK0; zx=z;&$%FUK5_o!6jxAB(=bc6;vNiA1*lf{IHbb9xcP}{s@qMS@ks8kfbC)HFyx;J? 
zyAe4zVoGF>p2ibgm2vo{YG_i_X94B!!B^7~8qZImnS`N)v6IQ$G*Upr7P3jNfmpsjOz!u+LYd>k(Nn_>rOFHn8XlD5 z2v|a>Nh+y&c9wW#UM0_!GDvXNFx3g12lM&az1PvapUCwljal1CL*Fihj4LJdbLJnE zVO@Z46e*CmdohVB&?kD5J)EYtIOx8zhP;UPbm*s&K=E#&t#$3GtKnlT(K4)!E#i0J z#gohNap652GxGsn{~(sS{W%8}Iu~Krhj*|sy%p}%JcAb%?;v6SdpKf3VVYPxJeTnV zF4q{kxM^_txEE40H>YnG&#tp=9}gGmCV)+Y2ZU#(LnEK7+Ryt2#Kn_2se6}@(_jqR zEYyR?2GMl0|rw$2Br!Vv`7IzUYF+ zX&JD8Vi%x!RT)1^XaWyvvccfgU+8nW0i*jHAVy>#gj%Fh^Ncbyz2Flny(I-vX`XCA zUY1#n)@8>o=EAhmVvL%sW~C2yv-kZG%=x<~yIo?*w!BkiH{Ub?sY`{AUkEGth*@;i zRHlD=96O_A%pNl@Ry2Me>xkiqzFsS=JSb_&j2s{ z35$4tbAFZ|d$s;JyCPk~KDTBtztk0MaoH&L`@l&st8Rvkwb?MKHW&JP&%oA2Pr%!% z9qykqWs9eWvn%b<3@HY&k!Txs)>4vPZd?ReL#+_O-G>KGdGOjV7zz($Lf0E#*mQ6W zcqrAvXO|LK___{0etiIb-4*b{Fq_UlX9w94O3-RHn`(MkQt=dN0UHcPeXZXFeJ{6j z)15_Vq}eddcE<1~yO5qA_{Al8b#Uk9&LRK0AtX|8lY1NYhQ8fC21F0a15=*?Y5MN4 zzS$3sC2fUnk*l=fQY`$AZvlT1zK`*#09q2_py>Hw@Ys+DnJgBbXv9LSofqUs)X-%g z|Iq2@8tH)`{@GK-bGB+P?|=1M5CSYH{28ec-vmKj-&13DV5yhV2SCFq^Zsg`fd1BI*fz4iZW7%DY@I?zLGXH$DU}Ro3 zz3080KJcl*waR?WyZI8CwDb{i%c|xb(@w;FbrU`}bQrJMQ;9X1fUJ|8gI5Ym1=IFd zplIqu);e4u4@y6h$jzd5O!FN%|9Ue?ePTe+y$WQ0*%)i^*{aCFd>m4A6dN44hO7CU zU$=TKX|aDucBVWeztW4y`fdTS`J_N(+nkZ^(JG`38QApdHB2um64|i|goeiuAFV^A zRzIC=d>%;T1$sm)<~x4%;v;_7_5vLkc0;chsNz!NJiOz?YkaNmKDMcr=DpV{WV47o z@we~6Hnk`5yp?I#ZF?eq`d*bLrEZ`y*=xD&d20N=;y>Q+G#CH*A;5?J&c=1v5Nmz@ zg%oRcq5h#$f};&Zf~g|gaZgq|o;zy@JN%eP#^lZ*Aw5&bqNhs4U|l<&7_KF`A?n}8k<%Y-Nzvd;5_7_V2)s?n+b{h2!`*1SOZFZr zs^s|^s}|xY zAhi}*scnI=_lr4$JK?zGRTj3>&%#x*1)TE60$X?Y)9B`(XSCz{1uj+o6yg)?+>e(h zVWPz)SW=e_r@ThN`dzZ%xG0$h>ej)B94i+6a~!i#{s<9^KSEGfJ&Y-8hu`LNnASr> zo(Fdbsy3I>t`oUvDj7lryf;#1iW&3Rv5-}k%x3*HlbGU8UFNIf#U?iUGVR&RAWZYF zK%-HMv?-h+nnf!@D7TBJ}A~OwGOo%9R4)KKG!N+vbI#u%hM>~m> zo@gh`>?R_MQpm3=iX*b!*`votnBvz2R<=)_6|KDqp+~Yozxo!nb8f=9E7cCbNq8$Iv3$0EU0FK@0Z7+~rRpPz$jQANXt7 z8_15&v1eQ5|G);G_48U!o!y-=fe9asVsq$g2!EXqITHRb+Ta32XN$2FlN4F7lRWd@ z`W^KA`DbZ#E+i{WX5NBvY@VqW+va}@%H$p3{=fBbRAMWr^Pm5?L;~=3R#+UH3?DUg zpkK=z_D$DjCzP+lY~MPl$AxB9avFkPy|M7&UKea1rNcbi 
z#xf>UXEi)SD7ajPO*^K?+|KDU)ejTc`Tw3k3Ga2x3yy_Ai3Y&Z4Pcl33Zkxz<(VB* zS(q+nlLA~>LexTLIdwjJ*y6>k20dBCunr5V!Z7b;Eg1b90VN+k59uhud`d)Flip`I zI$4ov@xAx~rKt=*b6^VEbC^V(9J>|tp3a}G3o{G$@SchSxV4vq>~{??_dd@fD7y#O z&p(DAF^^#7t2=Pe`#eai=YYz*NzlBM_cAwMhte-sL4Nowgl!m$}?oooL z$DdHg>_yaH*^bKjii1GW4pu)-hpg?LaAutd6Hk?4HmelanI99FQs6lDI{h_tTk}i+ zyL7s(@e5bu0`%F>Nsv)@4CbF~fK?}-z{EA*Al@t=W=x*~`R+~h+<=w9!ypb#4LpdO zBhv7nz58+gx&`pQkIzRvj^~*?_n_aR5SqSKa!dL);Zd`Kaj8i*UgubZjoUupvogo9 zUymeSKj9Z8QQ8o-D^}oL9>!G!yrhN4r6^XZKm+bdc<$eNTy|ZQ+zXK+b8eeppKUX_ zlKqFUI{$a;TBJ!I-}_p?`}X?(KEqc(-W2HWxkS~c*`k+e$rw5>U^ue?rwnINy_gn! zBWDyjlA=!*7weLavsPq#j5HzBwxeBf29P;-KiHM?Jy20aIN2N{DB2oCAKp^NGv-&} z*@>@k^^c#(%c=$K5MPErZTx|Ly-h(wi|*4u8`eTnUmOHKjiK)Ko56VLMY>jIz}6$| zH=VOcnaNtsV*>}zKvcsyv}?&iymV&}UZ*|<|1#>JqT-|B?W9=vHJAw{Wk*5r;7!QV zcVyfBf|!+sHhU7Qg4^f4z~#~3@O<4Cd@6$HJ|CS2x{mwdz{woQ)D~gOuP8F@sdL%A zgZ8XQz~A3>0bJ^YK-@HL4^B|Nj2^U}foJFbz%`o=s9NU8!{V>Q#xf~(@0uBt$kt@H z-c&-)Yc;`wm+#Tb-e2g(p9?73)D~Xw=eeVv*1^7)=c)UP0I2nT0ZD$pq0sv&%q}j5 zqjMZUEL{%Vk6oiO)RQ_d9tRo@k#Kp@KKL?y1M%_qAR2GRk%aDNxc>JZEcMI;mlarI zhv>`L_T43{GfNvQwnd^xu?hH3&O59>K8C#XUqdEDN|POmVffB{BP=052`49{V~yfF zSVXB7U(}q3ePgUJbxy>aGoRxpnTN^9rc|Og=|6G|!)i$@FEVMEBBfEbIFkS<5cMje>aEcM=5YdHb-TH-Dd*oyW6&jv>DHQ%K6m z!(`Lb^W>cLC9)zygvf6d<@-58fz%W~P(5n~&Od9>!_JRLUak?(dp?U~_a7t=3d+dU zQ!!-Ev_jJO=`S{}T~1r)?xKq(gd+I^HdyKg#a1r9SdRA<{Ii!KhS!qGw>`;Z+?i>l z)pRv6TxdkDYi`CbmPz6GH?=6T@EA@HJc{Q`oXfLT+VP&Fb4hniAvr9JA$3(QB-(HT zF)j8We?}RQ`DrR-qVi|H*E^X+-s-jW8#Bm*@l#060%g)raS_jS zk3vFw6C3i2lk}`b(*_dlU8a61Ce_|?)?s7rQYYbKj;^J`AMBjiI5`> z7rL92AgT}?1r!qG9;Zym{bmt-hYG~N!m?h zBF2-Sj`n!)-KDxB=LL9{swXilBShjVf4%N|lc#Z|q_OHc*0E5G6Y&gu|nkB`{xeJW@@kz!Mb2}`!%_o^$zaQNCTB&9MMpIn!U zZGE~>bdMN!PWT<$wUpx^>s&73{sTaJ0Y12VhoFFCV4ik^+cD)(?fs#6GbvUvnOa;U@69Crkf4Sj_QHNc>syB))y|Fuv|oi`PCHO_og8C8MqR zwPo-zs`i--QI~{t`gna@5;cr!UF7gXom70G|2w`A@Bt4iYm!rLK19hWgt+>e5#iRi zh-l|iVwa4z9b1DwJLTEREYiY#<$LkDe|gw=TOIaaAx4($CS>HpJfh^RNlp};L0{ke zM0qMYxOw|HEYT2-k7}pmpYvDX_81R5WFLulO{m2G%^1N)L_~<(%{DAhsG%&>e0KSf 
zKPbN_1eNl=a5I+81UmIPf3|Sv~!Y4qqHWhqvxR741Ffk^4*>Z&r^( zx)bs2=cjO3S|MhI{M_i~gLs7m?;cC+$Cn$-iE!E^vP6Cp_83tG>#II=hx|wM`*93P zx?GOj3QBE9b|=!Z-Hvox=s~J!SB&=cxMOqQL_E&uJU!Mij`b|oXMtHZa9RX3|`Jl`D*H2FOWHB3FgCw7))&Do1(dqz} zWN5>_{gh@8C(H8u`1dgHoHP@T5@k9&&%wM4rp$bB1rwVyop;XgdHjFl*uiWgmU&N^ zWi2fLU#GKx4R6E6?<#Cdsve7dcLf4>>M;NB*6ilr@hmy80)`Jtvu`d+Ol@K}_#apf zMZPZZV%uGKeD5^e)cg!D*VIBA)nY!}F4?x8gWF z;^z@Qf3Sjp&eJr?<2CIw>_uY>Q<2W)Lb}CyKF!a5MsxpA;L}xbGV(Dz-MtUaj9v=w z9OuJwJcD;FSz;;C04)A-A5L1VkJU6LVZT}#+^?k0Z5SI&ch3D(7q%i5P5QV3CB^Tg zPj)6DPeBka)w_q^HMZf7_&SU$Q}D`P1!!P}Bs~{Ck6(I+1QGFecRbfhw@|`2||=B@q={`{G@CzPRn<3@kQrH&XmQ7G|Zjg7e82^iJFX zf%BPa`lY87)gStbU{f~UrmsmdUj&dW{!IIzvN37A*M=?6q#%(A5uhFYp1SS$OoL6$ zxQ_UHg4V!Av~j|Hfv2Ay{u%NBzwZ&^n=hVYDamx)<#kZ-PGy8DBxk_XgxBz-U;=zE ztE4u$DbOh$04HTk!8P;&ZKDRz|LHBgTX%*|NYR5kZU1PQ_bv!+{0tBLpFyScH)wrl z#D=m>*tVEo@ZZKbxUx2nE6i!9t!n!~c6>9rr>w{JfO+R zkY!~Gd&i2yJAUqD+=)hR{9AYGZ+{zo=4TN<1v!xB1@p-!Swk{aH46{;-a;AtY{&SN zgPf1C2i6}Ihc7twW7!X*NV)DX9{x`Uhwqt8&i#!g%GPiShhI@-aoU440B5~U*Zlr@I9WEJAa@H)+C~5vdi#i)`0bXjwew% zZe$9cNu1FgAhDy^p12M;KIc4myX}RQyQE=Ky%G9wC<$j;)#BUkm+(ZHLhSA2h2KU` z!ZT-XgyE(99IL*IplQ!O+mViSXh7|?plZGhC|u8ita%ksO88z4Erms!oWXsM2J{}z zq4HeH)#-_zXx4-Q^n1<>JZEhTTB$ODc2rEEe>0ij$w4g}hl3aB>`UgbMQsc$TeO@8 zY>q%qeM)#%?s~l7iUi3_c!yiHV{ltz4^A!BB0--d$@RPvoUk_p>%9AiW+uAgKfzzn zG#6vEvHh07K(!nD-x$UP;%=Re%(E=RVf+{F8Hz45324&cX?;`q*^CX7R# zamyX&K)8DarV9sgY?TsmnB+t{`TEtbuM$g-MBrRc|+`kC%bqR7BZo+?$@?qd&Dmdn5g4gp}SUI^F z5~|<8reD_~ulfp%%6tRkL!N{2hZ|s$aTV(Koq}C1+rTi0fOe=OY=0BW^X1b|%V_E!jMSEP5?2 zIy4XG6c*wOcZTrtCt~EnqF}r;FaZw}5fU1~>&Q5F64m5P>_1zR$j{BVD(nreJgtwv zFABh&B^$9_%p=t0`WSvau7VB6(zx8y5x7C|GtPE;kHw>pVK;UHCDfe7hP|a&?(9Ll zeOUl53wEUk+KpM6izeFWU}twzl4k`6PKR^|OQCqU`fEmWDGf<>2~L)bL||BaBK6600DC3iP?oQ;K5 z8yaCsT@B29cn_)~Ie30~3amV(0JQAU5#T&PtiWI zCC;9>pU=U9f^t0YC>rxCSQKIDj(x8b;e)0ew*ET_7ijF@7R#z&$4Db`zB7~zj8!Bl zHAm2oHKFKHN*TJ`Z-dXxJB24mzQ*IXb>Zj9kMQE*G%WJDg{$j|@!c;n#OCZJTp$_1 zh5l)yVS2T~lYgpE&TMsTV0<21%_zXmh4I)>BLFW>KZ?Ko^uqOD;&FD^O1$xF4U*0_ 
zMuB|Z<<^ClC}>OrnzG3b-z*8mX76G!E;)?%4xYie%i(oJ7BKF9Cki7n;Y01#oej&Le`Pdcz)+S9JO>E_Khf_rr*NB;(!^P|2Y;W<{gBb zZ80!&?_zlN*{^);`c&F$DvOR4PQyc1@mR5H34Y@zP3H+NfZu3ExT-!KhGxV?&R&A`25?C|^!gt}T{cuvhI-bJ(@cJ9oD&!tuHu_hM0 z+xgU@VIPX*cc%&c7qDTQ1&+%2iMHMIK_}zIz+K*%c70e5wKlh5vPTWjv2k!O#FpCW z)T735U99kB1%C2I1`p?aMM1Ai`TO-Es&LYP`aX07!SNh`;v(4dXC-WT{nzQ%WQuz4 zYT|owqwp8uXl#=`g`Z*ikhYY%;A^%ujV`eQ1<4JNw=fta3;29y+%hgcB@X!&y%N4< z3sH`GIg;N_QHA|dv?Y^K8Trq&@`f3xt#AQyZao~kZwgXl?r|r(^SNig9q7#LNUr6R zBPVq-uzWOr#aSq>rfiVVWnV(+z_tezZIFegF$0uKJjV&Vc5?@X2gC)?`F=#HQWE9(|b<{<%biw`(2;ulONJhGvhApnfD)3ZA`?K%`tfQ#pT$g zAQtVY(tu+%?r?mDALu8@LD%{cZv4e373Y=X>FX1>xifnVvEJS1_=|ig9(r^PFDSy8 zTYZ&#v%(l`R$0MO^L?}>%^zv@xe6~u2x<8PIc&7>3>J0Y#Sbs+!=fXTD!Md_sozRR za7dpEBM+-WZf-1nF?609raTtbP5dmp^UV``CC|fO-j$=NrDmXAx*vYkXMoIqiJ-(C zgZ4d{!b`%Mhd$mN>6cP$<=AJ>Dj!#l7W%7=Nz^I_5%DHx-q2Y=$s;O}Zl zkPGVKx}SJfm^hYE<-sIq@|I_{^Uc}8#YU_!fPr4qbe>sY3bqT4z+ujJ8j%o5r)aMh zCi{*N_Ek1h@9ZO>^_S=TofKzlocTFxNhC}vGywma543iEGyQMjPb#$01r<42_%A1o zJ_{M7$7igC5!3m6v}G;~2A+iH`<OA54he4%qXRd}wHJp2o_f}vJ>=+qPk-ILeo z)4Ai}CK3RHy ztTZh=XkMYxEs1Wec_?hRORX>)=f_ol%;w%6jN+D=|K)nmw{o4@4xFvk4em%|91R^l zK|8a0I61CH`0z)jaQ?$;r^lB{DrU_J9SWRPZ5AxMQrNa90-hAs~1^WEjAzFIyHrH__SFj_dqrxsho0^QiN4=z@so3gK z4$48<+~SwAv|4UC)oK_^|H<9q!k_BUmtobMkJUylUi>WAYdVQGpLtK`zkN>s<~vb? 
z5`Frk>Ld5t&!1+m^P~+{1>CwXOKFS5M7qQ&ja!ucMDVQZJ6A~?=#HDRU^$dPb5@R} zCuD;-SO0J>*~x@UtysdKf1}$zQ&jo24bCv!&+|TiMC3>@j zt4Mdw5zdu4EIgHbpWEw^O&^7BpzceGI622Y;e5lI@>FBqt&yI}H8hJOuVzo-JLf8) zkJX^?^3FKnw$3n4?!-yz?J|$f-R;dCG?<9qJ~<|Qt?tGB*4ZjBm|}sJEL1@_N*_J% zQA2VL_QIqCs+^Ld7IpH-<7|GU3to(SB(yQ87DP^eU(x@^x*{$tOxXLMn&8M_h$9Ov zD&Km{n;ZY9lG`0AL!-}~3myoqhW^4G7AjqPVm4TT3TZ@%cGIjtJ8CWeW?QOcZ8DOFFt|Cvi_5Z*ek#3%HgDm5Lty-&~TS3C%FP z$T|NFqD6PqXse$(UFmg~n|5y^o$Nl5^4AUOQISkte&y1!tMch4wS4+KKauWsy-LfY zRA5?x2HaNCgybPrQ2441@lI+Gl`sl+xSE324RcUkG8MwIE#Y*(0;HG7!p|oQ!Ciyz zIlkQt>ti=UgU@alHVT15+r!~wzzMMBh3T6wM?uNGU2rvcGHm^%F0u>J6RAwl7v*~> ziE@`Ii&koBiZo)iL_K_OUo}cgRBb#`^u5$vbbioWv}U%B=whV2D6mRNG_P`m$p6m> zkyxXiXlU~&(VJK!QL)chkw*0>(X;ELMN<{Whzj!bMS~^kBL8Mdk!{yMW>EZ-H9Sxh z1)P@R}#B4r<9G`@{!Hyddo5n{A3Sfltr13#Y7sXTiN6%$63dN zDaeoJFn&W@7Hz&U`>3Btv-y;Rlno>p4lbHi-)Z7&I$oL?< zWozJY*fBPzvW!jI_JP^Q$&1>5DTthHelz`Jnxdn_x}v_>Qlh1+HnO4CS-q#ULDx4tl8a5UMP1OdZwGQwP~ho6`(e&S75!ypLQdhb_7; zEjm~7lZAaz6>U8@O4NGKMD*NDQ*@w2K_vBOvS?tZl*l+*Le%h6Tr}epWsC1eGRr@V z6`@z`=M8bu3!)^NnWQfAP*o5us}L9IPBIrgoIX;dD=}UaC}S#88zCzy^sZpC_c~dX z&qsE{dWb2z%ZVnbs)_0@%82UgKeI$mLlh*ZBl_e%K{W6CNKs1GH?}dloXs8A$=2NL zWDRFNuv7I?qBpP9MV{hHB9mxY5#FXFTD?a{6nt^C=;taG(fWdR_AtDJrA#bi-mbuU z@*lH5XZqRrJQR<#gD%)HyB!;dYmt?oY>C2MS2F3aDLKDD zjwEiBMvuh3c=wtSD()*p>wSmmTYp1rI;H@>&F;hVR7Q}mph@}@d7oWqJTC1>z%4<6 zbi#`~Ry!%OP3p$Ix5to##&=kEiVJ?F>x)&Ja?#_{5pYm; zDP)WiXHNb5nTqdtrum;1`_E<&_J)n619NWTrGiDcSk()^e|8rC8@`7W6+_`~{zkZ{ zY`}siEoZ?&JrL}m!Pb-zmfijqu5^UK%vFE+?rth5c)B9%k>`+JHKR!vmEdrlK1-jr zh()X$&8l=rJoP)Xb=FIBQG&Xc)4r~A8 z%c_=Ivc%2luxRiGv~9TxzDq-)6^)1KW4mePtBFvhoCD&enhfihvzvG{3(1jY<&tM$ z*y=JJx9A7ed+rH2k!`g9ax=GA-a&A(Ru4+EC&5qKbZFW65VZYnLfF7w;L;D%Nke8R z!D+5wvET%qzF(0Z-8vhsot43}rXC0__isZ7o>o#AqXzxo`EFN$IUKnm3qOMX(bn;E zpg<)7F6+dB-PQS^w)7p%m)QlD8`r{vou}b^OexG7>Vn=0UEuooGt678$U>whu}^_B z*}nUZEa;>>TRi0|9CUj{E!J)0+#Dz3+@z1_YHbK=AJ5V2FE)Zt*nQ{QiWT}wcwN6B9yaf5J>P!vGlA(7J@$%Xa1@R1B-(0$m&onICN z_k~I{B^u%U1u8_y;z$sALQemwAUO|$$gLY$*kAIH;Bm_@I@X=%-~~;Ad$RjsCS=iF 
zDYe2CM*Fa{;U3)mP!VrE|8_rPGC8wi zCr+ek&HY?p{|UNjr!=AmZk9(!snNS`cc_WT1)5&k!MmwO*sg--PZ%!1_i8?)KRe5W zh5?*#+lf(VZD}?tQ}~Rmx=nD$V}5U~86%w4`bap{%o%^Uw-*biDPXS~W001?DQ<~l z1hq1~Pp>a}O2a#gsk~ovML_p0bjWZqn(Ms)MV2qaCqol)tv|zd%Vo*@ssX&vvJs1Y zK8v%Y{ITo?IjmUo3KbjkPG-I@AO8Ib)*2d(hejU4OMjoksZXxq)3y{F96f=5C7IxN z5$c?N%mrw*sD^Dmq2R0-XoWetMc-2>E!G9&panNr&9Mhfp8j^sVnb8o{>vliI?z7IT#u0zs- z2%%|}A)5R|kyZY)V*ZoN*~~|7Y~h$G>{g*GtBan_Hh7L?sXi~EutkY=TFSC{zS}|l ziUq$L(_(E}KCEYx7JIDM2W^RsaJg^-yEJVH^Q&LZrr*X)B@44dUKXq@Q-Mv*D~J0k ziaeixJe&QQ^1F&HurkUUf`U|;KmYwNT|SLx@SCt&K1=^-$|Pp4XvhNk2jT36m$16w z1-w)>fVCz)^zZ&KIPSthSKSEqU_E~W=pMyBEVW=Z^88((SdV>wJqUfJZE$nV5FB`* z1s^9H!H&F3JR83Wj@FN3dXj{trOshjG~L;Yg;Q8o!6;UH^BK6MuL4Jd4WQY$kT!{c zZl9(PT*59`5c?X=@L7bqbvn#`;TV=?D#u-6{O*t7rC4;jd<{RkxR#l z@x+ax*x=+V+^}u~{wDVmdny0J{TX$5dWI2p`Jj%^d~C+HJNs~4K|L0;>c`TFb@<{1 zU5xGo<6tj49I9_cW^|7tGvchV`qFUNd*B|#?cje$waxLCDcORmvrF&;k9T;?Sw9@A zseotua)L?UWQp>vmv}Ybx%sl>KPYVUg@x|(@agT(@Uq=D$fo86N{`xzo4dTRai85EhcO8QOl=)blnnXFly6=&g2j{H(UdWt9V!7r2QcFL6+yA#&BPcsbT}C zHTcN926U&|pYBv#2SIfQVBeZOpm^mBTv=8IX~|`fcqtmd%m;j95gqx+9nzvVz;u@( zy6j*LRl1c%ACG9GV@=!Xn}#yFrm>R?IlGK=s36Gi?=)KKtV_{{JbHS)Gpy+SNlP@Z z@>!W7!KgA>lrpgei7l>0tB=cZFOznGb=@>r!sk{l2M-}PCp~PwLxz)IzmYq;NLF`DCRV3s|dAKOvH0$d_!g}zfj&p9jG4@0Cp?C3TjJZ z@sB&5SkEm77fLzeS$d}U)Q=wYpu-X$v;oLhm1Ntt>#*+^_1KCxN^E7|G}bjql4-_{ zhncN|^j=7@@aE7~^x0Gf%u?I98s0^vIB39X5Bjk?6O>uJ*8{MA=*^1$1v2}C%B&~m zC#Wg-Lcb>eF6hpLj=axY;gSi=^PE3x%Gt%jr)#pKsYNg|(v>ZVo6c^KTX1xFF35%` z!||O;AYY%$^R!;m(9LE{t?vlS-4Mo#Kh9H99pHsf|mubK98kr<6;kHqw*V;nJYqog*Yc+7>}YqyH^A)-$@4#dcd0P zh2Th*f_*=q2VLyKu05T=K3r5~(es_)yVpoO`>PLLAv+RF8cs#YZ<1)v)$O1t%7HtT z>p^xQLv+8HXpr4seq1lBqJ8Ve@iifFkBZhD4u^?L13S3YNc%J6{n4fh) zoW;T=zL&(k4}ptylVGQIFx(j#0a2@BAfo>$*x;iOy7DYcHBW;Z-;M(>wME9|Q;=<0 z5q11E2Qo%%<-0@%FyYH~N>A#dF$HRhL#I4?OGrLTK1_07TX+srL~#?(Dlx&SQHqedk*U zx3=7ZlC}v@SaSsQH$Q_q%UC#iAdq`AQyWx$@6e2+-)O4MQAl`{N^`$;3KY98!^?X+ zz-aRnREby9YnzThPJJ4k3k>lDDKIJH_cUKQ7!3UkZyoo*rAR_jQ5w5mm7FV-0xf@mYO0zL%J(!nE^6aB0PD*qPJ`5zo!oNx^(J 
zRQd((6y{Rf3Jv(Y@G5K`z5;k#9{lGv8Y=38E2^Sh(Zwz0$llZ(NffH^`K=}}l*Mynob33tjFaY-QLEV{F? z_U}BrY3~WlF?n2O>4yw;ThQEr75Ks7<2bweCKmq6z%4Ky2S0y+ev7H%Y5z83(Lsup z&XwV5W`EFbGoEqQ{h~Z(pC|eZ9{=CBspGutp zO=_tyHqiw;&6rNU8(k;u^8I9+w}f+zb`^2EWJ*@;zlWEpcjCGO_T*UUL_XJJMn=eK z5hd~U#J{(UEL))9JWxK~*?O9>bAQP%ViM+ zyMsKA^(QIE){th~LgGB|fka5YA!JQ18L93=-dwOI)=O3qumARtrLu>Kfv*A)Ogn;) zH6a?bJcQo-mW=f)7LauzE69ieLy}lphkd_Tkh24rT+FD#&F{wG#}{3|(LNHiHx_{y zf5TOrb{so;{KanbTv26K77Tw61nV7|q@br4`#tl+Yr|&4>&#Zz;C~GE^!R}LYH8k= z#$nHTeg^w;4tneK7_qE`Yd0R_uf4VS)ry0-`3P@~?b{4)DPL*ezzHOIF$2%@6XA<4 zkI>pTMewo6fL-OYAb+BaF&_rU+l#&M12H*#vF0N3IQW+Cj8CL)o`V$^2aUNt|9$X6 z{560wwlm2 z&XRCv`$QhC+(erqgK6LoSMJ<(5A-p=OCU&FMr9)Wh3gzQ3eRgvpuypFd`5FE+H8@A zG^Kh_SbSwg-T1#~(}gsgcSoG4$jFk{Z~y#116(ou8;3=TaO1vHnE2{rqdU$H;fm9c z$Y>KBFp%S_r_~6_z5+a=li|UX?O1GQ5PsD%9?R@~LHGN6!pNy{5T11f?&ME}s-$Fy zyZn~cY2&{SKhYGY1UM+lhQ~_|z{L}jU`$gYr2afh zpYIuo4yt~@2>}Oi(7BK3p4CGtblU|#yMy74-~(-fZW{VSKx6hy5-MD30H0CP?8@3| zT5G0-@fjZkGLykB{0BUZ5@&%rpP;7mEZB58!07=s&hk_|4Uvq7V+w!}N@DXF9X zD%Vn(HbbiW$4SR+N@5g7q(sh2>WY=pcJ_QiWkN~&eoZb?EVdpcK(8GUv0wI z6=H;nIxp$U8AN@*|Q^4;>3tlbAvSMs4~mJGRh*ooxK*h@ApOD9?nFO#~&WTH3Toahv$ z3%{aTy6$>-Ma$-K^wZ5#0Xj3RWCQ!}Tw^(APIhU`%KMTse>na{8$dXLTL| z-ra<)nSrqW%u{qu=Pd1=BLmO>=z?gQBWQjwgGB8K;P&4D-E(^q9WNb^%JL`Rxo?zk z(}+Cb)|l-$$ij;j99#ff*<=`-9snl?^x?!OV;bi>1~2ruiKo`oVO_r$xH@+`wwG^3 zuA!}XoO2>)^m`k4nXiP}>yO}wzZ{Futb<9DA|cpa5$J0HwBB3}&8P3dLLq@jF=Ko} z_8eUuluB=}Iz{8p*Ffd=0Z>tT2m!BE*{G{ZOt5_-dzdwnX+D={XC}l0y2jtf=8s3e zYYqu-G~YlkdThYL6#KV(f@T z-Z=8;$29V9LKE){6XKIO8C>_tLzwhd;RB0{_)F7PJmOIS-kMp2{f3v2-z~1hBJvA< z(>In#?X@N=^lS*u-$>LwLdd_$J>*5sDpGP`BEc7@W95%hq~z{K(mU!9@8nb_3*`H8 zSxz=y5vEFB<4eiQ<)_HyN(sJ0-_7a#cE$Fu{^4iOuV8#1VY~UOkj^ei zo;|UZXmF>=*vMS6f&WZ$ZH^+%LGS3tCsK&+jYPBe?Zp|ji}3?~Zi=uuD7;{D3fqDN zG0QV1E6f%V-4mP1Yjaytc_LK6b)P}^_woF`=`q;z&~AL|(_xg}X+U?`l8Vx|FH!9^ zKm1ZGAD4C9#$wy^u-lCUv`TR%Mc(}Vc;*ENEGvd|7ZJEzcm)%9-t8=ZDYop|WAM74 z02dsMApF`$2-WOG$Bpiy`@LIGk;XI_SrGy){GLg3)*jd(eGP6Ur-JSdSMd0Bm3C`4 
z3Y(=gK>E&Unr!m`^+<-%2mL9~)>8`UqwB!)ZWhd+o(FdtGT`kQO;2%ex2Ltwymk`M&`jZwE7U0TxBfr^V{4g(D^@2!lMY(4v$E zI61luP<|DZ%Xorr;%WFag?D>TN9>{aA~xX@Vw>ED!BHb0>XpvJvioW#@grGGhZD)#=YIf($s92PK^ivEr<2O3aD(gXY*$NhCh4f zp?#fO@r3PTV5!zvz8~$)B5$2!*S(_Hqy8yO;s6Iub9{jJzrZ+j4?SGrk4j0EWk@{99e^i(C=Ss4EX$f|NXX~wa;LJvRS7YO<2EfSM znaPem$>`=JX76>9Z4n${GUA(<<7o{xDlHv`48MYB`FOTO;K3GFNU^nsouJ<-&BCiR z*o-I@MvjWHgIhnth!y(mlY1+u@7W6VJI}()q!QI2Kbo?~XU{|PetW3T?We?54EpwEQE!RK)c9}=ms%@J6}$-j zGUp3*zGhBMWr|>yO$o@|;@?ug#=)xaFrLen1+TJ`A>D8QBYV46jQP(sWrtg27>|$xt1AHNSvpL{+nt5ITENl`|G?nE zm-NG<7rZ-Bk=dFnvZE&r*#4VynPd{*?TEBvF>Nl)_kcHBu+^LW(wM@&&k$$NTr6?N zx1&OfP5ex`F&R{{df@0!p8YV!j2%uG#Uh66SxKNByZGP*H1w~5ex)g}-t-V&El|ee z|6Zk2^!34UTN)g);=3OOF)(*uI8F8)4Oqr;o5nXW^+l zA;iF)G<>%rsv`un#!&Jyfu^A-N*^{uWMWkwl3z@jbfsC`{cac`+ zL@iE_Sn!;TRle_Vzh69l^Tzk6xSLjsL08jk5+&Na!GD^`nUQHd`{MassibwSr+a~Ieu)UIGb_$2P#BSgP=M8X3;!a4etbzQXR$K)0tFyuU@ovD6Gx%Sq*HE2$iH1iP z;m%YaA{LcT!g@B4P%Bk3-s>|u(ccQIG=$9Mu`zr9?F`I{%7W8xl+5`i z?d+N5;QZ>ar1L4aY%`@&JqXSiv&)AAr#*@vu_81>Bspu;SDaB!y>8R1`}% zi%W_-udW*+!&8rt=GWsPx0z>`exabbOpWgH@c^;Wose=R8%J8ok#oLI#D+UcW?wHN z^;0T|&6VZkP{tUn?WqHnmN9f|=zXerObWKlI|YUz?O069g!Ip{BU(Ja>I;b^wNX4P zWxF9csdx_SXtjWZQNY6no_z<2a6&P;HUcQ^~%Q?TQSr+C#aDIzvSp7?iu!xuOH z#0eT-c`g>k^zk{Ik5^;S^YM7z3Ok|V&{O!7@fJQG?SPy=TI})}BUW+Mm<@cgW?pk< zu#^XjSkfK-mZRas4%{?mo4z%`+Y|;LI0kX$A0feG6bp&cVR{|9EMU9=TRt?6-Ldgz ze^%{ap-v0f_4{^Adxji)6a5U7Y^(Sl2G4+ym1I0$oay`MFoQu^)>x~@R^GE=ak3sv zM{x>UJ3*R#4Z91AXDP7>e1AW#!II74X)?O<2CT(@B=hQ`aBFT0&q9-6Tl@#%VS6RC z2F5~}LOfhMsLf-}ZMHvBeYri}~O{MpWI`C%jWN5Y(Kh*o3= zn=U{}%yalt{u%0cu7Tb4txQ%knQ{qloi|r%qjpjTyuyYl2{5F++_>~FE z`Peqs|B`noj$*r`_cGH}r`WX-Jg?F!1|G~#ftR&q(3PXkqDD+&*2^tfvlWCfejFUK}4F~Hs5v#{)7v>?-@6o&hTAaL?sP~<(urq2rLm2!Uf z!%SG~c}J!_SCj1!Hp0(rU3_eI8(P~u0Zvy|f-cW&3vqJ-wb6{Gyo-aQ0%aC!$9F_d zxv+=F&Ddb4I@`Lo4(?k?vp@HFwuiw)p7CJB>`{L*CCUR$w% ztQ#BrB48PHlbQ4H$*iVI$g-^XpTk!-HhH8YGk*68em&U>$s?uN{w7N{EodUE@EyYz zzEEXv+Em$+3pVVdIc1sd1pwm&J(b4 z7Gn|bHCX6MJ@z|9g%!OWg6_sQptt)r+&F&`_J`EL)aRYxbGsV)Havu#`|iW*v_=?M 
z+{@2TFX6RqKBNVwftv0Ukk|y!G@}!~NY_H@pgwz+XV3O87P4(mU72dCKbvr1J-e^t z%nmFuVrw_bu-A(3VNULUAQ^WKx}E#!xa2dCk}u0{$?LQ52NL|f>?NGFZG|IOo1yCZ z4Vcb5(oElPaPoAWU4A<9Fz;wxhpO>Q+Umlyn&*iz`;KccFKHHRicka1HPTqh{t~_x zaS2OHG$FB$OyRPYU&1%p3|&uKD_r_o8_xW?2xo_S!Sl8ftJ5if4d=U^+AW{sbFX8u zyTy4r=1>=(5!`{F6wbrx6T+}bUp}_W&gQetiNYxdOQ5&55oV07fWhQ8s@Bj!U$r+- zF%wJJ=d>6nc6#At!C5@eWQ9wUg0bV{FkI#Rguegq95yf1W^tx}Vd{V4Z2bd8$QBQ# zHWm`t)ioT~=9bg&sdbR59R5@6nlr{MGS z8{{|heu(bNFoT;5S#c9!@g`p=^xO^t_0!-#a2zaF$AI$a9;&e^5X6ekL!7cd7NF4_o}mE8HyHZ!;sy_pV|U#BytEP_)>*THY58Pspp1V_#TEY)U! z_31|X?ob-q`t=H0Qk6hI+#U%xqIAHDpRIbgCzU7ODxk|Ju0n+$WFU6tX}J6E45Yfs z!raZ9sisX2oshB(+;arrpO!}_74-=_CtKh^B!NM>2nR;ILIoEta~`vEsDGm>%^?So zfe|utE5r#y%H(@EB9|VPK5k$kpK2d_{S9VG&+pUxDK|itR1yabsy8K2yZ-#gQZMcY{ax zYu`JZ`0grBDjYygcr$!i^AHw&4S<4FC!DfToiyx_CUwR6 z__OAI^0Kjl*m)+C&NQA4Tl^Z8+|Q>&P4#qbX*{R|orBo%QNj&6YQ$;0D|uFV3p)p` zB`w8S#OK;2G9=Z8eeO=l}H1}ib>(Nt$M<-bs6}f>@pJca2N6W^9VnA;!4t! zPm-Qx@#IzVLbBjV6?S?Rk4F(168!Hej(pXGpPT8D;6g-x{iXXL0kd$+hT^cI_XsUIrnn3xS&L(h2toPh!WW!QVGF(Wbt)Ytzm47~mP3hG zR znV!DH@Hh|WTFc;)lcUkn7Ycak@*12jKZ*40RVMatN8rHHb2xlMIBqe$i{+Op5QhYH zvN7ZVzWvD-#rEifg4ihmH=kk2%|^uIpA_j6Q$vY2%JHj(zj0=&IXUarjNKF8;;7OP z{Iu~7UGlFEprsJ*HcX+@voGRh1{pX{>4@NvZULV6O~^Au2zjwe5(nwt=H^|asBAzP zt!keFk7c9b#Qa!rXf%RYqE1VfMG8O9yMi>06Zm&`EM9y<2zf^3ux)n;6s!HBa^XLy zm$4;GRk#kBow1-_I~rm#5W0QrION47!{@)pVNKaV(0E@1`XB0`XK5f8oOxGRb#fbu z&Yw>O<_WaUuGp?}&pLCxVd3`Yd|3ZhA%5E;sJ) zqZaOd%L#fZ=MXfd>#?P7LiS?Nn&sBr0$<6?pdYjcww=-gl_e8kZN3Wl{%)p9ztzx% zQ6Gd?#;$@bVz0qE<^~+>2!yvGfpAs!FkBo+fOg#tuwl0%h&xD8$73H*#6A0D~l6Vx(Ai2W}s;Ah_qg)8~| zm+gc`x;WNR_|Gs39af)*wbJHeFJDP)^`i@QL>S`P*jMWsvMvr?Pm*PVJhxaYRAHd(U6)^o+ z6FfQo4&;kv*^8YMn8piTrdswLT(v&KmL1(NBC{IiFOp|<@DXfw=D`)^SMX?FBUHT> zV+%TY&(%3kR(W+it6Xp&ZpYmMbTSugzQ=*-?7QH){2#yz5zJoP4tJvZp)4*R{`30) zk^iLGeE0zBcX%FVOa!b6z6tvtAlNaY1q@cpGxpzASQ~Kp`2mISGUhn|kY(E7$y2pVN@1~A6 zlbj~~oFL@fB_Xgr04C{8f&L%Mx$6<`^uH`4m=$6I2}X}-Rf#cm-Xr7xQMjzqh=bj-0%)cWe(13U(=2CfoUV#BipIhB0b^#Xti(EoAJN9G zPtc~xjyTm?oXqF{R@cqmg7+9-$F@plJfo;s>!> 
zuo`)*Ka)&U9Z75ssuKGPrg+#|k$j4cB3mDyAbwK%6c9Dh@J_L0d*1!>yiO`2CN?gp3a++I+V5 z)1ecD`mZDla*C<4e;BN^+)4jdFG8jNlIaA|McOB+gcF2?SWLo%%-OPmq==`J5v93A zdU!8Ms&at7D}^8toB)HCjv%k)3Ylak7`-YM{#uZMca8sxGlNGEe%(P*hP}zjdKI#@ z>lj?oV&G^Z0#%-uAr)>4GXYWEJKXe>_>%(|tYXmC!I-Pd`t3&;Z zUQAY75sM^Qva{Ttbehg0Yk&3QOFx29!g+p={%I4|{`wFltm>sUyxZ6@D+Uy|e#b|e zrx2-)b4lUS(`1+I5wfsXk9?1l#jbMWgdI1YpcUgn>GIj~P_j}Un!P39&(11*IqEmQ zm{Ehb)Qlh(Mk$bm*BWrr%Mg5Znl+xI{@YQ?op;ojALr&)x6?=4>x2$pT5%WxeCq67 zo@09+zx~<98G26!pYpXRdT1G*!g$%r_Iorq=`4Nx?=!rA{098nQ-G_Mho6$i@Y07- z7%PQVs70ngy6iD{S`~%Tokj^!jURQZ4}mqKS28iveXK%#CQGUwhJMQ;zB?U?2d*v@ zhGpLXF`lQOtb2R5JxGESL({Tn|J1xoqgr`v_D02Ep^_I}o*hfhos&ykl9|z38E|nd3Wa0#Gr@V>UrCRMvKOC=jb+?oc4!CCQXC0 zOVz+_mJ9D@+yzdNyP?DUFzj0p1`F-Op|xf=OrEy_4i3+S^WyX1b8Y~Chm3&%fiL_F zKZi5X72M?T9xph-?^vJh!_f_M@Pc(7c8L&FlD|?=g5DUqFvFT|=MKjj-w&J^buJ8TLL{htmWdc%$w_th3M^ zl65uML0cJC)gsTb9j#ebD8HAg-UbJ>k_BmZ{t08ABx8~}RB=aIpI-h^1n=u5ncP7) zrd^}P`YhhS;$L5&Z+`(a2MyE3<-di`a&^HwtOsds(}J@RH=x3%AI5Y!u&cgq%q~@o zXIo!}6W=btjQR)k^pT?|^yzeN_p$#`bS8c^eO(-G9+VQIxzHpc3ductZAFr~B4o^1 z=Bb|yX;z7dM1_=)q-Y+_Ui%WF2uV^hM&>C~ruV%6z^!}k`JA)XUhDfjr_r)W0|VRp z;IH+?c;!eP+Fdf{6QZ=aL&`>UO1H)%GZ!;uUo&DWv1cA!1E^2-_=6??;qp;=`7GDX}`D>1ANC82_GA^5#3g&t9E zFvRXG%Xd6Ye*1idrk-_@3+}tr3+m9hp_kx_fX<5}1 ziD841uTh=4mpp@d(gKw!y0oFB8%?qK0s}%u!87N9aQ~kowHD@4OX5j?z3xYoQ!c@{ zzQ^H>`E+;~uozmOltFKo1~|F27Shg4f{^mz;P1K(wxmgnnk7FV|@t@9M(r6^7v{>{y ztU}r|hsx$x7l|<~r-gGWdx+MpW+8CH_EGM7bz(hMh;7m8BtvXwHx`{^!(A2Fq0j0r zpC>rWvKRjp2R4i#?Ruxll#Dx-51qe=cVYzQV(2Cgu1^yZJSx~$DaV?Pva#2H2$d~5NFr?T=gQI8+`|r? 
z`jxZa1qv+cZ@4(cMw5;7n8`|=irI^>tjdwrL^+`B_+9RKzCSxypXJnBdn3 z4tV_fUh&zhAd;D-EY|XWB1x24Grv8tO;IMQNqlP zvzXn+736klUPavvL(!(2AzX|&E9j*(iQo4ySD#d^sXi?u#Q`KP!RH_++xAb1iYyPa{ukzL`h{VZDHCu_K3gxVVW#i0k@ z65}($Bx}-0vgr2{;`-ihv_k;-6dh7^$UbKzPHqe#KsAa@G zVgc!rCGj67HxZltFG);iG^yUSgxo3g6#l*aEZCSnkge(RRW|p*B|*i02~o4nB-2*d z5Q{3Q=8VoEBVWmhTktN@TCk1`DTpBFSH_c~jZq}6ZaHx%qU8Pg>Ex1|CV6*pr{KJ7 z9+{jum7H@lBjSJWg)={V$j&Zfh}FaI!XX7+;qRXXV%eOotZ#I8=63(RFtYDz7B|CH zNa*KGKCDg?mb^43SNDYyd)2cfa@JmAvA>c`aFIT1E~``POV%cvKnLQ3g!^y8?U@i5I?~lv=#u%}N;Dc<9@ol!KB9GM# z4Q9Jc8M|owng!goW(8|>81H+Xm6?YMp{t&ei<6$R<0Te2_g6fNZP?42=hcfj5})p~ z!cA5_s6SRk3_+K9s@Ng+z}{sw>_?$5@%ws{tqy#^^4sn+%Xy}_HfSK~PnLY8ibJvF z?QlHzi=yE)8CuL)i)i*+x_QTlBQt-pH~F=!_Phr=Ri@xMgX4H*(Ole#>oC+S1Sf|c z#!bIcaQ*nhIQLdJtaKZ~jHYPdh0g;q#ke0@1}w!5_hw>imMiX*K0nyo3on(eMDJJ2 z(86&(ZuNi8?vFhw_E_7*R8Jb<^g(~wnYZ_uLRA(!_!ilW5sPYc*Vj9qa!uY!MBawJ}TwP@9tv#mWQ&_wc+fd=4qC= z{2)7W{jC@?%wP6mLtN$BThW!i{*98_Z9;{@N|C5>5uMyG8M_l{$?I;DbaxqXd6KKp-K|7bprq;BLvX~lM@xuI99D0&BE*#JW8%uB>ta%G9X31cu6MgbG ziwBFX#4!UrSm0Vqw%)Q$yl}BfEDzOS-Z6FJSnGRYeCSp2xM_{};LjfM+oMsUTh_hG zwHaSz4}va=Lm%XeMH@>i-KKvOY57=|Bt7%rn}&$Rjzyw^MC)Hm`d7|Yd|BBu#H!LV zc#se?fr@v1?}!h|jad1dMNB>CAp3mBlYOatC#LHgveRlxV%fwf;l%nAvcMlnl^w3r zgwXF#D^72_EE@O|MvYdni)5Zzx_#-6>AAx2Sxfp(8vPWGT#H zk7QMnBhg{|?8-V>lJIfl3gL0Ez3fMQtWY}hoou7#J6UMPJ(r0W5@h~KKEnGOi9+M? z{eqYqEu8ldh1xAz1T^jld)LhoviFAx_pcn0(bH8jOK(%*#_r?7llg^0qVi&4+r>Y! 
z+P&_=f~T8>jT0sc5{F)zW7V7fWk;y8#qZ_g{f}78JVp$9&LZg*YNWo)JE5Cq zZbfJkso47cxzJh{DHMhbt1Q(MD%U6<7WVAuLy8*~k*qG~h@1QX$*1#3xWuvY4*4US zlV)BSf3sOO;Y5(|?_i{CZQuDqm6fY-a)z6%O|C)YhYpe9aUo<-<3d9IToN)CI*Bu; zO%;FFKC7IX38QixbHy1QLE`BF8^lB3j#V6J(I-Ryt|bR!mXo8tTEt}RS3yjR6_nRJ z6T5c~Vmi4hY&8B8!{?-k1LHP{p^3?Y$-FIOOQ!?5wDPBL!Tz$)SvN;m-s6lc<#m|& z%`Ql^=xCL#aJLm^ezqsa3cHX6pJhZPKb?dR96$_i*^(D$Lxjg3qhz-x)yV!%Xb=u9 z7$j^~d?2i!YDnIX709ae*TTWgy~y!vyNP=bZ8BSCLG;I8l;wXiuRNd7ASedZ3IlR- z#anq)yyS6KcC#Tq+6q1hU&Qy&*gT)+bDJ6XnA&J%D6~bQ?X32@2@9Z@1rDk z+=`O53~`fI{~asTO1#CEwwaagAJj*RkWL&-j}WbZ4B{u`k;_Bc`IZ@9_!pueZ#eOm2jK(0xZhKrY4n&Mq^*3|%eQ>t z9A$aUz^?L_7dyG^`)jVYKvRBCViTl(G?AC5o5|f5bd@){>&aE;_K>gr++Ch^(n8)R z%3AL5$y(mWOi%vQ`yba`sU}z7+e?0^x3T=IfrsOxoT2p?`OkG~^wu}7u6Knaw z-6Q2&^{KpR(?EHPg@$}l@pry!xQRUHyOG?rNmt&M@Qc6PZ6SB}(UZ^mPf6Zuik5s! zn7;g@wyk{QD@S=)rn>y-!nZtesiM;YbtyuWGuL8_cLhWz6--HqiB z4UFX%JQU=ojnw2v{4M0;S6j$el$pxgidwnesbze-O&UM&?=1J8S;Dmn-tzWAUE~AT znaE3Y|4I7>zxY9oe)5;whst$l+skY1`^t^Q0-kl^CcofT!{1rU`S$ZK`K%T#xkXNY z`Lr?J-+w)|_Inf&lX zke8TR$oCYg%1eW_6ljDf48c9|13>;*Zmsu-mhAD1y=IeU30j5Y6*8Ly~#7;pYb$11^EKo zPVS!do8PaKvc4-uqJjENwt3+=+@|0!<;&{H)G}-6zwrP-Yc7y!>F{QB8qA-307ed0 z5l`H*2j>YJiQ1M9k}RFG)=j<+ah-3$;%+k(RV&fX!?ra2jyol(ess%AN1EGlpJZ5v zODyd);+QauT(O-aTz88DZyy~>4+l694w4c!m_)yIeRUl@ zinRILYq|L3WxH7YJC`XRZV`7~3xS5aec|ez8|+$p7MnlnII5kE!ufI47+a#i%?tZ* z>$=`JyTpRDd!&dLckCpAE=KUmVHGjivK&_ioJQG4Q*PY-C9W>j;Ax+R@N213HgIM? z+}f`Trrx;8cE|fxj{jao=$q5R<=fY==9d!xStjwe&-LO{5BK89iwt>S#7hiYIS)rU zPsfqhr(s&oGZw*@vV!S{*uVoB=%E~ogWiRs(qoCU7F&k8Vm@lsdg4=`Q*6YrDCQe? 
zpOu73oSyCK?9s*j%y-yfc4I*n)BF=A&TXnBo^5F)$*)u9{<=h_kZez6zYK+K({;*c z<@^>EZM(28!*a;b{=xXr_CEUcyp99H_hQoSK-4{(g#C20(dbh-sy=&zmIXTebE7dg zGEwHAQ@Ubb$Mv{90I4^wZ%$x=j5ktp2qTF5a{Q{-KpL zPX7p71|6ge2ei^)%Wrg2N(a^OD5Q~9k`Jl(ZrZYKKK=OjH2l&Egssn%h~vayoS)`_?U3fgp365Gz%)15!9CUXP?umKC#?1I31}~Ojsf!_-;joUhCnmt2 z+cALUcUhl5&)GcejyIRO;+8uWc*}h{&cBEif<-I+m`Ix$LjGodqY5^l4f8HLv}u9nD}4|K8{7=w28dr^7Y zWc2@Fjv=YX*g{7mHb3yIXs-H&T%OlL!c+PR7n>At>&l_%{CYAj85W52{b)Ro1U;@1 z>=UeoXU+WCv60ro4!>vQ;Wj&%6SH0T`niT!c0UZu^NOY9=wWCH6d?QI5prM0qcYe` zo1OPPMQZXWl#LI84UjBoIE}%#dgi#oX9QD+%cQE=fg~B$F}<2-H17WlPg<(+3Gbhw zHiqKCAUpD>VGgSLyu^W_wHPveEIOaphQ1A>V62HEo46wpgJNnibKX}RwDXSSc-o1@ zL*I)@&-dVmP5NA%EqUsORiN({$w$7xQ<|@DBw}x8JaH)#6P}*Ob6s*Uu3`@!3wgwr z#_q!zGY0c_-o5$Kn}hj_rP_SU^-LVIEEL@uZsTwJf4D&Sfq!~_#d|wK@m;biwhXVw z17m?lFjt9#ym;j$b>498Fn)A1;a7L;;z6^s_(HFl zJoL1*^M2Em|8t1tvnzJ<(9js3I3k9-dIa%N4)(l6axRWmXu{Mr7JTAE%7>N?;S-gb zamz^s{?l_R-)88}HS~w_rxF8Vd(kl7+GfM6jPIgD|GD^hoOB&`D)9(uzs$lU6WgV8 z{o*=*#6rNHmuzrO{!;v}!9}?J=Cb{^>z)R6Ef(wGt11=R3IU^%r!5 z{=l@XR!A6EC(TgG;8(9q(j2)52E<*2z}TMD^3MQjc-Dq~uy&!1!OC>$jds{j-wvH1 zy~8KuK?o6RHNVm!Y4l_Go#;vpi8p;wJDPrye6BSUd(x%^bvpmT4_H(5 z13Ippg>9b;6s`Jr^Cd}8Tv#qTtoOY*)K}%?h#%4;TMqc%W9o@OK0ao1C zr60rQ(EZnJ>8waidM)4sY)D*6T_;m|Vc;a1?YEsS7`v0kZJ0tww>nVwpsUcQ-HFcF zv6B*4PdYp78!TDoPV@9KsqAJHece2cS_ZGA|BM&WVMpAly^1~syR$H{&WL)&`O>YK zBWX`v8NI#7k2=I1pm{sj(x62nX#cxYY2>bXG`{z8YJAj|_E2dCdif;$m~2UPm-^DZ z4q?>(r#BtfIG8TJZ%WIKxzOuzu5{W94>}=xH07Z-G=ZwqUM|w^&hajEPhU&=>DUCC z7xC% zYvN{?TGA9Y92yPvA)>T}bU6}B^4Dm?!Oypd_k$UP9k7ReMI&L{w@^{{a4xHyvV;vA zFP+UEokO&4WC^#L+C;5s3z?n=6~p`3v!a3>822j}ZR9)fXa97jGHwLB?xjO)zFr_- zw{!#2OrYtk71+5&5?1#{^jhS{v^uh}WMer#j#`6vhpix9hOwl)*KCq(~3`O51Gxw3el#4xI1Q{QOf=98y{ zY_NbM$A6HGubz5R%d853(KRDkO>8pzXuGIKQ(PXFewQ?vYrBcTd6?%1%<=- zW5-DwuSd1Pl{iA79hZ*k#%FbvT!Do?d_|fAua&&0dtoJPOYJaPY zdVd=idEG$w?t^&U4?8}*;y2E|?ZInIT>1V1@#yk(CbQiX4x4>Ofm?2Zy&4+J1Va_KU>cM~A`GZK zA44+mYeH8Zz2_S?R4>9CzT4P>V~>T62}mC0}M33 z!DP=Dv**K3Ryyrz5+W6X$>Y!V!oE+cIB^?b|DK!hQb`b*Txce7Q~Hx(4+HV$l9Q-z 
zH51KN#C&1bf~%4v`3A+BR0(Mj6|uWG{J!5bvT+b3dg+n#7}`lpbx z*%1()*Dq{ zEW(cBbEI?T8Cbb}Gz0@uhU*mC{5u;e*~^G^xv^zX$A7M~(hStl`ABg4j)3+SLa zh8&xo3He{F!1G=hTR1YC!q(fvjcr zGtCp(pnR&*%9~;B@1VjG%Zap>zEAbw`y71i%(Rk2836*T0NHc>X zHfx#!ijGIHt+X238ZY2QX}^D>kcn2N2hi9d6>qg?Nlu-`m|Xvo=?%XvmTo^PHdHTV zhc>&jz_DtmW%E<4uCO4tZrqhv-cLxL)(aB;TVm%NX(XOEpOZc(jRk(VP&T${ti<;y z7JudxFx}f`tkP$)SX$UFBtLf{tt!h&byH6`S~(YSx~6LKTz=4)fA^RO1!ab+cB&hJ6B ztp)0!{~xWk=|{iUw83|MZQ3nzJtZ2uXnfK^I_2wjy0zSw9y*=_7c&NcPU}0+&$FW4 zemT=|P0u0ziYtTB=uc1Y=}P@l&%ts~qy=lO>7#>|^qcNeVXNI- z)OlRPRz%;0YvE(5-Y@#HP6F`)13g^n6AE12x1NA~Mq6S; zp+o|Fpi2aaZMWl>Gnid+2KzT#p<&2sn3%2tCvTr*eMiOPpclh%!a!q;c^*oZoe{{( z+n>p&;N7s$JR5T7WP*SG2}r6L1-7qqiP^Va=%tX3WB$Hk12eXYJq^$2<-oE0A{f(OikJ7zCf!EdVGGiS;*jUZSk7NHVaA>WGOuDSYz-*_y>GVg z_v$DRqz%i3(oC5TWg@#A$5i^OVC2V1DDewW;nY4B(B2;{{7=BBHxJ-x(`7iGwhGKn zSCC1r9^_YqH@@tUdYH|nn4vQX`#73m*BTvk@m+|AU#%d9EB}MVACutZk3*!bVIHZO z%Y}biLh%+C@$=UQI7yr0{^S3|yRMpeyqm<*l>ZajRS%*;{bRH{Y{CB;yhg+11Gvwi z68)n}P_)d#d-Z>rd**QVuOwYa9xr93Eq(EkSvS7w*Hqqh-3qSg(v9ck%);6rb#xux zQhDKh6N@~U&DFz66Nlqc zCnucQRSo;u9ER=~4oWwdL7;T6?R=;X-Fz=pTE3Mt2lo=JPaebsJ1zNaKUQwCme*^DNpBQb@J#vbjePyxykxL{NHYU?sCryzr0N(m(rr~;z>nmU$`9m zhnaGJ6)o;^X(KupFT-8-$~?Z06R$pJ$%jrb=7SC%#idgguzw!5_{P*4XX(vjyC$#2 zq+4gP{Bk~8N;}7$>Xy9R)q*d)rNsl*+(ANfaMqo(V$I+&EVS`3sN40UMV_YgOXvWq z(04NwdtAbf<-NJ9O*R@hU%{#VT6{@sJ+4|j6cw}0aPXj!^uBWuJ-0uZ&UKBW>7B~- zo{1%UV)vZ=S))X@RkpEZdk&&%eF{E$a+__av?mkG{OHHCa=KugNXG`nP~H0uRBzQa zxH@zYtr;3!ie8X-!aOc0vbqvNkZ6`s6XAykViH z0^HdCfZS4z5Y!$vjS`1UlXiS1H&kdLp8W6#RU|%e)uw;g`${RE+wX_H?rEV}NIOgP z>LgEf*AZpE>*PtFSoVFM0v@?s69-8CS%rIRcyFIUTI5Z3AK!vp#~xo1lR|8;aLSD&EGwO)4N7LrS{ zxo9k3_dJ+q=OplpPnPqQAv(NzZVW~?F2)7dU*qRlgwH!Zg#S$ZilG&saapqlcWm|I zqpwABgP<@z-eVf?(ep2+1|Gy2{-f}Ha0`}%NasrLOgIU7g+tG_;@zkwto$aiP;VOW z!Do%R>i+L|!r&_Q`h6Zp|4>4~W+!&k?Ze+XpU~yxEj%#U9;>cDV2_9Q!TnYJ@!w%z z>|Pj;+f)x=_y4wIM0E*k)%nJ9f39Jd%0^+}(uJ7Q=85lDY{HFt+pxZS4F0++-Gkd! 
zy@uq6Lyrc}T?}^6FUMHE-69v5AXDDip z(Z!)!0_N|y#df*2vuw$g_$cuczPhHx)q+iV2z!VHJ_YzBJObN(9mZ>IE@*nSQz?4GF@gA6uAvnGW&K+bSFx ze1zqGb%Q#Omn7_cUkqMHec-3=fq>NL4Dv6I~B+D1&B6ToY| z3Vq{#9c<#t!1vWNFkCyH8ivlH@G6@6ja*NcD}Dmghzw9!SpscSKESugX|&^-E3Le* zNH0$a2cJ0uY4ng?RGv~qXI^Weo{AOpL8b}4taToYw-|$WTqBrx%%M@1|Is^oy{P+| zXz)s#|H3NfUkj5$VQ|U>c}sL}z4Q1m_K1Xrj$Fy45a?eq0wvcV&gr5o1vp`G}z7a1I1E* zaFfnL+B(L9?TBLXGJ622pL_*IPuMHDvo3(XS|^Y{#x(AVCLLYS0Aj>>IKFcSC`#AS z{{g~}{_o(-zf5RWN&}_T0Or_ur6Y{|2O*zax zM4@zk9(aFm0;gPU+CI*JF7Y>{9xD$(je;YL-!TcgRQR%_FUQ%PW)(6;@fY!5;7hh8 zJSVFkq*Nx1@PM$)I2dSR0wa%35Wei~icSZu$iup3w!M26j!mA7!^i26*)cAnOX7E8 zBb}*@4%fkP-JZ%S{H#H_I-RJQRI<&7ZEfIdgHedpjl5;g{HdNx1i#zcmY~nY2o#4Y|;aq0&6$h*=zzLJ;nESWely*>Dafc1Qz#A?a&y@T3j8MxPG5H2~SMZS_<==)-{L>ak8j@lIp zizbhUk4GxukgWsuT4cbt&FsRT_6oxb&iinC##-E&>kP_g8buGi5E4=73HQqW!o;nk zsj*@Yi52yNytW;TrIKG@$d+xm!EGlN{+WmqwF*FPa}4zK4#1SIweaPqA^nxykNQ2% z2TilhvZiAeXrLpY=6-_eH61KHEt;(^E`)@O1>o}b7(AbK1dJA)hSeAbDz_Vmj%r2a z&`ZDA?@(JTka^>~CIN%ye`Kj6FXMpT*Kj~_6V7|}35(KR<7nr%XsywWZ|myBqdVRC z<1eFm*$OMJy{Q(XM~%X&6<5$c=m{>2MLg=*g%94Lz`Gr(mvV1vT;JkGb?i_Pv553bt|{I|RWKlpsZzQbJjd0kiD{*3Se0czZ$ z<_Pw{x{Z7dWK;Jsrbch z0Gc@`qD#X#OtO>^5@&J8@@ZUgvnh|r(&I6Pru?Rq#cLGoF{@-c zDqUKP=IkfxEuG50Z`jVoPt*DL$f^APoZ);(1L5Bqr}0J8rtzz-(lxT{fmPZrSR`fk z10aC6z1q#a>m&J%*QR{ljX$XNvL{ao?ZF4z+3=?WRe0Iq8hrb)n!P(dAFulvafi4g z+~m7r^hMt{KS1YD``81&QRg+N9gnKm(94hp8+4SNuB?j z{t{PYNcG5XP2M5>J?k&sxaF35tV`a8U)_7Mg8z(AuYMWsxtD-92cE@*K4-B1*E0NU ze;@nBw_zjHw|suv~ZJ7BYQgcDi#~)^Ug#EzPh0=msp>Cl7SweJ_+XSQ=nPO`t;_q6EHs9 z3C8S?BwE{zaX?cZyOS}Hjd-txW8Q4R_a&>)W`-v&pW(z_?Ai*dvp>MCg27;u*iN=x zo&{s~Ooq*#r{PWBDC%wvv|jllyy(^`41W`dNzsycB!cB!31%vTm(;sGeX(1H){~(XsT*!-WX`nW_3?iovRA{HO8}V|NRp-t{zMpPxoL!o(IIepN=v|t;u+J z{X?YcAMnlU&n%IYlCvGEz#R8L^~+CW^|t;}J$69o%a1Tul})%mpbGm&De(=P|49Bw zMP8-Z$qIb5;P~N1;3;^3o@GB$tt}8i^4d&FHnX3!iCPgE6Of;D&$) zgil@$ufu1-&wzz+Ps(Ox7OxXGjZb0gKSqf8Cl+91atWF=RpY+L$!MUv0xf6XWJ(@6 z?2mgeyQ7gSt~?XXCig00H48eJ$shxI!|gqAdK#uDg@9v&7i3M+f#0J6vajXB`KF_= 
zV5J2Z{jMkHl!JkdkAORtTC}QCgWA2z0OMtnul#0{u<}MpB_HGqW{2*=g9nY$bAK}g zeQ6`?pELBxj)If}Qg1iklIm-YqhlA@(U`JFpyPM}zON{S7yCYe{xn@`_qPVdlx+ap z*!8fjwirH^+S2t~DczJdjuy=Eq0hg0(lc}2>GLQVUHH|6=DC^B?Y`e2(X$GKqJt zbPZ&2Qm=L}kVOCeNw)X0g_A*r;4ZhOSH0C~$&xCl>NWuOeR&F%(s#(yyae3k5M;Xt z1N~?X-ClHuthjjCl)4vcGbAL`e{OL3|GcZt6HscaO^fIEqV>ZRXnU(V&5qHb5jt<+ z_kldpr*0O>9;(B-wVom61vfo?=XaygSsnVTE!)UzAaHiBH-IA^j@Iz@`@Q z`mr8(s%#~kdKC?`4@F8d5(8?K!^KkNt);nT(sF!#rOxVb+CJP!4u z+X5WvnPeAw>zR|Zztfwp&*@9=2WZd<4R^t1-8tBQZ8rS%O(!ig_6Vb|dO*x$sm46# zL>sgnXoWOW_vNhsZ=1ot<`K-8qRI9S$YfVdHVXaU%0Riz6c#vcB$G|T#FMvUng7G7 zZ2FAdLQ(7uVciOM+^M<&U-jE1v%0VfSH&J@rKO+9_|Y*WO$vq#i!j8)t^4ryf{SQw z^%!ME43%APVCCLW+@#$_%quWQgDVT5PwG2x+E)&#vs{6VwiJG?*or4EwqUm9OVm#} zf+yDuz~pO+xW-Tc);9&omR&gp3a`4-WhTAorO&N!E;bmHAB7XyrV$u4ISak^E8u`- zP9#xC5O$I%nExya`f1tH&s%+Iw)GG?@}UiN_L62wn+fA4JCNAK;eJnRJocB77zl># z%Ys4tcn=z$_M>@kY-zytO<+4|Bq-Mp0L!=+LcayyS^b?I?8~Nba@;S0czun6vqmFe zLYD;T%%o7tkgWs1v>CS zF`I>}%E#hWod~qok{FVzW<>4y8(33Q2Q$W;gJFK>Nw(=e=A}1)<$la1vd5uVKHxRJ zH7~;g^Ki@r_Z&}%qN9<~K2C?|i0JnXVsnzYDP};f+OZOebyT)Dd{{5%yX4k*WVO>AA zrA3tu_z=&ce#VQ34et@vyh_Np^#nXFcYv}%EUSwh$1-bzS*lhRi*v{%W{-x#I`6@7 zhYW(ODc*2zLNLfo&%)3F5^L~f87y->%)He+@$(Kdw90Q~C)%g5w?7)#(6I~9W4e@6 zebtOjkE`*@^#uHHy+H4PSSZ|fp#r= zS>MLNk0#-axnoFhKnt1ZC$IsLQ&Jn{KY+zms+fcs(&t1NN|4@T}+o{Tz zzW9KiksDCY&p*tf)N)qxn7+y4^3~w^(~WC<9LiHy zeaAZe8a(xI7*Cw-&b!%6o3+bX;A)_jQ(^4gOh_gl@sXvp~K===DZ zEA#FRbGh#FKyGlwmmhc)$RBu(;F|^wmG+N@@R5_UgTj4}kC zsOZQe94=z8g8?6A-=Akq9?Y{kO?cpPJ>K@88&5Or&BOoh!Kvp0*s0vz_`%qS+g54v zGWRNMeBYHn^flmOnpCw=I?Z8iXnsfa%m(kat2PTg{%68dQvX*`ldy}g0oCS(} z(GUZEXnP)pzKOvplN+$N^9$^?_8v}i3BZCXOz}c?1q%!wQ%Uo1(@&;L2sopjXOJ>y7l#; zK{8Y7bN30DTWWxS*M!h7<1wvLr;^<2PXb4j5T!gxW1T;gO$i5_0Mcwh}a7(ltrwPuJ0EVRAW?vdFNg1{GxM%+Z{Ovc8z3#mpWaX2NmkC9J>DNM=k=$0VaR93@lXjTH+p|9KdY{1yn%Vi4Qs8r*a20g8*1 z@yU5{8r1e6(Z9+{m?2hR_tb7uI{#{;csKhqI(rL`BGerOAwe0Czew&o=qQoIgVzYDaz zVl8!DaF}NIIZEq;_E8n(4fKO-C{3*%Lo=t$rQ;v%qQB1t(3g(J^ybDBa9T49a>gNy zy!i_rfj 
zr7N_YP6GcYk}N>tj=rl=q%XgIgos`uD7@Vc%+{Q(o9Rdt6vu*cMhJOZ`&{Vp?U*h7mLATFcs6xj9=w9TRz^a{3;oAJ)iqi`=}w* zOaB~H){TO9mo&k?w+5hL4sq$RoW&|JHbtohKCG;RTb7qa4gO z?Sy>?y~xsO=b(3NIy8-FW;1=1A?Vi)nDSKGgBf-Yk|kf{osU86e0rGd#d2wm8~qH{ z&zJ-rK`~(b-#w5|Q3B5o^Ps|ODGd8I1}=ZU3I5IpVc@>`Oh=3mBdvyl#p+LxUY!bN zZ7MJ(&j#8I!(EEU$;Isz#^Bo4AJ%DUg3|y2jz_0g+AiKoY-a~b&yp^*L2|$u)lDSb z!)LRvjuEK;Iuf5fZeV>+{}PX8xMQht5Yv1TROW@NU#Z~5r~|lkxWsH&YKPW-gVALj5%H;Yk z-?^>0@Y7iI^^-W_OBC5^{dMBp;s@fY@JY<0Y@ayi$$Vkv6)o@&nj@ix17V<)sh$;C z#PolMvE{~diS~%~FeRV}N<=dBMqb_aQF)7fc#?6RKRlke2Jq zpx3{47_4GXXWIGFjQ9YWH*EqXg(fuov?{&qyablkmVvo(4LJ9k06JYS;@s7txW|48 z9E|TzcRNp|Rlg?CiVyZQT&*iTrJ+C%%ML?P^eZsg%b{RfJb8FQnICvjk7r|h;RIb% zI5hwvN3|063|I|6X4rzo!;4`4dmHHD26!kNL#zw6q5I6k7}Bhd)6_K3w#)|G?X(fE zzh_lL%*nI(d@^fEB6toJg@UIW*x0ET*sfoZh2%^=RMQnbpyvEO)`7P^ z12!DK3l&7aYBT`9CT^QQ!-V`lP)MOapl0Jgjk6O##MRnAh;qF>-2zcQtawB}NRQP% zXVASqU^6VgdPzCI5>fR(w7gD0q>w{Emn+Rb8ZoawjE$8)8wj#K_=rb8eZGo6hp>b{ z&&n`AHrqu!dds)IRBi)6jSql831MA8ZuZtc9e6E2El5^Bw_iy=Srf58*mC+mRM-eW zS-8?av$0-3meuY)n-!x!Qf3W6Baa0@@ivM-$W#!$`IN^#;1MT3T(p}%`E>k0(;*B% z|J>6*ILUlJX6aKuahn%EX3JtfX7)=zYG)%qtju~o;)Dl3&FeZp8x&|iPw19EjtQ$j Pe@3N0&Pa+sgaC~{y6W^A literal 0 HcmV?d00001 diff --git a/ngraph/test/files/region_in_yolov3_mxnet.data b/ngraph/test/files/region_in_yolov3_mxnet.data new file mode 100644 index 0000000000000000000000000000000000000000..7fea67d7062138100d2e0d637e82a57054e996f5 GIT binary patch literal 307200 zcmX8aheOS8^at>Yv@{S6C80ulXy4~l+NnrWd+)vL-utmvw(Q8v%E*km&*5tniHt%* zNEsQ~Y4|9qBxmB_!$d5%KTOT`b`!}tc`#Bn8fKnbP5kr4 z@L6yXZrv7w5930q_e^hA@kt+zR6^AoEloc6<}YiJAOKNTO_5b1D_%}s7C@f zPwMh++nM0;0S<@qEO6h5jrNOg*uwjZ9GqG+1J+#HB?_!|#`ony3|5+ingKE-ug4qq zZsx-**|D%Dz?n8>1mgYk#Tc&YwRHX{~);S(yGP7~poHlAR zyOum!o(coj27Ws=_dkXj66;kFm?c>LfVYJc)G8L>Mb+`A)S>d!^wM(9XPOe)5m z`au{{fg1(`x@Ugd)6WmO`%mD8cFii5B-b}%Ql-hNnM zj-p`>&smyc%a9Up%VQq66teF&Sr`1Dp-8374QrKzc%WDvr?^fer#8%mq&uZB&r1Om zR08SSm%%urx(JQ-iQ__*m82m%3O=04gAUpEq~`cR`V|sUp(_&`rSDROnk(e*>~z>P 
zI|Uy5ogt3$9aO$J2d}inqO;gmy5Q(9GP}J18uy1mcu)>0wUWmE*b+=X>5s>sSkW*0 zmBDbF020OKz|(?$p5)re82na(#RjbPeFeF!}&xgUUC1AZs7Utegr0o!j z_3H}JIB|g9y}N-7ZH)m%|6Iub+DT+=Ptdv3lhMj01DBn>LFE)~5!KHb5Rs4sx0{ZV z`4(N&>rE~msE@{0-?g-C<1lF*Q3OxC7r@AxDB>(V8rOQ2Vz-tbS`X{crQWLW_M!kr z+j+p4^j6+o`Du8?mBVFKfMtR9q6#pEE5F$PC1n=0x_H$eer$t|4IGwin2Bq;H}DFY zXG31C5VlU%g0J63&>&TB^l0Ma3f*xSGy;gh=>W(SF9C__Q7|gGnEqTFjsf8X__**p z6_o5CjYs3)(W`9Gy7q*WUOrC;hf*+Wa~jU`712kb_lexHELc{M0E*W8h>?3Q)wa#Y zlX}iuhs@AMY&m#ZqNu+WB!J%yt*TlXzFq4y@tb zU!;d8HnUo=+aC8^?Wu?5X3+nSgDw*@sHpr^KjyPN)~(@i`E)&ukv-4*dD<1?ZH17v zb|NJAjHmH8eNju6kMmL$@#O8fBu6(G9u5`3P?H376fdJ5w9jFY@JYP{G%^F=s4MIQEpNVF{!523ti+n>hPaY%s+mm4bwG2=* zZzMn0-K0ZV894rT683~vQ=OZy$mgP5kdKT3{)hFX%3_dytuDksiBPQl6;Ey7%7Tq_ zDa5_?gGE95G8g&C93 zW1kQ&M>%7V#CB2a+Zm7|;=m@)7RsC#+Y4@4p!r`8pLZGKS5+fkhys8hh=WAYG?0zB zE4p&R9Y?kaaK}hBY|NiRUhJL+rwdA!3E1>YLq%r>U1}7D1``T#qs<>W_Kv9X*5pqfLo-$g9nn@OWP$goIX++}s{|JTni&Uq<1NmQ~cJLkvpti=ZSp z1n!lGkmGCQ@%jEz>`|VF$8Jrcldq|Pg>S|#t+Z)b2>DFm0ZnsC}kj`kY*pyxt9S{)gK z2X$OY|EWM&x2qTqYDvOnyM=UVegrmm2Z>zTbI`r{CrVBP4_{VWK*0o1jSx+U8=_g;ETDCy&|PLBwuS z2xP1;f+IHK&@s4@Rv(K(uMK%ubI6@%^8oJ0B)P_*Zte`Z>Y)XaR4zqXEi4=kS7)HQpN=SNCqD6@-0Y=hsF_*z$9D^0cxi~bTlir$mk_3k&L%&)ER6M&*guS=uIJHcSut-9+Pe62u@H@H`hC1fK_Q})8+Y_pIut9)Q-`vsQ z_I=Urlxc9pmxHxI07w2<@a~rzWAz^nzmK1VaU+xMFHN@vkN-F@vYZKn2g*g#>dv^a zT8OW%Pr^G7B#B~_Hzd{a;p2_5Aa>7zp8FPnOL~g&Ki5%s#H^H*G=@X4eF5w}_>JU; z@1*rF;xMHy8}If!rdM}fAS#Bb;FORCPx9-@dg}*N`eYXFaY{hR;Jx&S$|rI)EFU@o zB0=Zm5>mQt1Ugt0qjXgeE}G&^%}tcR#+(mk_P(%o)>zW-sevB?glIm|6_3}P5b>Vt zLBUQAW_sI0%7~}-lI>=AmDTuT?WTB^O7M2^cyM|d2lwyjLf*|t%F&pj6R*B-B^x;|yJA2J- z*cq;5|4PXc=X~Q(W1|t?Gxg&6&US$9795NXoC<2~9iouHbp5HV^h1dZzp;PT8k;_fbo4qBz` zjPgSwoI!tXQGvnj0{GtO0hy&8Jp0J0Xf=yN72<#^&U%Vkb{N6xZyfl^Swf%q*m`Xn z8}x1AaE*;V`rcl}+iEi#r0xm9`hgaV$Pl9|*3UudFMQnadK}g)w=75Uiv51zp;()lU!&wZeq|Ky?3zetSqSx$4)B%rdZ2<}G(!>xQjGEPYmH~uQcLdm%(woQei zY5SmUSx)p28Kn}aXDFy-U%8A_--&zs~_2SK)3F`U~lf}Qb8=!wus z3=PT0Dw$7IDse9v>zV-lN3)=P$^)``X+8B!PQyPlQgP|h3p8%%F&Rm-!R%Qa#QN+c 
zTQ7a1JD>npUJu8S%B7U=JqlDm6vNEk0Qh;zfyCVzi^nhV@kg*XZa*MNm769(J`4f;!65yX*p7_v z!JUS<;RA=!|19xJf2v)-r!^eyVCR>e0l2*si0Xoz(EW=LSHGH!JC%O%mWjOJ(NqDv z{;LdrhL$um+aKSRlpueLG+Lj{BKuRrVAk&f7?=8++}7Vh;||5*on1LN_F)IjT~b48 zCZs@VS~{4PTqQ~l%`{AuiHkB5(XZ$bP4ak0_HD|8*Huxl@6$^1dA2yZZ7M=J^$;v; z2%^28<>7v9DU{!v3!%R?NW8W>4oe8J?yehZ+g}qwvku5)aUeT|fIL;lI*~znuFr@=}@6|Ks0u?3;LG|@sz(NT1d!~o4G!KX?!Sg83S2^ZdC7U zAi|(L@uJppmG0?k+ z5AxG}AV*4$aF;Yua}}%6ea?79vr@Fwb_N_e!@-s@cF?$NkNssm3v3(Y@M@U}wp`ZY zZ7C#>mB_)RR2|?f8$??sx#O>I0-RN+hD~25kwUe3ki4%HQkTlZlDZIjK0E|BOUnfKE=F_C^(sR1- zdJZZc zEtZy1X%#V;MvCC<{Scrk;beTvXe=u#MdR)BaC6`kI!!?h8ruZ0^RPR_ere|AU7LnM z!5pT_0v;+c6#f2e4Bme^P`qjZapFtrB}#4aRz2%yyqtkv(c5`T;+$djURDQVC&9%% zQgr8GZ)`rq$2GBIarnFw@ihyC=;mVh{zekoMhK|)s0duDQGm1V_tDL^yGXopJP0;r zL-hE^H0c4EccMKv}Hk?VgiiLt0V#6KT#^kN6m?m__2904H+H* zOXn6tc482i&G8}w>y_|=8y|hY`=D2f68(Bq10-XGFzKEP%x*o(qZZRqY9EJ7hV4*6 z@^*cdnK=Zo8t<{x6c!K3hyvE}P)x|-wLD!6UtG_-qo zCuc({-IVd5ga9Wy%)w_}gCc#K$?&vY2-muuVDX7%yw%(FaaJqq#k{e`ys=$%KNBtC z;TH~EY>dDm(@&JQ$pJ6Ux3o89?!u6K~^ZBYggY!?TxW;axEk`~B-|ps|Sq*Up)s zkhE5$wPH5@`cH^!3bk;Orvx!>odZc7eDDyAgG)XU|Dz0A~OsderKOrWe?VyIoMpR2h-Nph}6?u z@rJ7qE8b4TeV|O*mimIOCLg9Kk;09w5xbf3{U@N4En#W-)MX&6CD+ig9(_r90t7xX92O3@w;7m7F zbeN$_o=o+FqaLMDI3x${_oC>k`3q1>q6otl4%7WfYf1Q(Xc%~z3%~WcNb!rKv>`PK zojzvZP=6zJymEuIxo5!Olw_Epe}c%3d_^q+b1`~T49Z{FK)>e=kR=-n!P7nz>JB85 zF>7RT?Y9!#Hf25*j4`BNo=<>*VgU^Kd&0PlZ+PC-Q}FE+4sYIfz>i;oMflAS(m!z! 
zf7}ub2hP?d?y|<+PdL2uRv!=v5L0Lv=D@Si92CW9 zef>o{oKnT^{7a@|d|MSye3J|8&Je=ypBiwYS%L1q?1KYAeEjfM36&mtkg>)=;E_-a zm*$Ryw2_PHn~xDVYgaz**!`KBi}sK)uj5(X$%5;*9ul37%d~%N8rCgI#bl#eY8~80 zw&i4lGsMFL@d{F|*GK>RmyeSzBXDz(kZP1kf@^It%t;M|!^X~Jhw@nbP|nAPo!(e) zCquugOak6kA-ElOhOJS%c%>U>U|21O?e(^JXVixJZoUOn7735%ZA0x)je(mB;uEp`>eP2$TgDfm@0gL|t1= z{WYUe$1@L&*LTx8|HCBkaUu-HXF~hJCerGBjoRH#$8~3uanhMnRJ8g9DgKZH)HD`8 z2X7`-8vS(8lHJ`tg<|Q;G`b{K1`eHIHU8av@Cr90nGNHy&P{+ydS2+peHJP1*M_T- z92}~3g!rTkUh@|N4CrFd+%79z^kUFDrrHW#zTrT(%K&zFWs3U$bHwTq95!#!#;1dy zd1bS_Ak#$v+ULeY*)B7By>C9Y94SG=KpE7uPbZE)L*bQKA;^FKL(B^|)ARPR$bXxI zE*URq>x)xF{BklRUrmSIde_La-AyzlBNO+xBx0T3VVZcno4oMJgP}3e@crOw@HLH-*az^th99CUK9s~mkIC( zb$AXACU}$C-H0>`%op2aKc(3gDw*9KST+NSCRK>yYn)NES%{0}CSi1m4Egif8v-}+ zVYK*IIGgTFKj#Lb)Y)R(E0DzIM?&(47XdK?`A|Oem6&Q(&}Sa;7?_@oOYGX{qXo63 zcU3C19&Z{gB4~%R-H3@%Ux^9=hSuXL77EAB;L9;OW~%L_K~aK20vh#kxUw zKHr0W`=$hgp?vVI@d4%a3go)9CWd7R@%dsGj1I06X|10QQHMDgsI9FgR?TLoPT-qi=i(njZDX7l*BA#8G8- zcVj)UK`$^h=;xi>IvK}xGb`BcgoVb7MT`6NA(Po%-Ue%MRz6}^c)=1!GrMztWr!KQ zA-s%Nte>IHf$H8V;P2BVs*do)g9WU{-=BcjeGSQIllhSLr36M7$%5~zM4Iayin(Ql zm=ZETFTUA8>UYF|*SuVCd)i6fxu2jd2Fa*6I|J1Y+@LpgZjsMlGoUFt2@YR5N@hEE zQLCO@d|w-lFKyS-Tf2sd%%~zzcUu64o1#eEn9+E0PAT43_e0Mn9r|LvDui4TK)jU) z*z30P&Z|trU^fmuD*-M4*oo9!*?pMVoksjDSo+Vq{^Bzme0`I{XUk{eE}e}${-fFO z{Gt$ct7(Cz?npXD(;GM5;^P_Zap9C)gB*=?5W?o#teS3tZSuj27ZIT>*c@MqdBv?E0gpv)Z;v_^`a}BwGl$Z@`+HO zK7mR!`J$yXAJY>QarLLUB-Ah%h6ajY&wmo&v}_rzxEqO?%K7*y;Ug_G+D~p*C&1*8 zEO?mKLL}`d{eC?S{qLlpbJIC$aQZ26k;{SEnQ@@CW;>}1{y`fS7NDC?I1XkN(&-bV z;Gsqd3?>G^b|oI68^+nrlVluNkTcHic z$Q#r@x;_h1e{dl6#t4Kt&LX>hz>z!-c}J(>x`?Md`!Wxhc31$1x>ewY#B}PO8Y^afCHJ84sao79`C%T5gIVNa|p7;(Eg8hd7rHz z_z?&9*672^)a4?HJ||r8RERrtC!=4?5U<#04%mqaKxC^7JD%In>L332-n|4*G)tkY zdmizA8wN*f3n2gIPcrh$R{BFJj`ezS@ZI!h)ZgVSd63VHe_T3nA|$F`?^2C1S?DO6 zh-sG&&`Uqwldq5SAj3Qg4(YBS35pU(nu_p1MKC_p@TX(XDnQtqQiye)3n6}LMAWH{ zpP1cs+PY!=^*Yh@g}T6T99&q!gGMnKUgj24Ok`R2khVD<7;duX$=Cy} zPcDGmzu{1RlTT9nB(eNKF&>@~h>L@r=;Exg5ORbMX*;|@Axesnf=T$6*bqnpS2MQ=&)^doeV 
zY!XJWw7%`2?`F)$DK<}sQHu$FeXF*zn^Hpbh^nzxii{!0c{InE^V!D&YB!aRRx1Asn~X29*onMCHX^*lsSsI=}I#m}f!y zE&Sov#u9k;RT|zYWYRTeVYp=+dnb(l(7v#8@*+1DN-+ndwmv75YEIKcZ5zC(U)$%M+?iO}j?Mb<3pp%po-x7ZPduSBcpwr(+qE-V5vKLkXA5c08H9_0@) ze^Q!3B4l-N&lo)H>`Efe2eN*9F&L{z z!VSNLG`%e??f@@hVc9gV=_Lkp>>X(Ti*Er$5FfsjSr$gO}eD4WIpc8U**&W@&f z(V9>y5JHZn3vAxCpBMUmI;x-G&^y!)A4*l%OFc1%d+fVC>o9?{e^o_uj0p~9a5$an z;LxROyot};V1<|v8kE&xzl$cVeKHqkoiD|#7xEY+8BA793juCj5wx0#L({lbwBTeE z-dvN1_J;3h^X^0BcVQy5T+D=<6ProiwyX3_ZaP*gq+oM=4ZZuTgFN1w1O64U@K$mQ z!R5bc>RbAO6jKTU@5eL`sS zbb_wee4f9j0UAB$(1vG?FZF}#GGy7D8ne4m>)AalIaQ%*MLT$8_1z3*@$G zD%ixP!G*|rQVkF2rPEoM!b?C2kG=G&!6%{{l@AkqB4KCM65_OH1om4N&GLneQDe!*APo!)6r$+_S8Vb-A+qV#gWz2pEc3L71EZeUuX=5UwXDY5 z+%rY>Aqn1RCm#G-%I;s+bV1kUk|=E06$Sc29M+wPGk>a(W8>#S$&XU_BcTYs*ZkMuYDX2RBfY6^nViheFe}a9}ZSDpGfkh(8sa__vi%R0yR5o);kW0`uMPC z$s8zC6eC}Hv~XsN5W|gUqn*Jj5%JQ8@cSJ6HL-z33JUf_%@P%u-IXje!kJlKycO;a zaKxO0o;g!tQ}A<t*_sRMQ(u#pn~&3~J4tw?JOiy98fe(62I8z^w$1d zl-7;GTYuKk9TSF#{@p@ouUi01CdQEuK61ETyA+r0^ur}EgR1XP0i~S+*n8CjZfJDy zN>Zoda%OigtsT(+r-x|iek15$c2_QK2}cLU)dxG;;In%iUNY3jy!WekXUS}M)hvYG zMlG0hLX389oP#Z&`1s_>IONXTk)3t{;9*(k?^r_86m}?}2>@Tj+BwfpU_evL&nce*| z~UU@SQ`y)!x zF?2L;w}~X0nhW5*W)Uv;*pgwF@h zCL=O`$pkb`5nxTZC$7BHEvgBa0z1?=$e8E|uQMWf^Q#Q;=m*xT>9@p_orQLG^Q~bj zv%A~Y22dC-6qQ6eq1k648nm)GHRE4ApZ~l-eu@Ar`jp{)swG{S>yNI*C74KyYuTcn4`t;Df^x*J6L#}gVm3x!`|Qn zq9dLzC{rTDY3`bMbc{U7De{49DSYOZW8i_UJ9YgQh$UPx?mImarR^4xgS8P*cR3%P z#(p6q-|eQR=i)IOm;l_HdN3TnP17F z&U`qN9>JVtA#pa7M7Mp#n0F))_w=|>u67K(TEd6QNj@;}Tb9(KCdRB5;TUxV=;Wa7 zy%mg(xMKSt)(RDw-Jzu++IA=L-UuDx;%E*|_G*Lq;*X*^eV(}3Uw})bC!qLi6Y}89 zd=O#@{5c^5l82ey)rDfOZy`Rt&`+CBZ6aq!$Fdrg3u@LciT&tPv~Ybg_V%RX?O)fZ zTFY&6#5EIY2NNN?_b@Sj^OkxV=HXnoX!K4mqs`OAKoyE$O>+o1y$>Uj?W1v7X({$? 
znTO$4Q)scW8a#d?fJ?jGLH}zrZ{e+J_%4{k)_(-E_y(f4%OKiO)QVLNkXxs zfI2d}3sNsY*K2+Bb?`1CoEQ(wncc}qJtlisU8G~9Q}M598b&d@i&K9{gio`eLplK- z1XPk4%ZjG{gTT31^Sl)4b_LwV?9SzAFkHV@1mk~*Lv!p3`jXk*i2HfyTmPOe z{CR-%s3gKmW_Opa-zA=ZkTz+hyfHUm2Ao1!&CdPOEBAWSz`h3x+3J1J2lfxaqrl8KlPF}|(PcU32fD_E_9&OR5 zd%pVNw-Ked_MR**XLh$#HWYdf7s45ZK_a2CfpX05EC&|M z?rtvdgCooI$V@R+Og%2ZJcp!L8g z{CB62m@Z&huBZV1P5wa+H*TlDisP_sL=N64cuKd8I!}H)NrBJI?*1uIBC2YkGI3cb zU73IzncYck{7ANG=7S|hf{gnz@~=$-IdQh{p%IKx*>maXg^Cc)>~4n07sPgtCu=Gv zVj7S2KAGKRuQ?~WZm9>#>o_>vVGjzUe%jmHo8b`4y50F^C^_)KegU()>zg>(k*f!< zDwpN>wPvF?1@8#m>u-c=I~6rw82i8RcoL@o*1NEIsDD#o(UdmPu%`!_FuS|w zpo&@Ux}?{@4=PT zCW{(BN>FRke7yd^fHr)X0Kba`pgY$S9Q@w!%4?^f*)$H@nB5)C4i=UDF@(FH*fY7; z5)OYpTi3bQ8Uvp&YiQTU9R^Ey9?b4K-w0tqFd4q<{S}>7oP)sZZp>C?+&9gdv|aXx za}gzQklEd>@*KLcIt)V|72rSqFM5U9-OuN-@GK_>`n#SH*TrXO9kV+*l8)|cuh8A5 zcggGCOvq+-cWv!KVsH4K7GBE3o$*oF!tCzuNpYa3i$L~KFpN1DKz;=%pwqol6fB#I z-#)3+mbL2O$?VQ#ksEAwMBZUjUFe9S z^?qkqtG$aCw|fSvU*fQr+1($R_4NxETR;-CJBJx2P+X`da=lG(djxwXN9o{#tR~)7 zW_M<91n|;d4LbWLQ%szPiaV& zv%44nXLo#?H#Js}$m}lli2=yJ$rMd(bi}_B92Tw8Mzhi{JU+8KcV_|Ao)`~??Pm1U z-}%_e>~4{-40bF?C$U4JK#kbG3A4L^<(p}+LoDWZ=3wQ#7qp7movkPtN@+UVZM#Nf z>Y8XYvpctDmUUGR)24=QvS)rC$c&AK`Yo%;*=1sAA5nzHt3&Vxv%69&d5GLl3L$gm z!5!XYV#w@{*CW93!S1-==PgmVz78}qyE8jaKu@T{YxFX~AI$E?GrRK`+GM}#fi1ke zz-GCMXF!c+g(&ZWGrrp-#DBw@m~u&myk~YddLtj!{T&07tDWg?ejv&-yL(+Ai7DTO zq{Afw{{G5`E6nbQQ3Z|kjz_tqY^yA5rN$n(^Zn2}zLU7A6tTHrzR`;}mBI3GS8_JIj=70793cXruA zY!kTPmZyhAD!ZmbB(uBShwY$N=Cb|UwdPpPa{NPPcgy~c;hA;u;9))o>GHY|z`G(E zDtE(V1tC67R7ay@6UhT+ck6DI!YMlin3EAm13v|$J+r&Do5k^}-Ab~M9|a}H^WYn^ zyUJS!shE2r&hE&>oc3ng-f)Gy_eh69yA+U|afW=e?V$hoIaqct7JWLl(5#kU@eEQ}EA5Q$??PPX$uUm*$Hap>>(~CqN zL;8@x?C$SsYq&f4pxw@^mUyg}!v)OlPNyv3?d^5|_sMK$WAhYP>)j<<63gyw`2w7K zeFEmMGbF>t^P%HY2_$CA!fdT1dSp&07Bjm$_QkMgCKvLU-8BTApyN!E zk;lux|CZgL+nL>6|DFLQ5lK+B|0uZ})kTe&-5H&a#*_MMY4pWm(l1j4dTt9KFgJ=w zsE)=9zNL71j31t0c4xIf6%sBBV1uCtoIm`Cx1ZVF7iM?zI|1eX+KNukGlpNx?p8(4 z0;_)C`l@alv}Shqnc3a3xQ)EcPiKSc1tI9kYr$omk@SwXH_mJ1<0<8FcrOl!4YNDx 
z;bIt3F$yX~#WZ?nIJPsplMee%BlhedTh7M8*{9j?k=dQ}lk+rNG8NU!($HF4M9rAp z<*m*F|BM7Urm&Bg21jOhmoNK}$mfc9XbK;lb9}Lx z*n==XovluJFi82=&DiVH~r&z4v_a;V3>jhA84a{dvTP*`40sA~yRe0ly9`qyBA? zSjg$z%B(IFlL&*V4C> z6L~-A6=runp5ZvxuaF*_Dg{=POF%X$09N$b6SHmOFys{@IcpQ=Y~K z4;VfufG1B?AcompW11gonwFy9COMRP9YaPlyYqTo2wwY#NPNUPI!`Qy?JeYDr_&pH z>fSNpx-SX*F0*}4W_K#pH)&B}2FiX;LLdEV8o=!Co*)P;3i)RXPMm@s_7FW7ZtowCcwSoo_L4Z-5-Ui;B3gjI)MXJI?Ww2y+ zr`XTlW497CyCQ{h%kzi>v%9qy3SfKfPm(0RjT)-PVGFanO7&+{+y5-F;4|ZwPlppC zBx{E6()Mv#m^dO4J$A9ZJ7#y#o(Fr3qu{sH3Zkkhf!56K(zgd=_t1RmdPM=ux=SI4 z*`2Dn8jKITgJUAz8yC!v6ru^UJN-r_@>|ju{G9l3FJB3?ncYEN5c+x)qv4woXd}0T zh?w1tEM#_9_lbC)uB7#v33!9qT~p@+YI?hl%y&+MK%Z3D$?Pt{;W7PNk&X7F;&IyW zPCERzkH}0dfF-}e;p<91`7zUon*9pX88z;I}Fc#h&<3lO4yGssIB$q!4R~-$b=*FU`I|VIGq^{=i|bu-ir{7uqr~m-~Z_Q#xgRG+1;(Fc~Eog zEs1eGLfe#*Fqzri=&iS@vFUXp=ud~ArOEJ++1+5)OIkZN7i)gS;70#VREpW%_~1g= zc{~(Oeo7%~Z8G?b*`3_J`B)flLY0jtu$?vmjC$b-VkdacVf)$*x(4-zAWUOoahAd{X$q_rVY=3d=uF&@xn!>EXOmu zGu&)J;%r#fT~`9BZ=_*4v%7N3FkCXG5Yxu}q0yz~q0{`wqfSn-_vzI&R^>r2KC zW_P3Q{-fC|?~wJoGU4ggM7DQOMGmg-p+EETutJU}NoTXhre1#AmRA~aqm!nT?eNcwa$3@KUj>Wo?oWBxz6&1{31Km_yY*rFd0th$A!nt<%Q?|Ok>bW^k_T577o50UMY9c#kcLmJurqt?S zrS)~5+*>zT&FrpFK^%QV0vv{*wL7?zCHDFm*;QMt*-u*Seo1 z=FIM9DrbP)#p|R?={DW0orwv|?wsx&q3ScJpdDrv)Kl?#( z;IPlS*L81`DzxbpgD0d3d@p5pooTe6aU&QqZVqGJjhZ-|vOACVF}TqBhU{{+F{B4e z;cy^;@}Cic+a^2g`XNOF1t+}JmFux+gd5~hc9&{q32&2^%GObK_iYDtOdQt7a6?7r zQ{WHNDZBe|ObxE+j^c~|hGSG~H6EtyuA@-I5^l!B`;irJu;vTvDqY2Iyh_7Yhl=sV z>AT$R{1Mhj+1>D@Jdh8j>@MymA9kb&%PG6-;kKSPxqV<6DdpfYE)l|(Phq{c$m0c< zYK&bSN$>R&c&FV&oKhjP3Txrn+Z^U)3h^-dAw z{OJcmYY1;DjzX_TRVaz=i3YXP+48t4$Sfzjr*$SXa(YOjg35&Navk!_n`T z8{hI}5bXU@3r{2yAmLfJp!3lH-&1zyY3YsTA7;w*f~}yQvb*V4u5eMkug4Yrk@&KM z_TQzpSn_ea!2LZznXlB$>T|H_MIL%w=X_E7HMZHU2nMudz`t`FnZdoc zT-05Ll|K{liQRNA7WV|tH&vh+76ps_LYS>ye;oKrgztU^;w_gU+{jZ0M%hWA>MfmF zFCG=zUYp{}=~CQeBj8$jIax)=aJbtog?E=7;MC8D4IdLdaQ_M^`Zk$iV*q7$mcGyw zB7uj)b)mgUji2@lL{qz3^w%1Ir`Gy2pW~4rv#SP^De|!I#S|Wso``iJ<#_Dtd#*BV zJv$bV0WO=0ps?!(6R&FEF@g``MagLSw-obRU-Ac9^-Re#3mQ}l 
z;n9wZ>|gg8zT2n(zxZdP$+O+uKJpnm?NkcQO)2n9ypRQ5{l&jmRib=JEG~D=<~fdi zpz>)AypIY6lU=s#=qwEk$Q9$Gx^Z}HUYqP{tPyx=le^Ox1)Dx62!^|C@G)g~p!p+vSW%?qnPNKoe44E(|$89-a;r!}+)B&}+^}-ccEb zn=5OuVX87Z_b6tv$~e&aS^*h>U2J#oQeLtr6&;q)TvzspYi-!i4(aE>p!9su4LHU2 zhhODwO@-JyGZXKGY~i;`+nL_7G6-0g1Qq9JGR1H?{IR?WFAj=EJ}Z)EcB?{eg9xsk z3W5U{beX)ZHkK*SGrGwS>m*Gw;f@ImE+%iH&LDk~hEV&{9{bZf|BO>3u-ET>9%C!q zL5s4xRX5CG^YTry{eh#=x>|y-N9o}YdsU_{Ci9kE3sz18;YRcrzTkTV9u`;Q)B0W* zI&Ko1)sO&Fj+Mi+@J~#&YYpFXl-%8rV$7Uzix1I0!B)P_g@pQCxc~eZd$;N~e-T)W z3-Z&^``CZ{ruZ|v*IEu2vJ+sC;zTCzs({Lys?lU~1g^d?ns@VoU^}fAK4}HOj(dGr z$5}men*-QB znqmTBhtA7BT2glRS&Y`(wXn#*fGshi?xi&%cw4Fpb2VdlM`|>lE~H)LGd<9{&umut zDG8jj%fM{xYt~q|mDldc#D6mi@h@EAW>cHkf=&6*_#hjaGWM~a*B4(^l{upqVGIB-=BjvW?;E4xPUS0#gCkEyFuCE(=V&Oj$!Z{QTW`2d2=odr1 ze^#=MU+rKrWp_^wJA%^hX$?Vh+_12Leh0KzVA9hS!ue7kxV%OJIq~|C^jL|@@At=V z8)~svkQ#2j<;A+(BH-P{YN)@a09r|6Zr>*XEj26XT++ciGFP)+2INO8hdU>pjh&&s-Z0* z5?7xlx* zE5caK?kH$GRRz8uANT{OhZl8@U zta%qIGur5h8kF5l`fP;12R;`L4;u&SQ^nwKG6en}vf`TGLNKi-xw{K}Fm!wdTiuVc zyDgRQxyK)7ZMA^gWvAfE;!@09@|@pWyo>c!$Oet<0`N>d$22o8@W^KB5t>bVYSVV` zU9(;=@%d74ZA%7O=REeI?JqZbUx}6C7_9A`!tXVzz;PE5jE)F_>I^eBO;Hm?yTr&V z$D(fPJ=wnV!=Rt16cnBUgjahBI=^hujIz6r+nsRuS0|62OI*Q}vb#yQEJ4|HzD%^t z8yzUSv&%KWn=^Z|@)r|e-@RIBuNVXm<_i3QI2;qyX=eRZ5qG?+WEKhWpk7e{I)lEl zhPx~Hba5JX^(euLtouB1@L{I)CF<=IhSed(&L^OCSQ$4no(E~62#qS^aoU}g*nFp;iPylySYUNqnHHSHu++;{~;NA zn86&%?t<@nfPe3=9_^zX@E@IZJ&GLgV%Iy513Nt+kg~hA`DXBN+yR-f$QK`vmY~{w z9ki9GvvZ3AL0+pC_BQl~N#TK9^eqy{s8yryV0rA{FqM5=35S!%|Fa0ci!XA-xcH9RcZJuWF?=op_465tAK@)c<^ki zVryiII8(6(D>j7Vj@gWBdkunl&9z|K#~-?9%Ck)_26&yaJA+1VOte@aOWSM-Gbp>O zY;=XAn!X;{A}1VA*`14p9gZ$$0vrP9@{xl7&tZ_1d`I>ya4Z@f7Gt@aCZ5}B$_j^v zfb|#=%zUE)GgOoLu(TL-{!@uJO8)WGW%Jne^U2_UrxYUpzF;Fh)bqyjEL`%c0O`%0 zTfRTb_Khz96Rm8>QQggKgy(!kd@17m6ik`9fQuIVW-C`#LbqKkgigz3gV*=LUEgYO z<$zH9@1QjoUTQ#~SPTK<$HB-Yt-@|@gfoXpQLo7pGdm+?>V0hC?+5C(+AKw&)j)=( zGDr@V@IA-kaMaBT96skeFY{f->Yk^9ZEgu1yz`K0mLA~oAvvh#o{xW$8u?()X4e00 zA=uhy!WGG8b}8}=zj(3?CuS$%@#{1A+QV{?eWVKRUW)>331=Y=>`9cCiAJk 
z6HvUQ952;<Z@ExI zYQ}(Vs~Ap<(t;schI~kBF!rBL^9&7D+|oCWjDqC6 zJRwHT2J0xhbDHCf&$r54XEr&56=ip6PpqLOsaWQJbre49A;pL(hWOC!lc3-;9t^$3 z5V~qGO!(=*jlYND&rLOWXH0K=d^DdG$i=~Z`${Oe_=8n0UCg7$r{ed=B{(wa37?~~ zkCirN!+(eK$t^Z9n_ZXror*&Ia4{2CzuU@R-)>_gg3DmDVlqsdKAR=3>Vc+7RaiDZ z8k6qC@Ig!!t}GG36~AD(6hD*&PSQfpb}^0^I|hr7T$I_Em_S8}6yEP;pwz=ym=bJ{ zkAKrVBi0f1el7NR*6IdQ%I+>nEoevOKiTAyHhVHraI~oY8wb9 zmwou~st7!{yBdcTDPYeaDZ4N>0bG8T!=jeYY`pLv?~Y8zob+N0x4FaFjAKl1K`s>k z&4rXjC)la4Ej&l57)@`bV`=DGzWc>T_IN`%RNhJe-}94L@Wfu|R9KCEM%0<9IhOzE z9snW9wcx!e04$68u?0GMs8vF{Tm_?X(W))7?j_WVv0VznMt4wAJ?b%I@d(^N=lDgs z!|_>_n&A3XfFR25mJc$8gTEVPpI7=}r3&p?Cu^hSY#p|1OArjbAcAXV{lIZv1n>PO z3g>OA!iKqWcsqM0n=mm69_=oJ#7pgL>AlUIg=FHy`-SNH<|-fdwUIrWkPi_KIpC;$ zfQ4E=;@hW|;OEP!xMI&zKK99XcH>C}WG{#Vx8KEVW2`cI<<#K)&0#ovoHO@0LVib! zb{Ia62bYgO1n2spIQz8(M^5#^?BYqXaa~pb52P@0nhV6+tZ+9vKN25NcGrB(20Pkg zgoy8+u)%=#4_6t%B9&LN=&W&QRU*a@G7X$nV8f&W8PoSQ&{70tK1TgZ(1mHwR?l;ummO! zGJu&=d-3U}{y6Il^{Mq6gnxa2C7lU}us_wXXNDqpKdR=T%i=LAv;v0)bn>ptE1BWZ zG>Cmv467#IW6f=c`P+WEICy3rj<%Qa>uYZ?^La(^HkGow?)B_u`g^|JryL(mO~iSl zrt%Zx<-t|28ZuiV;mqm)X57&q<&A5xXJsG`pE8K={-*=>{t_4%>I=WG?-LfSF~i@93Q>F05UH~U{1a_Y}`3lI4W<2y|2={W0oua z)iP}ez2pQxXbx3-*A~te_{x4Mdt#1<6hAE+hQ5#P3wPI!h4-7p5dUll>`x+hR~&*9 ztwnfnjtUkxrLfwyF%a^s5>8kBWsCFY^C^FlQTtFS9&voZ@y`yncyksoSpmd%oG0gc zj+;*`z|(KDaK76v?ylI%JSUdIg7GO(vA>R0hW_E-4p(Bo?=ff+mdA{uMSPPk2W1o zo>9iaI^v-CPz7wA|BX%7UCt+}r=gZ#2}bmN$X`!9$daTv@M=IlL@z`pr+ST>>K0*l zcLr|Uv55~fc+2KJDg!m+BslhCIy2DdiTAHp;m9RX7`Hx@-#*t58eftf8yQ4)PmA^Y zrHxmNB>2}PT*6kBJqiLHD*4N$A!Nqvl53y zI9pZ@cNTnL!MKjwYiHp0b48eT^CqwVev-xe<-zpeT<9`9%47j|`P0qCc&KMOE>T_0 z^-aDoCzA?T`8FN`a%!2{4+T7Tw;KNqjlh);M{(t;YOuP#7KAzekY=dFTE>a_=dkU`$&hVW2H&Nx zSYY^eo})^gCJu!-bH*he5r2kd{K*GDX*Rsvw1+uLpYoB~HMruxP#mLV&ug4Cz&=I{#n;Bch%s-4`XNSGHBgE@(??--VY=*g zs0}P^r!F|b8P4rjI$1ct1y4Pty_zy>%#W`UGBmv4&36f;SsTKM1D!I3nd9-GgBa6b zFpfRo$WHl&!Qgo{;Qdw^#JPohwObr|s#M|%g&+KR?h=~V|L~7KFS)i z@#~y?%(HId-iNNR$hCzqVO1tv9kP|l*1YDbwPk2=BMI;CnZ?b1_W(Ol736zS&NMWd 
z<*io5+#Vd?+{crYHQezS?SQnvlup--DX-*$NAHfx%lNqF3L_F z=cj+&V*PE3LAEg+(j;rxzWPtx_fk1}u1UZJA188+XT2bJW;JNviU5^lKjsuS5dW3d zqIz-w9(U@?-p859a?BgPF26M4Z;+qP8Hsc#z%`E~%oCcN9xt&oFn&rIUhUb=7wy`@3@2xT&w)Y^Kfl7JpFYK}R^+3iLJpda+0Sk4AG7@J zC2(p*Dn#8|!g4lt@$ye zOCvlxtzEdgYaEPE5kqXM2K3)*$M@8RV$VA@sL2Csny&_4SN%Qm>g_v#l?j-Q1QjobPC&{r&M0QJ|_CPSR* zTsC?}H?Od&LggMY7`ZNi@0`^aiZev;<#aGO92~|bp3ub5D`JdzF$SXwuFLLAjiEV6 z3M~L|OgutZbI1C^sSl|5~o+`sp9owlwOP2uK9O>?!Q}PI(kq zHEJ%8#7=T|2mA-XIOkfJZWIU)&ktnTk-GRZT7tHHeQ|X7E}7XEGYF^b?uVlXsHxuZ z81vHschNIGp4{D`pS^@d{sPENk;3Jprm)quCkEeepMaZGeVQiBX@V_qX-^- z>j#%1l>aJ^LUVF=?|ge=;+*NsR5J+%d?axff%}&51ECZwS_ETe6?~Lg2K92%2nE;A>|Z-*qnr z1#))-kNlytT^&>OOo3wyOUV$mvMbkia+j7Y)SpEi6T{B)?wRM=>FovJ*pLOItadWJ zYpwh^xjX$KDOmVmK5zB^%ZzST!q<~AP`fUbS;wj12{RGSTo{65JT3T%14F>&KQZ*# zL;djU9txr4?&MvhSZM2sYCrvCYI|(q;TI{4{Ner#q97yPIQK1Ckgez@-)Zx zSSv}P-o`n zYMAvR9Ci)!WY?am;kriJgC66L0qF{S@N0cIw^su9et5%|%*Ddhah7OtMv4l9-0+lM zT*K$#|8sY0<#zC@W2Ee6HNCq`kYXvhyWu7+!fJ)FaJNwmn)5Yb1;fU;G8RyMyVmcwIZ2&m7YS`aGxaiTF_XCO*kLb)~ zXbtzGr828BFKm1-!4PtH*4ACZ)63&wf}R+nU#f#Kxx3NR!ceZF2B#D$<0t)Mwyrb| zE`O+ix=~#$L%NjrKah$SmX+YfMUVKqz57{$IXx#*@3EWu2f zANIv2SxLJItSXkm>0S)_%4-N4$=wO`&fj)&1Rm--=&?iO4pS(*+kob<{mv%Y+l0{= zR3*XjWAt!GohrLDDFDjSYoXqDAgGeNn=K!SpGDP}zqA*wjh@8j97}-a{pHZ^_lZq2 zUdt^{rlXkL-J953{JQxG_Ovq>{I=%8S8{i8=Wg@qA;tJLB^{mX|Kroi-AUe*!_)Kx zSh82jnm_?P$ld8LkHF>iqxqJb1EFSmEy&2-eXj1qEU)O{nRyaA^Z8)v`n9rva0|F} zknT6>bc2Zgt36&gIARTDcS~#RadEkkFl08Pdn{<~8)O2|d0uwQm9o1}V%$pZ?zz~2 z4KoXd*p(s(%~yrCAJKe4Q8YftufjundtjQ@Y_^izopwqYsJg#qoef+0z5SVZpWL0P z`W1e5Q4=fOMg8!1v*A0ryCJ_GbA>A`lvE=SFj4B~TKMo!YDPVs!dt)}a zyVrxmP;_qu_n$r(?z)Mgl-!-(-_Jr!q#+*oC&9okqwwFGGTGuoWIQRmJKW+7tZBX5 zUUGLfl-=oA*`QBrj^I9P6r7~&uFowxpLD;I?d~-m-9p5;i`-p6;&68OWGJW~uYoP= zdxO{3Tt4hdEZ!YoiGk9ed~ow3_TT@xyMHCHRp%+|Ic6{av@{#L$=&s8IL-AxTw-5? 
z3!(RS>N6mBC$HPadpnfj2$y7R4Vc3>lDm88OC1wuqd_S@j=8+@~RIX;3^p>5bnZtvW}yk8c^j{niM3Uw#JAbo{ItzG2SqhpJmSS!8b2>xpVh70GB_$QW zp0IP&S95{e-zdN;a(CKMJNWJ8FW80VQkc}53=`z%v%&%YxWUIt>{%Rx+ukPg{^ah0 z1reMH4gphTGxof%CZ_EaMw9@CxyvW_%MG5vuH^8m0doqXi3Gnqkxx3Us@MfpL`^*W)J!&(6L3}#7yAK0v;r&6{Q!^OM zGh9JQkOMG!+zeBQn0cOy%jyfdB-JR*%;jzTe0ekeAhzoNZ@buTW9`{eu{@Oy? z{f{+++p`YH6v^EU@us_!ZtCFqN9rtNc_8GHyL-L8Ka`gR@}QnkSU#W{-N@a!zMaaN z*CoO-t8&;W|DNs4+Q2`|%D|B|MOfT@ovSXV&P;N5zVCA&NqU$y>)+=UAB)kO++DoM zN?x+JlMT$RfYmkeuv4L$eY&BDZpyU7up}H;=Q1uLcNciG7IGE*!PZiqwJ-zhN!i`e zecl-Owoc}F#1bA*cGu^eD;WV#kKYrW@Hu^M<-_do-X}pAd8>_5F= zI)CcPUVu=Y1wy}AWE?Xjo3Sa7y4R00BpD{vPV|wpx@I;gHNZHf= zwqX5{b|cB%&Axs5WXnz$bfxTWLNm<~t)>W{8oVI0RRSZ@heFYoU$RBy?uz<~(P5T4 zp55TWHk=LvPVVl|KxG(lvxM($h(q)16&NZ0&g;ULG4ofcuqmqq@-IAO?UN4hIpH~| z?UIiXA&q=)Kr_2R?(U*>Cfq9A%pRt{;o7_m50Ja7JvDBQp=iO0dGwnF!n&XDhZGz?2(eO7{0?Y5{g7^6T{MG3I+!#`ei8lw}>4I^LdD5O* zN;NET>IHrdlX*sW0(vbf$7XVOwLjO={SoQ#`ArchEofnBKTmKsgFGy($;E(|NBPx- zci1&@cc#PB;X})6wl2Se&rq(wP;z(2{)&0{It7qBRSiF)Bj7K&yN$MLcy>iCx?J?f zzFe7)BX?J~QUY2leBj=&Rf2oH1!^9bqQy2h3>Yx4q3=pZaG~rjjojVaQVZGjTMT34 zr1$V_bfxB(l)+*!8VriJQLa@3t_wOW%e}rH2-lsAD`7{Y7(nLHni*5k$avfp@&n({#=kh`0z zECu8GQDFWtSFlmC!HX|xKYrT(@62@VeZd(%wMtOH;V^DuI327_*W>#Z>&K}uim)&PClzrptCNyyNxG*uvMEE^WOofSn;p~ zf0DaP(cZ^)oz8~c2lCuj_&92G?K;`^e z$bU8vEZ_R@$>i>ocU5D0wgRp#kg|jR3836n4(rZ-W>FLV6q)YmT&9$$TT*W!{X}+@aO0xraq$=b^BFgw;tWM z^m7asDGr1!DYdY2Z2&wbcX!p0cIPO&Tai5)>teUaI#!uOuX-sw`p+Fa1|0D)U->_G z_gG^%x^<}u(|!tYrHtl>LS;lS#dOmk`y%p!L;p`o44eZ84)2+u^>okDCTcXzJmDRwt7A583WAXVi6 zTTbrIZbk`)Ur5E8bxXP7`|s=xxx0+HaWM5!F$+mi#g`w`QW6BtmtO}3=PhLmZ|kDiQL`Y&!u>L zcQVfUIG10!`Hy`dcQ<2X3~YBwV(T3G;;|4B-X(Wu6kx*NWoW`$E{03P#)41%EuoLS zG3t+%;@7Ewg@0XTbI9G*Qg)Xd<^b69a7DLsQgke|#0#wp1=Ww^QOs=@49BwVWvV7t5eW3ovtCX>5+n>vVh^wI^b012oC_(J!>eZrhgW^@k(nGBH! 
z{@3qC!}H}1@Q%*9_Sy~*{^d_Yk&=Kul--?BG(#`NBf`V$zR=|)ftTd&-Y(bR5AFq` zhg>b1Pw0;-#TPs zWp}q|o`E^|jNILVH}}~eHR@A~Poq0HRx}> zx*2cSLcb5bvJw3}@v1xh`GR3+r1L;HvS}>MXvEOt!4P;VC3jaIfPaOi=l*4+db%~3w7>G!AWEVlI=5O z^A)`D@m=b(Q5}j?^#2K|851D&cP)G*cNZAr##`jValU5_UOu3NW2cp|kZ*Aiet_<` zocN8cwpz|LG^u04r-ZuN9`HkT2ie@oIq-(u-TukQ%5|>s7y3oG{10Vdwg_A;g+SPtw(MB`t;X$zMt0s%?p@WUXBq%@J4^1wd zkeyp(3O^{jODzy!t%9=Pvuij`q`B^Bg%SAS>ot#8gFHZCEqQ+pb69+Br_B4qXcQ$% zP(h-Ljfw-=PR&4A59CeA-CZ%6z%SHB;y^Fz5xOmp_xem>!6Or4MhRtilRvQV=5>54 zxx1gIi}3I1n>x)E5{Fp5u^tp?A?-OZTp5B+;8vAm`FXt9N6E#&UIt_sgT?B+Ac-DRaGgR(&x+%A5_JkqxFMsjz9?F+G2^(Fo( z_Y4c_&WAp=*`U0156hePl<(7}e)vz6N07UlW%7%e#Z`j$>R2${m%~2Yru^bc4Vtc? zJHXoQ_%nA62#FQLx=Z6Az4u$;yq*ynt4Z+|xjT=JG})GD8#qtd-8^S!IKHC)$q$2R zPmQuWR$z@A9##qSbi821cL{i!8iJ&wQ+9psc$_(0j6YolW5x|fW<5R(D(2L{QgU}L zvkUnq7Kb*yEAebs7w<{#E_-q+bRMO%Zop%<;r%}D(LuY&v-0tZUK7WrE6iX+A$(kx z2@jOFvfDdf^E62rCSRp~cyf2Y<>Wv^Tm`_Q;i_CTlOuQcWQPdrdj{iL1$}P+TnmcG z-4*}ygSOvig`q1=u%5EJ`-u!kP0^LbZnuYY%D28NIKrE68yikKy5o_3^yjJ;xNOCG z!I<3L@+s6Eyh;y7cdBqFp8yUs2jZR$zU=p<2pGP&8qScrGhZ-~n@vl=Z_VZS zr27+JNbb&ZUOHq5#o(=Vo88Jd&O3Q7K5fp$o(GR}quz7}o?S5jxx1LcHEi3_Ph51R z9HUnz;OM6l`Q5f&pgy}Any*DbA1^<4gxp;~In8zB0x(;HIx}nZz>eIVtI=qf_GE)# zuVjJyD7#x3uHw>P|M9|6_$wS72i54XN)$)@@+%%be>#X1wLK5|BQ|I!Z@{t?4n zIc-?js>fB|1!2?y5%xKwib^R_Y`$wWv`wd7WD_}f5j2b6IG%(GlgjYO*VlYHxw}cz zGC{nz5ae2}FteMd_$hLCH|26L#C1O}V2|0C9VIY~+@0&iB@Fg;@dt{PSRNXOCKg3p z<&-k?n?k$DL18eV#ECVLyQ{ULyTP`M$Is4RWnx7`m`v`@exw%!WQl}*nbzp|gzPlA zJJm{;lRb2tL7uX^Bb7D~ot7f&GnzUx2TO6Yu@Npg(=O~JcW0R_hVOA2;BeNC?gT{8#cTc2unl&XT(`A2OG9E$QZ&HdXlYcQlG8CeUt4U-*hWmE=iG@7WhNa{#xj@OASt}9K}5qBQUkK8rK;rqU&xE z6I$b8g+m3Xh`zAZ`&RLn@6#}AM=`o?yUQP+JHi53E*Qk*!K1+qOk91FtC72#Ye#-f zXFWG4{lH#lltVDNy9d!z*zyDNsNz|Tw--g?Z*q68VFO^Z3w7=21;V23^h_pq_augT z?38@bS7Vn<|A!fzrR?rMI}aE);I_xde-5~Xp7DFg-3_Wz5MG7|Fm*EBMZ4b==1wMe zH^h(TS2Xih(!tLmn#_#cUEv221bpfT#}h)iLroO=le^RN?1}4EO=mxJlHl~)GPq3c z&LLnE|Nb@uzZ(|eh>UCe`AB3{|MK9&q8tcXe~_6UdcZ@;-EFi?!~NCExpVP1mbtA0 
zI?3HB7?(4n1xmP&)nKh=IQISR##i=Lr@QoO;Y2R=G^KP4Uq2Y&56bQohI-@JPcvi> zg(qID@sr&@XiM{6>VJCg z1lQWN8f0;<*wIW~Jml_@P3H(o0p6fY*_WI6L}+ULfrWnCOC0lN!&938Xt1X%DWuKkj|x&n(E@6`NO6r?oto8cyW{ z^Am9mxx282_q_k6^=wsK2DGdxf=lG?*4}R5S><^+e{e4T79in=?z{jrPOo!?`Jw(FF?=!)U}V|9Sp zBl+%GVVGD}gDu(0xJ@W#o5pTCo>-joZ@t;g8$TerDI zcrlhFrlZxW|9CaIyM6D<;ahS7{8%7mlYJHN??&pLUlf5$W{>8>?hk}>GipIqI{?Fok2_AajgR#kLW$v*SV0}mm5g*-PirOj4ScWErY(VMq)=Sr(07a;xy^_8$13IqiLu zyQ2>hI;~%`SC_YP$HSR8U}hn1Q@Fy@HZ-x6z4;JwD;tJp?qlJ~Px$oa5*!ekijTf5 z<~HQ+OpPlcMmr94f9A7#gWjk_?yhEF7{(tT!H>=z4BhTx*hcOyoPQQ-;|(#rTY_dE zM&X6rGFjFMYdA#Nol~HH78ty-d=3 zJZc1sadb}&ER+mqrj4N>I$8t2S5jvtxjXyov3O-{B~C8>$z2~WVqWC#0{+lhSM@1- z7_paIt<1(juktZ__i4W2_a$~ZtPu8p$%IyNcSe?NJj<~Riyf13H_YMH;pH}NFdKgACz^KdBmZLl^Cl}@H z^00dJNxp1!3;Wzw1ZDry!P;*ft91Ry4^A(~Q}T%zI%zUrOzut^T@5i2kuYoFIJRRS z?M;xo`~5Hg^*{9IORnicNu~sblDn&3xm{Q|!5kM;cDMG0J2nqI-|!(|1lZC%<45vv zcwW(0He#)SQjrvSoGGsQD-*WG`+*s`JGVV_-<_&9Z_N+FlKUczB6ruR9?o7Kj)LS9 zbhqtd!4CHeJ^4Z%qQU6 zH@{_L6NbW)ClV+lcNbVZRrtHt3g1w6r+>i(*ZSN$DVs79^gc-;NyCnl+g*c$F7`pc^BL@dMl3vBLl*VbZ+69U z0avI>!NYl_I6n3{U$uW1>*|$F`xOPC>wAu^TYrIOH3eh_vdC5J;D5KgV8d^e!o?@a zp!aniGuQvehkUNY7kM$*aygmrA$ND1QD2OI2z+lhWgdexv22$Z_0q;-@~gWt!)L?5 zdlcQ{^prX##Gb-t1v@g8&%1dGk{WwGS$`Xl9I2?p4$ zC{Na90v!84cNaGZeqIuIdR;jF9Z-Y2$lVRetYTUj@lcgt0l{6JEdK5a{%B4bUi)2) zPi*h=PgLTjK<=)2S04Oml`+px*STF@5$+{-=XPWRH#qr@DO;4ozJrNi<~oh(|LjSB zXEhE|io%)dL0pa8UBIAP@ZTE<&$`uF!T}u&VG_Ju=ZiD09h9|%nL!n0ccphdKyJWi z4^RL9Wp{094miEzorm2i4@g@`=f!X{xOU-yte40at-RbTAuw#b+V zxjUmbIk2MWFw?iW&s92T7n$7MSdEoDO4iAo=>KJNMLhI;UB$NDQ^fbZYw#oe|IMAi zc*iJmcP(W8dicZbKJrX{v;khG>~8H&Z-kw7GEMqCtS^!aZ*+we6Xr30x)aLibK9?H zhb9F~2)+n#)>{fs>GRcldPjDO+}-niVr;b0MAKKMtjvwF+|eSK@JIz5#wYU>`hFSr zqY_gi|MAm@<}vSU$xut*SG}LVVDc(Ecw#N>B0nv_jExs~aPM=hBAD(PSIdSSpLVgj z=;vITQi`cFQm}u{0xrMnH(NvB&$rB|C(tjGCG75lmwwdXHu}DwP;SlTIy7LaR19lJ zje~g_t-`ZQMtIAZ?n*!EiLKuwWIG1iLM&x>4_1wY9v9D_oUq@8-ut9jbioRT=1dU= zGuTen4WmZjx9LAPekRQp;JEY_ip6!@y%?}r$Wk~GT~9uX7;}H4bN*V z!2Mc2uCi^=3f_UgYggvgq8S#zXBKb 
zc$;EcwG<-{3IC6zv+#?8d%C!Rh=8Jmlz=qSDY9Mg1uv6 z7j}0&1}G{j3h(?r?_VIe%e`~wobx?6w0?xxi_X76bPg7$*?>j&={AR}&e%-nWLl~b zd6u>bEOvT=25EN@H^#uiEIGd5f>9SP z`YS&oU507Kaj5!C$mO>V0WsYlTSLR3r^AD3+R5XH)ipTdtREgvT{H%?Cq=%dD#441aw;7q@K9#9a}4xqQb{_VH^GRB0tc(wHTzBmXz|a4Sc} z7tt8%nZwO$hrvX;_jh&#!{h>Dtxi-DAIt9&slc2*@2%`B) z&|q)F2lfZ!$>u5)IS<3AAGvJWuow`Vmcz~6zgXMBC4Bu9%H7>B!Zwem+;QAK_NF}( zIL%FQPbqhI@iNb;$w&J$>3IC=R<7~wE&CW+0_yzLYRe$|&wO zX*kWn)v%uhL6p{5_I%>jxCg5S3C&NE!;E#L5m{8Im%;wY1n&wXPVV=BWY8d+L zsKl*FL(plyh^-HbgL~ggVb{?vmJ+>|OVAu^m{5pKBk%I#n~$-{%d;V{CmX(%oM82% zZga70AzEHd#bmqnd_(^i7Pz$(_FjpD1KaD^z4;P&rl=B+sD@+ALm%EJI|_a!)4#pS z9~N4TVA4h!c(=F~8&f=S-k2@oSItI{(?U6*70!^SaMC$px&8LR=BKGL2$EA z06wJMH4Zg^#g(VTcQz4Yj@|)XA!@kgf;zjsClDr_tA-tFBS7ME7|;D0fft)9P)#%t z{btW*uV*EIT}uf#?0L_o{oKq4)BEGkt$Yl-a+PO|I>oL{$%XIqPFeJKKYQ%+m>-=} zgeT4>V{GFx{`>b&HsDnmB+rh4tKvd7h~7O7xm75;HU#@*?78Y0MfjyIgq5!+!Lfp0 zf|-ZMVn1njUG$D}?w%^{A7ly_NV}U`n^y@qKd04QlXYZUAnapup{9#=t<@N61g~0*T;#g6S=W*tv}6!7>-pE?>6I zU1tsN>5QMQXbno8|Ju&V3h;6RdGH1pqLb$l!JRu^FpRXjb;asnesnZ1e-eQAXuhoLCi?nM=_5hLDJVB+L32rCtE+f_v8x3^YO77Uf4%$O? 
zT{egCY;SR{f-71%Q&(++F7`z}6fE675zcQA!sc7bAlpQ_yQ(QzNPDiLS{WQYAekNB z8U=BW%VBctKh`;S0q+`2`?0;nn56QGH>mAoCOb1=&e1&ZxP6{oyw}d(&ZG{I*BN+e z%r3q{=Or6EvlvV#B!SkFg{&u!`tFXGN`m}d6`$O3QLq$lbbmDfBp-4n=RM|BY|7oX2ECaTo_h@?8CV0yqWf*7QfSQ&%90C)cHR) z?nMdYk4XUMhxM#gcMvw-CXUeD2&_vA=H0hO(9c9%WV1jBy`#c5N~%*Pq84RMz0qn8 zX?H6Ppp^Eu3Yh|Moi$YOeZLjXqP=cYzcn8Ddc(y{)diNX6T!ccMzDCqF0t1)Ph1gK zi)YKnptQj#7D)SIZ&m|E`6Hoy!elO67mgmZSAKMDFy$tuGYhA9@F^&TJ0+i4#FHkj zp`C^kPZi+u!yQ~sv5gh^<$&L$Y$%dA%CfWXab?S5Dkwk|_l^CKDhV|>*cweJDai417Mx7s=cqz%6*J$Ez((W85O~AzT<>IbE#?Xt@ zu^8$EEk-piUq9Po3+>}awpw6em!*ItJ2)RK0>dUfIBR%A+;`CjTW$)`_mK+T9Hq;i z@F393tA>u9vT(LPmaqCRl9*o=J9BwrqJo2;7}h z1@9kALHG82zQa8R)rXej*AG2hr(r3Rs!xW3!$n}~`h=DC?&C$@snfJR7ft0(bLX2^ z*t<>n(6}TW-u>RfJ|BO_+olp9{$c{AH_qYLqz1y@T4H88MZ)`+5p4PP;TW{58ddrN zG5M_quY0EodB21({kJ##X*(w{Al=G3n>Y+n#9NRUBUadBNqyRsa~)s{C%$cJ({ggg zVAAeBkPf!yUbDbrIPu}9)q>tq4Ok{G%U}8UZ_?HB77-VD@DM0z zn#$$o#NkQO-`;=ig{8;2)ah)zayc7)HXrA8%6C|_bs?Nw zlL~)85poqQkZenrdT@b~qpeC1~exIecN>Mn=DFkNqEpE(NGmDS+!aDPN>G zHDK7(S}@n}gsDv%1X`pwD($6x{3Pf9@iE$Fe6fLa`rXE#v;@-Q#j+12y^j&Om&-uNuq5!|~J92-YTugn=_FpjdMtXe7+xtHlYp zhji1Xx9@n>r7i6F>~uJ~D<5W_y}~X$KgAE#B*)JsNQJVT0hnvuF-e$ZJ`;gc$p-2@FixZB6Wml>R{5~4}#hw zzA!9-c-Ud1A?&pUUp_M!A9PUf&DUXg(IShTT^$XZQp#aO{2z8oX))J$o`ij*ceh@6 z#%03yu<3c3P$ZKF$8yiIVwX!iST7%UXl7vT#_e3a;0;q%DuJP;iST3SJl3+YpNEsq zjy?1@n;6HRFPDX!%xZW=di>=QU1olsyvWzcuk?VJnO)b#-{u@znua~Hq}|nhrn9CLioN1tsqb_acbt3?Zk4!rK{yWHHkqG{ z8VUW5HQ=He06ud@vCqk4@Jw_q8V~Y9pL;vSihYJ)x0bRp#x78&aNA|6gbl8zGrsSP z6&C&+BCv}Pz)-pauI@B|x>Lu+lgD^d7f~&03{*$!A{BN@GZ3UcRa0r#2-qqN=9#q- zSp2X8104t9)0TR6QHvOM?@IvF|6^vcn|S;e@=$9P;K}goyup+FCIfSczmY}XyMwIw z^h5sAk(il=DY!C=d@7GZJ>GNhE&-+m$%ex8WJ#X?*cuZ$fL#;p4|-T4aDP&-xz7F(tAXV0V1<77E5S^bv} zNM6XY$nW!T9`)S?yktv0@8p*rWMCrsf_@J<&(*h{XH|RhVEy3?C?CF)6@PrmmuxG> zLvl%|*tUR2W&UF-cgsP1FbWUaGfQCOP6J((W{MOt3h0u0S`~118YS<*-i+ zMtmGV9J|T*>3t3UB45=7D;MT6ArvePt3bhTD5%{k@`j(XWfp~QoWlikgn_lo$_NYMo*#Ld#h;WC7 z0NJR2ZFPgJ;c>qRYWLEt+x4{VdX)=Cub^yNn<28fq}@4sf!+UnZdzj?>xDcY5g&k~ 
zOlq*|7jfu&{aBVb9GVTtLy&M?`-Pa1C>S7AKe7uTTOLlWxpEA6#BbGGNDy~DmzUkeS@bFJ(BgnuKdG0#R8dk@94 ziTY8XAW#3dGW~4#r+M6PeInleL%yFiulZ`P7IrK!16+sX!-2IIna{*?Ji#OnR|qn3 z-of2mCiOYfcQ1x3N0Xq{Y7yJ~^AGQ=D#rz}(dhalgWn022J_cdup}fHHp!W@S8GS( z#{$Zs*7;&-;9GHRl@18Th+y1bS9ntzE4aMh3?Gwr$6t*{Yre#JT&4pwk#@I0$rQ$z zOcfW^x})DmVoY2di+fsn1o?L+!51|l)ZSBs$d>V3X+a3K6;+{bvJ}S06tZpAG0^m( z3|{E>unp^$ai8PK*t)0))yp39#;g0;Df29_j>(0)aEj%vzshYd=c7zmI{wt(!W-+} zv)da=;Pa9M_);~SRb>vu>#HiTwJ!pt&BA&9Sn?K}tcGxLAhel`VV$mO7*BrN83(=5 z!tJ!U`nx`CChgATj{qVil?224EisDT{9E?fV9?jYF1hv2aCnCZ_(3Ds8?ss4n?d~= zWwm(ASpz+<4QFHL`oned18>nDMP2YdTvIk2Qz|O)J^6-hD(YD1$vBAJP5PTjCtK#e zp6i}YMN_3h)Q8*L-RlG!Kt5!(=4`0Cb&M&!y2G!87h)&*m9^%s<+*da*vrqQARQM6 zF-0Qg<~Ibzn=4VYAPgI`J^99GqdH=&TnTC60sn zq}_FrpL(CtDwi?L76WKkqmyrmP^2r^LRK9g((ckG=z~Jx1@UeVA1wYX#6cTXF}O{O zxmpH6!SZS_OrXBI@lpI;MI;W(szB+D1Mrr81JmzLfRxx0c%b=?-F>)~Hy%qzubKHc zuIDlzwC^+vBLBGBP3l65*vFQPe#(d5EW+G?WIXX|3Gb`>#q12qA#ijI#68Sqp$5Zn z^O`CgK0E|rwGAIjzI7vk5FW-%f(4>3K~u6es*u0E@0}Yi`CKd(oHT`Rq}?4RAAG(+ zozvkwdu*j0Te+4Q9($TCShB@2fT8BW7 zP{_M9;&8(#%2nilS@uo4}bb*B}=_N#Z<)R#wjZEiOHFv-C)<6C)e@GZ8M@*2H= zQfW8Z#7exs@Xpz#_^hA$p^K*R2vuTcMpuI3lyI0|n0g<2_@nK=4CWJZ%&CHmXF}7G{9+ zT)1Fuk~dhX)xwu;YM^SS#w&{gap-*_^vxZCPi#V&>Iv#nJyrn^?hl0bb+dR!Xab%+ zUxKrnKk)wC%`BdBGXB5v;bp@$=A?9zi|uoIpd*|v<7J-b2yY5F7~4Q&@%%O+CSAn_kb6IXP&+= zuT}^!L>VgNO{v3Y3g%D_sbQNm%D+ux&(xyf*QRp#($vf1${YEi>Lm2YD#k_il)L-8 zi&ay;X>C*6}-Nm`0())amLnr;&oSbVbTO*_?-o4>jOcCj0H|2 z4QNN59SUB6OVnORuqN&9>@{Op^>U$DnQ~v=ycRb^XkqHMLF}%CAF-FH+c``D$X3eL zr-foZ<;PBUNaCrP6>MZyEc6wZf&Tt(HdA*M&t8~<&c6$BTGRu+NO+jN`j`cmTXJC1 zZ83v0H~5Rn0*q`(L+5Q9xWUDbOj)NCyqe=d`O^&cp??tGma4=7iV03Jjz$IRZ4eL84h7EO6R75N(ISVO&f2LgE0?{G|}+~2tF zL3mPXCKGLo2e2%KYkNMjZq{JN zQnZkhW?H}111)kEigk7v!)5whbhbFc9upUrvGeS3+)oiM9bkbkKDr9#Ujr~9?JlWX z7Z#4WFHWVLWyfA2rW&eHH;VzY51Il~1=XQ5Z{kO%3@z-hF01``eKS zx9$_4YSk;|6Sjlz7G>awCwVBoc7ea0(#|9)2O2Jy374MlV&3L2_@lgHyhwS`H&Kne z{dg~{T~Q9!3ef<&(%9fb(ir)t3VoG=@#j4g?lejn)=w2eyNfRv+)9g?c=?_P?^E7&_x2eAtMl&g`8oAu6pn>IWB-bC 
zOedp^v=FD%D57kV1G9M=0;ecP8zdnG>*R`g^`RInqMU90xSzbecsYyylnnQCi$JdE z5vzN2fWMEX7Lf1213yx z@~It&fE&LrLV*7YwFcac8v%hwT;M&Y>g3 zZvw2qp3cEPltb?AJkz$K!x`UhCT-8#2-|OL7ufCd1edH@NPaa2mP?J~qbS$>GoS{; z&W%KO3ty%d6%L=nD?!Fc0v-fT`sNO&I0ImxW$sDwDM5> z9CR+p#yNwI@s;oIvYnK>RyR(C8;94h5kX)1F{LtW936+1Cx!gRt|3q)u7sPE-<~wy zgROCv$FMau^e!h}-2f^6Y@#OEt*M3I^Cy7xmem4@tK-nGm45cMPPlpW+_pvYZJ~+o zul@rod3jt}TQ6GFU?Dro=E=9-j8-&|LX;z7d^K1H*( zvyq_eT>&O;10Y{+E}v0Qr%ZyKAcPSclwkL58y-U(0lg;jso4(0l`C@DYmFEPC*Ht- z=3nf|=_P!&cQUblittYF6F$;mAJe#$3Hy%a!osDeS%Ul(K3T zVhM!xCcyKE24=Hl05;`TpnPE@wpB*)`$5COV@Wj}AYQ`3IxXhCR24lx3NhB-2fO~9 z7oP~w2elLt&6^C?&(IURthB^(ng=KQ+TzNtHwdn ztaa8R-ieUjUuoP74<^S5UUs>{0CoD@@^xV7g*W2IWxg0zEW}sDPZ{e=?>XOKSn;k3 zyi%k=RymW~rbgowQ8_MD?BzE`E@Jz563i47!<*^PnfsF6eERrI{Gy+SZ_LhdJ>f;B z)}KdbO$MwqZed>zzvdf$6r=mrMBFueK0l=2&%{a<;9(vGzk1@CyuK{y^wn7RGYGky zK3`X?0+nslbuKv(UYgz#7`y1!q z9*D(Plglt&ubb<)tztUF(NVZWJhjmKOiSSi-yoBXCyBRn{6ZTab?GMaSXKbD($nD8 zvS!wN;S(>KREm=;<8evQ3_hoNFi06!!nXEsctuWnpa0~r*{lYm!vZi$Ux8PQ8Urqq zYvC60fc_}%71ZoC#H8iqHLiETBBdv7nkTH`HJ$NyM_5D9r$22w3_OV&=o>6y2InuY=HP1LBxQsg;>MB>*;th#*piY0lfJKD!56;s zW%J80!zmVzE0yzIkwYQexC&1Ag@Q+^3+vvbfDRvPaPgMOXk;;vU)!h!E6)=%bD{@G zb~Xs2luYm!X?K!&(VYJGmUpX8mJJ?=RpxMkkTZT#OFVulShH zJJ^-h4A5xFgCpn8v&gsYTwj=nO~kiqy1A2|vwX=KXB9(XKoU&%TF8JrS*ApNc~bbzz3N2v!BV!u@JLL2a%% z4yFAddW}c>C&OGK)f{0ZX?KY>b%7VB`Gd#V-?*D(!;yKUl56aQo7uS!7CC;?pN*R#>YG0VPNfs1EF;LVf4T#h(r zJHOE0)rdNaj;OGbZZ)hKSBt-hi{|1>+TD5s_&HSs4*midH*APt^HD2YNVD#Pkv3@b z<&I0BtqT~gqq9QB2+I9;iWkXvp%P^}rWcOE5YJIeUoHSj>}x>eKN2u`G7p>?j;A~; zam%j3=$<~E4QJFjokt9eCfYxbYv#+W({L3pK=W-K-1%KAyBL!Li=4Az?~fzwROLOc zv#}5_5>GB^6J=BUzp-N`WzhD8_PUF!*(^gz?0Q^@{R&}d`qzyg+%FIF4%C2}keHcg zB-!EFnmA%dEv~YffT2dq#rLI*;R7d6kfRffw2U>i*L z+bTWylYU(+eo6a`D?;qPt%8zPx=dz&5HzP%LuG?3G-<|h{v{IU*;L@S+5LQ~-aPgu zClNf=O5oChH*7lJ&Ohp9V6kyN@vkoNA^vCC`;mE&oskJa<$KtpmS=pOaxn@XBw@hs zMO?$}4|7Z?hvl21Vc^e9X4gj?q3iUWSV*(3p(V}nqroUi2)|GI!o|5C1cFi2?L1n9 zLRU9D{XJRim1+hNq>~tqwucpI(E5I?0}dqZZhVy~j=EPZIOOCG?cZzRm#Q{ADCibn 
zYMF$k#zNFKRl+7YJErL#0?l<*VBRVPd(P+c{a!JcGrSztH}`PS-KA{%oMiZPpa?c* zKVe~B`}szREF3dE7v~K)&E>XSVW0Qs!(8GscK^49NnCixS5%gui1>~59&`8xxqHc*5(tTKF(u15^&m@VW$lG%TsX z@bXc3;F%XQJ{<-OsW8;>lg1SQ?936j$b>eZp zn0T4TE@b2UWykrRkGI(|pf0?IRPYK~$Clmdq+ZZcJWkxr)@f6DVyq~A-zU8d_VO^T9Ox8d25^QcN#WQG@T048m0{)%fDbaJ<N?RGsONXwOeAw4@g>5uG%~ufTG_yAo zcc|~@4%$zcA#qThMal4I$5QtGat~iMs2o+DV-WK5dHOXe2&k_Dvqm=fML zqy666Nx0O-Y`ZbqD?d#{c<0|m_%SgO+E|yoXqwH)}H5|MU1djJ~nAJNK%(*VaO}Bk8 za^DT{j2(LLFob3%`s$q@WGz^_#{!?wSyA-L7BiogySPtu0`W-^-0K+!r=Khr+qzD` z)@?KowrJu)jiIci!4KZ;sezGA@-QaKjhBxN!@u_`(MVAer~X&Xj2^~9CGmed48F0p zYpeOs?i4(^w-9Rs@9}YCkFq%vvOzW^2mac$u?;smct|^SI~%3p_kT@1C+ss5m6XCq z>v-DRPG@>22VYdSZfn%d@xkPnaW5xgW8QZMzT8X5fK9+KK(?w7>-vHA3Pl!FVy{YH07JU`f zamYwj)@v6C&Hq({)yokOsuj!+EQvscn-w_CdJt+JuV=c732^6e30xfeA2ZC^#4miM z?h?ZSJU8MxU$78ap#(Ag=Viebevqw@e#9?&QTK^%3W}mv@QWcon8l$oxcM~(HoYxj z4eN%Yr*jql`4@sxGzkb*6=7~q4OEs+hLU&x1fB|GvHDIeUR3hHMFq3PF;h(-|CR_g zNjt(jGYOZoo??C(yl@FWAZ=jCBR zTRUIYd7g#s&x7Ct8Su(!C;MalieEcejQui6_-@w%PJg9L@p(D?TTeMEk7PEYKnAr8 zs_{56NGKH~Ft&7qrSrr9s-fIZe| zTNLDo3N%yYOPJuy&2t4T!UNV^t_8_OT97MaK|+lHaRL3rs-Ef zoELTbl$P-ap0QX;9Q1_d@BFydO4c8g0w+cm!JsD(*st0{eDC2bocAOL$3;=!9o%4w zlL{c|TpFabY-D;9KXNbPucy3>$4B30@{bLJVDzsFXmW}Gi}^t;$zBdS{#N5P;<#7V zjOKq*)nTAbEo^@71%1~J3;xO&;#tz}!c+t}Kze}Kl(_K!|No{UUm@MLa!1eGX}%MfirmW zjCkyvK(l!BCoX@gnav4J10nJ4mp0#I{omSnS#=KfsAuEa_lNnz+WTzo%|hrrmjVx@ zSF_y7-CQZS3~z6ZMctSxz9Cx@wEkAY`?H}is~wnAw>whm>eK~ zMxO`d?_4BEEHOqC@(;ERazdeHT$@*<9mLZc@}WLgV?Dh6;+DWi zaUyh{5<*0x3QW(`=hFvI!J2T|_j}9Y@xTOT@fc| zpBdW1R(fSXH}w*P1YcxN!_RR~^E{mCorx70yZPB&&)J#@#W40z5`2qa#H?I;xi46>ssb4n48krm=Dle&PR|wM*9>2DQ+Oj@HB|@RkapK6;|58;q6Dhc zvoPWl%@t3_=1GtDGYS1H z5cuW7=N~88yIoiLudDeOlaP*C4>$8|7v3|Y%_X4Im;i|_v)S9Z198fx3Y_yN0(C}& z^JLi(goCUG=dFQID?5fUZ+fc_t;L4b-k3J{lvwnKGH1o)&yZ#iGFx8oOx+4I>CG>( z(*_g19B^5>!5Lb%lZRoq5gb^uS-dgQ6YYv>F=UJeX-LD_<%RxmH?0QF4j2Us6({oa zk>R+$tP*FgmB93Kb?o1*IMCZ!3V$^^8CP7-PuxmH@z6p%V|knZ2|2;GcV$CuV>Zal zImQaV-r;#sg}5;-6?I@Oe{!>nDRz~@*r+%-cwNK-^M_y~^@~_Egi$8Lli#{L3RW+y 
zfj$5HpjTa*-Fd2kujbXFcH0D$d%aeym_81^kam~$+X;rLE_Er0vPCD_)nsH^;@>W9 zf%jepvy(&+%k<&j-V5RuTOW*nFGQtjsyOeF7Q1E}1g}dnFzz8>5Dw_9rN71mHO<`(X2clTi##h)X2)>T5{n&^|!|1+ z716Ff8G}|V;iu03VxC6j;4L2mnhtqv2=&3VZ>qv3sSwnzw&9CJO7PEJ2y3W6rrotm za4J(9fBvh*&Z}Be> z##7p0*7ix<+)o`KzCxV&NC^+LS+SuPgW=?fDrhMj21=qF?t4EP4||s5<@vw)HHRf^ z3iaiL^c2DGuBR-=VJ|B;La%I@D>MPfMv4#n#=SX{M zAq3!EHuLaNZYazq?|KdnH)`bxNw-+(ivlS6kP4GOt!IUyU%2VqQamIRk5BZdSIJod z6k^FA;TI0lH+`A%{gHSrxCVE1_~WcSa{T=1F>pGk7EV!LQ-%6=L1lyy9@!|O?oVf| zQ#sXU7i9x#bjEMVwF2K_Ik6uXppdk?6M+V}vPLY}Q0)z&O2h-*pazkX)p%7(AU?WL zjT^E?;Dsrn%%4ZVc)CSPjtqpy*JkkzkqKCQt^_Lyg8gGU` zqJ$&6zd#Xdh70kaCb9T(dc`e4V?p*+E#x0_2i@-Jf+MXa7xVY++D>{*Or+Hrol^47a9QE~uBB2nT^o@oMR;Il3-xPfEqYB?p57-ll zboN6t8hSRAgYn{CHmkmoCySEsMmqU3DqiqaGP@b7WWtH?Jm@iMXC2cnaOXF9XjGPg z$tFAay={gypRZN<$Si%v5!lBFUOG5D4aYhksp6A15ezl0VYg=7eElIpa3ev7%#66MX_WRN?(!0tj!fnDlZlu6_x?p z)Xi2~ui}3er{K1qg=jtb0gsq>m<{dBf(u(|uX|3+cHg|gm)8{Fhx#;hS-F9m-}}ho z^hzObZ9Lq5JcAvO9E{tgEAfn61o}n>@GX1fKyGvmG4}(Y2b7rqeRX`}QHu{}dg0Qb zgW^UXLvUM6-jky)P(AvK%ZiBq*X~xkTjRR_zPdcV>;jdf-Ca*47KP^_vGpo13~{Z+ zf2fY%TNT-#qXBSqWDR&PlY_vU0sP6oa9l9B65~D%!a*uCS@_O)_-$4S=bJyWq>c^T zd1)Fd7LgZu#7#b{Pt3{=BiHn74 zW)*DoTS*KYUWJcWhT_&n0UwsB08)2rz`kcPC{7y8aqQzJCyuM93ko}G|h4oT)YLa*j)sHpLHQ8`o7pRcOstIBSdL!6?EQW zz>bAZfxphxAa|XGFd66&9mfVG_1ZYUFdy_p+(0%0XQ&8j9zqv27=$ z@qKR#Y@5IP8~c-+MWMySB(7=D!w$#9EwT13#}Xv z|LwirDrj@S8Km9yQqSa>cQXW2uDF9GX?HuQ@9t>)U$LdtWV|mW#FwRts9Wd2mcI-E zwcsjf`ZpAU%t^aD7K0I|%IMwulfRg@oPF=4Y;|@KoKJql0{=U}N5^NOg#~rNC!XY@ zL)Vz&fC5;hlMbKVH!~ILyUV6t%_qL(6<9KhKfN&!f~fDVZ+8T|c^JZ!XNPP!M~mhhGZCEK@FXAEdmUfGhDnf)C%0_91NRa1Lr@TZTor8 z8LLUVOL8OaPOe4Zbl4MOGKoF>WDLA89LbNr_Qz)b8tmXBk@@?wgt&0f39AGzZ3)mx zn8t&9<4`oM6s@(s@c9N!?7yF>K;|`=Z+wd>C${n=qa4&O%*Kn|M|s!JyUe$t5SAIF z!s)GRSYO;%{#LmRH^|4~!wo_{|KJcX$4aPm4g(Km52oxckL%agpusgiw0kj>D+N;C zo4g|*W>0{DX{!ZaI>uoOX?I_iJK=TJhBlKWwlI_KuO9;};q@0|@!Ho6zs67|{)Qep z8ebK7E}*>mQ|bk9SA}pz9o{l42#u#!quyUx)Rc)~uP#J_v1bK@I1PX;nsd3gX(E2N zD4`wITV5jG#&SOr+bJp^lAd2;cP^aaZBKL2qB#@Q%J*^|qi5{*fMT#vNe1=4#cX!) 
zZ@!UwY?ob##*U#meA?V$Fz8SfTt6BNy&hI9=DQNE@uyi=VG{1X@KK!gKpPfIi(um_ zH`rK|Be1tL!#L9JOiJx>pL?v6`)PY{BkitauPN+~EEa!z;f5c`Q`8frjpJ><3Y@|x z!E{D>lhsOax!Z#ESud>Dow$z`9lW59cCIanmhcg#4Y1R8r1VEe=dCbfD1&dRUA zw>gpcC@PBQMGS}I)OV+=8w6EJT5Q#7RrLBK#6s%3lWaRLJ`<`>yL=JcUd>>lq@G}n z$P(w#Jh*p)E#`b);j-qX6OT9OHJG;O_JbnBQw>^D~RjvE;~eUVxBWJsvdQ@chUweX^!VZ ztZ-tHqTpw@0Q5<_Gk&E{e6LgDwJ6(?i(-evr-Uk_{UEQ6S-IGN%@l()oYAhE2N8Qdd+)?>stvK1h6apTJU|GHcl*X@g-r7ALCy{oy@|YQRzlaei z{&aj;R?T(o!rJIL=TIcxAxffaIzf zG0xhUh}C`bc!o_s+oxOs+Qv~ZmLO?+O=NL$ENL`fgK%D#9{*XX0yD)z2pBjK27Kud zIJxWLOVaL28UUrHj~8oIS-@j@7jO2kg9{Hr+b$|Q;gd5WoE~e8-I%Ff&gh3P*ilQCNo{&ZAv!y~cS>RX1Q(cRpE`tEk$ zN`b2LH0yfbXRo!7@YB?HcPu{#rB1Z*n)^3d&x!(2N=btk_04S4-B0vcm11FOJO;SW z;6c*{!vm8_NIOkF2`ztivy1X@<~4XMC;;Up6?mKS7|`=0?})J%{JXP9P7SyKSuL? zZv*haAYxc($f4noDQw832zPl|QX;9}%`;4JC znCueHU){*T!j>#t=Xr>iQP_FES`jEDrhx6670mYTcYb(b8Rm_TMcJM*o|rHcBuuLy zdSWQC@m*L3_1zVIqWh(JGG3J%$a}YILF@&Zbtia0_udA9v$_d-UlZYy3}TwvjBZoz zv4d#ZL#3QFhZz}OVhI~pth5*5x>{X4bnl^H)%uCxwMGbfyOkmRi4pgjJ_VPUR%29{ z49-50%s%glf~@=0`&miM%y$cTyjl`w?I_0Ke_!%J)OV+h)H$;?k7l;>?8EnVE-^I^ z!)|0?>*<~R8+*wHG!z3;-(9%efSUN_}@vR%XGYUpa6z4A~8@>wNH-0$lJq4dtyj zaqsZ|*x5HF;GvfQ|Ah7I)#O3wb-x0mrbpoG4Z(a}?+B3ZqAowZKR1hcUF1AiE%dWsh>$fcy&_4by_1!J|WFyF*0I*?-2xOP&!PBkR#T(!GVAf?J zuDzy$PJy~?3H9CSq*lYz>9SBQh~uxmMdDhk3UsaR=Lc=(vCRd6Q?^iKXDh*s9Y$D?W1b#}v?O~3ecWbHv_1!%c5!T2QJwj^)55XNqey@ew3fi!KZ?|~qo=G@yoDdJ|D`Bg- z9h>770{x;Y5FL_&%(wacsBa8L$&{n#@*cka%~Iw)KN&Lj6+u$$6E-e%KfgOP3&%~( z#ZO;P@wo@Du!D#4!9t{2$p(d zINI&4#-8tim};QO_YYTtoIaX)I=w*$&Izt8rp#F$bxAle;{U6QrysY3DgP+P`qTyt zKDD&Ho#2dfNxM@}9ET4>HVXEcc)~E!?)J{ofQ$|qZl3Co_ls)qZow!#)aS*@+r!{& zeI<;0C;_s3Du31(M;-O0n76Kzw5)Y(`-)U3vnzxMqdRQJwc~ut?QGn9E*m@QkMrSw zZnGHoLReX!3NG&J*xVPL-1rjlDTHyDS3H%g^-4h8!b%9DzPr+NFE+n%6gpSdV4S}{ zdfCfzxvd&tIkOg26g;6+dxPM|fN@wz+TB`XXZ)+SrS0@@8<3+H2naBd>;Q#M=hsm;MS;bIl0z8;3bxmm1maWsTPl*4A3KTOtkG0%UO zgd=q*t6KDof7!Q(^_OJAh9P;dVeVPxx%3i`wW?4wfsRq-tK_GEVht2v-9HARR6kqqjvcenUMf>z1CzST> 
z#sFnAtOOy4Ebup-6%U`-qVoMpm)-tObazp%;p;fKHfDv`+j9bb+FFZmHfrK)m!T|v zp&xAAT?5Nj%7a0r8^5dJF=6s!wCVPqz%-sC;8rpRDGho)pyIU5JM6 z_xMNiqimaZHVg{Nfrq`^QP5I1tSt$&+hzChiI#b&RqXDbL zb0y)p>Ek4RHGL!;aH)Y))OXi^c_ceCXAI7ZtwsG`o|qi5ORT3uJ~h(r=4g@UShd4t zz;A0Tpfi5i1}ik}mK5Ai5`fhIw7Xdb5XD->P6pn1L%$XWj!;KW3stt%DG(e!RKvzc zBfx_N^A9T`(Ed7gA(;%q*bDXShjIeUdRPJ{b^FtNGYizQi1|vARsAq_M#L40j0aUyM|%J?f|=6u|=_9_A;>Ju~AV_Q4Cb3h=2x5~fUA$#*4uWj;qs;P$&H z_;jI&v26`pDz93{#A!p&3{7(?0JF*PH2GPzx9^W6(4FCiBa`R<|z znKF&L0kBG41P%k0Am>vO-|;g7ZI2LlMe>J-RWD{OkN|pBg>dobQx>MQmq$KHL-)Bk z*uL*PH~e>jSsuv&g#&5uk9>E=t{vRrXd!wjCg6$ni@1&NU#9x36rQe$09)fkw!cIP zcZ?ujOg6cl$B*O|R|mt)O=6h1-wSlNKM*|D8DY7j1Q%$#U|^4@bm1d&(D_2lu~u97 zde@*ymV9?-uS#&?ztOm|W4;g(?G7!M$-6es0Qj!n{O7SrSl(HQRyYtF-#N40F~M-h zr~-C@0yr)!;kUe^F=%E9R@HprH_TVD8uHzZR4ssp>krtOg-7_k;~BV+e0O3W&Sm^= zuxq|~@cncOq^;S)%tE{PVzpvi+!l*#I%e`?OJ(5Q&oW4|3j>xjg>?(mANwbwnY2IJ zMr(4{EM53O+1>8e2_V0uLHOB!1Pb#c*k_=CZi9Q89vEA}N4o2pZL@+cub(tM$aTi8 z%V;-k)d;lfdr%nXJ^}huc4zie7q)L0$OF>+F>GWdw#ljEhogSXuq71!8J2?(-v{d9-8tfj()iqTk2*`42C2fS(*)$}livQ~OWQI4?ygD!jNl5#*9%ICuX~uTfY2xM_G3q3FWA2^j(j&7BVbKuEugG^d ztRh0_ykmwgl-=#WZ-=Ja=R5h$vj>@n#J1`?1}4=_mA)x-!xx>zQ`TY>ky0*Lc&7T>KT4iiKCU@ssynG0~192we~dCTr%fV>P|edV3j$ zeG5aU?hyWMfGWga62Z>Rlfm_!E?eNMgE5Na$yn}*CC#U#(|eh~KFaRolkCLj zOh{gs3BDD_+0~xAynl2)4oyrZ&iXq3U;76({!=k={9-`skc6q0$f5s%a(pr~1ZSI# z=K;6+!}BGT@b`x=)cxtpR9+0hlMAbGz%dV$-@Z=TJ9`xPHAtY>7e}zwt94e2v!VGI zeQt?kF(B7aSa^iN?|2CW+7E}$_ZOur942D)OJa_RwDH_80~YBp1#(u4z}sF0Dn3SV zp*kGLrO@8p!Cv^Jb{_lOHy-wf6~Q){m&~qd2XDKYiU~!zIQrogE?hgqeD`LLFmXGhIRj{@Z3dLRQi-BeR*XJWKnka`}R1f9zDxJX~{VBr0nk2Kw?}y z$`J0?yMhB{ck>&E!H-{WrFrDLlk*}*;N3xZyVa7FUJHbgCo14yMqjwMK8uGuio`ni zQv4_R$v=A5vhSl3iT_jp{8<|t?@7M91L;_CBO6y$pXDP)TwyoKcc;~w3hRq@GEq<` zWm-j;U>}c#cjoZ}k-cD%Um0vT5)O-VqL{y)3Wm?7E^_`9)UF)HcbI9zfDW2xluU&5 z?^lHL?~Ktnl6>Nq88*xsDgB@~7G!=Chhe)7D0eMtntaF+pP!JR#?evOy=JwrX0r## zPXl%vjYAprw)U+3C?Sj3d^T@LbL|4de-W| zs!Sc;pL};Y|A|mNNfqn!gW0q*VG!0-2HOtw2JN=lJU2QH4bK)~+Kkuy{rqiglr{Ob 
zKjp&L?CWfn)hWJT$i^D~4Aj_C&$H@UnSNpcq_ic1rNav58vBi}zCzjExhR~ss(_Ez zSAC49j21dK^8#IYGqd6~{W)<-WL?g!;Svd(!H zwe%t%@QS#|1!*YK+s%t^bg;VXg)qN49#rQpWRpGr@xNb7(Jns%OFqT%&~7D&b0_{S z1i+!bBUoP-Eqr@ejOD>zcyVNl^vnYz5RaFD%SixZ84x~bT3{_@cQZA&e`fn#SM#|ml5oHmVoV4RxWU>6Hu_TrG;hsd9lFT-<((%6Z4O*hOozT__p;sIPdP6v#Ek3&T(~`Fzs@ob23=Ef_!(I zx@jM>&<+b0Rd8?`~>vD>L|6&)pL;&~8jN zah*@`FK4c^pE7x1ZIB9It?67L-(BMk;=_B!;p`c+`M#FkFqZazFV=;@uf`y@a~^dI zT14nZzPq=FwRzYn9Z;bC;gUo;&pf#xpJRd_DkYe?O~B9nRHQzUmf%C@U_G;j+Slir zf*w1eI%Rj+PL$pG)d_Hte4%Mo(0NY}%skb3+-pDd@~uSQ4)d+sJciuUa7|lelQd_(ZuD_G&>s zs2L^0k(IXhJN)fjY~F6Q1R<)G_SIhc+M0p}lXY}v#C*twpbFPD6Y6R*I1 zLiFJwWp@vzd%$;-HA2h%Q7G3W!I?`P@u%M0rgN)oAd|AYalb8KT(+t7$!mrQQ4$m` z8>2z`HNm2GB4|Gr!%-(~SYU0)trtweLscT2|5X|Pj*4Rau7!i(UIt=o^2VCY=Y46P zeVJJiR`>4YiC1^BbMI3@IV>02Z(e43Eob?zr`h<0_TpXB4)7?uc2?4-5awzm!uR(z zEI9rr-{)V7vzsGP{dFcEQ_~mL(Y}53;Xrs`Y{{B`4Z>6Y)L&Qg!KZV(q;uPcfr}C` z)R(zJ+LuhBubmmLr0i~2?l_dRRXU!%Fb-rWyW3bd20B^_rMkoc7)RON9iL(Nx8S3o zK%9U`K@3jI2EpqAHr!4>2)ou+plC#29CSCEeH|VJ}f5(=9h?*FBD*S z`(rK*KFAi{NQYa*D+nk(!=f#&@+&iP(fC9vras=mX9vGzGgHVz@i`9i9p|yfn|fhx zei@!j3&)>M5nL`-1;(xr!7}|R;OcI`-fq%H&v(>UBHx{^_=5CG=4V?_lQqgT7aFz4{_YIoX?N^&Kf?_v#&A=cEsnizwwGVu)G3$DueLQemkCcPXkU8 zm*n{w9|$@6P55zPD4KQA8Be~u&a5h_^O!MEOWB=HhCTevT;^o<(+;&LySrR(hSqV> z!g%uC?boL6NrEBl9^WaQGu0dQ^2Mk-LlZOSn6v)^1EJ`71^f!>2Wx`Td02KNP85*~ z_vbG@)M6=~1~h znZ$d!LfjXx?gd=ewTErhE~EYA2$*;>mibsIik(b%iKM!5RU9memk zf`V3kV8i6N32|=1&Q{{C+XL`Nvu}67F|4clTMR(mjO}w-7CfmJ+xWdHaX)oQxc07N}zxx-{b2k?I zS>h*Oj^SPcgo2Qcst1Kn=gciAKZuEH!n@azBhM#Z>Wwu|eN?@sMr3Y-`6!06|H z?AT_??rvw{#2p!U%kBt2Yxt146F;dsItlddtz??dzwovZ)A9 z_$y)fgGAQ%a0KKMf9fdt?%sb}#5MGZr?#^YJKjCxulns_@n^_)w>1aW?z+Ir`&{51 z({fPdS{mLzyoV3C+t5AQ(K48}t%$m9Od3#;27OQ%~K!FdbH@x5HYM>0uJD>uhz%I%p>?eeo8+Nji09A zV56-(DESTRPrR~x^4&>uX0bzoGL%!7p^+pEzfTL~rZQ?!_E7{23?{?UQZ3fEmoCPd zR-s~FPukx;Aw9R-1U^xAx6ECDEM-|imN;tbX|B6a&Ki%uY;l$cXP8LYolegP+VkBb zT{dU}dWZebTdjw$ruS!gTK<4GbeDEjhl86Z@s@?5G>F?MW&rMw7Ks>b*c^EVA7VUC1v4;s+5Nw+XqwXJN*(+Lj#Lj$dc$S1NGuQCOj89B; 
zYzedyZ%#Q_#QxgIW84#BW-5iCHg_eT$N(7FP)YL`UpO&Co();9kGuC!KX8NxI`3XC z-K8-a)+3#B=8kaRDaZNfKN}4EN&Ujp7Wg8=MmXRFpi0?Y&jMpO`{F<8oVOD({xW$o zE^1*;t`XBEzTUP(5x9$$Avh|AD-wTiuSFRy%;5KMn^DpUXXm^oD&is4vUHLBDSp`*}|VcODdB z>W9fVT0x)p*3g0U9x*h$@dVpm=Y-p zJD~|>cgCutu&s8BaB}Q;u$W3Nis~V7{G$?YP4~k&`IQ))(H|Rzc(UfpA<%1PIi%i} zg|KT=`T1or*#Ey`ELq&mH?LXGOo)ehgSePKwePZ|$0zvlmP}ks+|0pcCwP|PUDkU% z?cGgJ1}p3JY+83WkNqESvpfb3lcw^Az2#uo(sGc!5dsAhY42`jf1F!Ui3h!ib1_(% zd+i+peP>mHit2dCd$vhx zma(NTzjKr?rDsz62Evaj zSTW2EX5^L&DXC*{+!Ojv%G;xRt5&0?6#rFdw_>p{f2KKxfw=EN1vWkIi&_gZSoMlX@ChM5*x#Sb(!GXjbtT|RgF@O- zYUh`5KNHH*;e<>M^c6R=OIt2;3w!GQ6Q}k>@hr0b6w!#~RI77hm(E^jFe zB;TFJFA2KcCqDd5k+XJ?BPdaJC;xU7JcwK=jqvtBJ<9G5tLO3$KU*sv1#(eD#`{B{jj{GEhzcjw~`>lVIGIL4a2 zGvSAC7Oc>0V!7{b@%k%y*f1;w-?dYASMZMgDJuqZ(^#P8S9a)HAN&I4I5jU6>#q9n zz#Mf@W|g3)?+*=o)tT%PJITlwlADX49jhmSs9=WTnDWf1S#ZB_;-Z$8XM zjcDZ^J_YEbmxR8;N3`{eg*664@z1>~T&U!Z>7Qmx2QM%M**oM|?4h2TV=w168)ExWb~j;)IiBnA7Ziwd zJ%6kOHuW`vV~g9RYJa@Yaj_VEQwHN~8&kIUK>#SLiD0F&5;*ry=KOC2+SOBsJ^v4% zv1Bou>z)96M1`=X>nU^6*~_-7^e8 z`(kf?ylD~wam@Mgfp}F}V2X*sFx;?$SUL*us-C##zR_qky#&|J_`+WbtJv`HBv`Ig z0I8=Qum$Um@PCbz-L+)lO=r#vqHeI+#8;osQy^>p7G{@B-1Yv&c=BN^hToaVr>~NM zv%ky8ofQVY{!>_`hZ?T!5n=5WfBYzD^0@`N&~8%&2g!G*I;BDQI(P&grR?s2ih$E} z{x!`UZ3UluBrs!x6{K}OZL%tMMhnXBB5FpU)x?8>-NXqn%AfkapLF4^bRhqd<&VEi z=#1}C!_K>YETJ_N#tbWm;)p(wG;{_ZydW0ST#C_S##^q@zKP{TrGUnqJdjSm$zs)| ze0p^j9vzg4>Q@?g)!O^)J2H-IyTt3c+9I|P_56-=iRGn2BrC*K`VSBPlJNwS3@ z^p4^kW&zph4$}Xs05d!#xKiI3SH#{C)bzdJK2mojNDDf54(AJ$15gws!dORTOj{Jk zGTkCzoN^gFdiIYsK3c$wj>qGN*M;~c?>P@ItYgl;Y2f*h`0#d@m}2ocu1>xJ8P{|? 
z>AjDSzVw96CZEA2@)~S)UCJ6mfAdju$#*a%5?@VCR?#kD%`%r z6KBmlC7msA0+vP8E$Jb*^Zfq8#1WR{RiE zb*l~hz{6xrm&qqi#2ublaFSj6M!DU*Ojw+GoQ+hy#}CKm;|B7CC=Of4Prv)XO1>0> zcR&nO)=1d3N;%rsFUQ5zAvj%iJimFLydyQ0FzJgggg)-e#&-?DCFCcOt@prx$?K%= z3P!;J%I?;@cZ7I@8t0wKHmFLonlI5~apyZj;noQTxs=^`TMdUBW|ySX$afdfDaPDl zZTzb}l)Vk$8sJB#s2=z#wHZvUklFHDwt``p8I*%PwiHCrYBAEB3lZ_zXox_b( zdhgA}Jy)*q*48sjIFJn~C(~hH_W|bZN_%(r3y=jSqDpx!kMH=w63F{v(=Q6P{K{rg z!F}=XjtXq<3B(R(Yo1KLJD>4l=yCJ`yR+SbT*)wu>LHHMSy#MMm?v$#IR@$|yAxd> z2d7`CI4oQ~4pk|;>!oUjEFx3rbKDi?%1a=#VHliscqerw-(9;W_0+Bp!q2}f*{hp@ zu=rR71SR!_l}%YZ;YlQ}cP&NBf}dO?w3e+iON8)G#1g;L#tcFZ@KJ};ap5&`rxl## zb+%X7M)KX!VkqoO-pN)abn<6H5pJ@HN8#K&J~N>gR7@^|l7r!J&_9ZG8mi!}IU@X) zH3gqV4dcJ8wPDb6F(lv+S;pS)Z08w_g^Bm1M6-P?<8jb~B%I?-}w1Fot7dIV0 z>WH&W5P$dJDE#?gwNSUi1I$-eL5-&>W zRXs}vm+Ko?M*e%gZE-Pv>Jy7T^3%Aomn>|LD+kx{p)jD;VXS?5+z%MES>Q#36{_+PhI-@qJR~Y-5vEX z!OCA!Va`lXNEt{jrR6%%GEax^%ASle*F`9aR>gHog4xmYVNiau3~ul04I%Glb0MA> z6Q_zWwB$8^v1S`vXh-iKA96twd!3CT-`x(EY(l?ephs;zuRqqx>XHlK>cd1BGxz(S=Q(6*|D`6_(V;NqXtdFukU|Je@736na`_W z%ziicUNl|ExnPQ&l-&(jXOE9djyHC$wu3~tnq2fuH^vM+${9r7>w^t1Dw9+#n zCCD3`MPktTqzNB$$M9$Jfhhe{f$M7fVWC?p8#*EqzLFnoOUZBcaN81odR78Q(3 zFU7&x5xD7g9RK=NiT3Jf|JW%2roAy?i4(Ml#Vp2ElfB60cTak!-3SIvkbrCh!2Rh= zIHG5P$0!5(m}iUO*T*~WK?iW6>`qrY8ZK;HB8^3NeAh^xrwImFAlHW}4)lel>y@DD zJ`n6Y88=@XjG=uhhy*W>(x+wYTX8hhW|u(z?9c4%Cdq0>rt?AgCadaZFZ;1!x@EOUUx%+C4VI$K2g+$MCJ6TP__&9@)!U!=Cc7C53o0 zJpohAmT=XM-)uU4pWn%o$I(B9bzkU*vON{}L?I9>DolBl@nCo}T@23V-cYCgO!)NM z5dTqj*SW<7@BIyu#t)*oAZ2%uV+Xfav^1Juu*a|T`{ugZ6yGP$6l(6dK|@;=gpu#g z;_)BpamPuRFHcOEbPWtZdlual1eg6Q!0CemoX9NX;!{y*C@sNHJzx3Y%`4b&@?JWS z@6O({mAzLu%=5{M={$<~iyo(V`pxU?xLh7w(@ljx#@m=<;cMRSb`di7IP@x>&BblK zL5J^$chh1#AybhH3PmP#rIMiDMgdnY zP?r9UvxFcz2UQ%bA*Sh5`t#X@HS2c2x zc^1AUuc_C)V|>%V7B*{fKJ_+}Vf&o5tgYfBchM`s=6*4_CP~Z{PRqeg^4+yqg@DZi zH>U1C0L{s#daBtM7i>}BQ={}@FZot4R(Zg?m#c--ZKE*0iGH>Pj%Yk|PE+g#8<;}b zUBy=mnA>G4HST73o1Sl$&BiFV>zbgoY9eSn5<|SLHk=7C%=JX zc7}t$YZ>eq(+kvv`TQ#R?rcq|8}s=EpVGRM>3>QE7Mu%FXD_pUoo9LN^K3k^Ivvx( 
z5Afw4?d+3WA#{+h_I7&>Ga}#Ju_>ikaVir1E@$$4tNMa1J@?=44}@cKmdv!bCO)4m z#wq=LP{XH7y6eR-=vI<|&tg|FFUb@hI+@{rl-&)_7>6T-MUL*|yK89e8i#TzxkZa{*8(P6TMPc zSn`9peW>Mm0f~6MnfUNGAM@IzgRJ>>I&{#z>2TT^)+$`(%V+1}#fDTYxxRyMN_@%M z(uzRtLmW7m&SUjEdtqo{87@i+$H7A)_}XL@*iQE~Ih`q>U~ItHPHmj{UW}{BcW22i zNN>jvhepcoCM+RlW?v&gb^cgvp!;BngAERNx!O7Hog*Bor{6b^Q7}7XgY@154?IQp zNwcbEU~1oNJGhsO=hM;9c4RyPGm>tMufJk!rDP zEA$%w>T{ayh{*=MF&W@K^AOwH_=w-GEkN(1i8y5Ga-NX>on8K10zqX_@ZK$-?MqX{ zhKdSYSr&vV7u)fhk2K)5t{8N=4=gMFCg@)uiZ8pW(6Yh}8{UhhgRI8DJ<9GjCfUQR zvL#MO{@LM+Hxm4_*9;5WqJH*>o#xCVED)YP ztAN1C{UE+DooD1nqFZ?>I(Plz6B#ixFD1ZGyF%~`f5IN&KJMj`j!r{z&`9PSKe6i) zTcwaobEq_E`MZmSKY7k?{3%4WwehI+WC6eK*Ta_U(tGK!2*_I-%U;86<$phtfZ+1pZQD!gf{Ax(PE zO7Pf}(Wt6lD?Iz;4rTkQz~!z!EO(LP?QXuPb*2(6t_?t0u?s6~3kK~^<*<}|cOL2@ zuEL`+b3zI3{q>2*yk5=DK1_m%XNV6!`aUB9BbTN3{krrl+_A2SPyTw7MX%4J`Be%$ zPTR!lKE36R0mayu9gFA4clTs*A1JdZhvz3kq1Q8C*6~*j4_j7Z!9;(Yc3}W_GuDHz zl-=DJO1`_W{X+8T5qM%H?M|0Cqn^RTCe15WaG%cjYkw_a$+N#rTgD0aZI%QtyfQ&e z*JHv8^4*EeszB9M7o2Pc^Vi+}_@;+=YU*kz?;gO)1H-`mT^Y!(k%4KGX7QIlV)0fF z?H_-8!*$4aSMeYPl$`RQ{>^_Z;UHyq_p<1@oPi1>k8m~1hwP3)0d&(mNA|)>_Wk1* z{iGeF8q2|qAJfy(@UQ%{v=jaaC zE$0b;Oib}PWq13c9q=*?XtI&910~AtT8@~5X0oTWNN_=C%I=B^j4&g(RS4PT1^TPS zkhW!p{F?S2 zw&z?L%-NI!Dr+vVz5_1sGc$AW?4>mPvTYCdjeN%178OFIM*{rQTFf>){=;9NrZfI= z1gdH!^A;B+u+*iy?$H2{fzj-F(qKHdQjC}HdSTtB$I{gfMvyv|yq@kZAlrMAaKFkN z=X{i)ww)cuT~>2GKH32qZc5;Bv?&xmpDlf0?T&pZyX(_!fWyOjgx499Amu|Ptol5V z_9Y#8!}9Rt;}q1>*vjJz->{fBMX*UP4#csuSXG1!{%I}4u8J`1n-s`T zD5^pGClP$onM~2H77LWu#VKQ|P)*hoV-}u}HXk&BT$a?MY}}vWD+0`^4<6O5hF6z0>SO?3j~0YCS8*fAS%ywvp!J=LSINQJU-K z_(Ec`Je#ssAItVuVT_>%j?G*yU7|A@zM}*ZM>)bsw=8F0SzEND>@KX)0&QR02p{|a zL{lX2ah5UcFuozp`8W~7E>ZXLv=(kzWW=`eDPWQyf?XxbFjf@9&3+SWn(~0w*dDGO zuz(FKi-(~Diy$@W1)I>di@!2Y!@t9FF?;M~e!aYzZP3nveKF}E_1@2_?~t>LatN2} z3HbQRQeK|%iv^^VLh8y$IIudMjTz7nN8YKx&gwv1aM^;NF4lxO$zqVydqd2BuY{FG zhG?lJ!4G3yv0p`k^k|70{CY*Z)pFw?OE^2eOBQ2U|WZAM9^4&=*E5LB4BD5Lh@#cUiv{fj@H5K3aqdv>ojAe-s zzo!76lJD+D!67cFW}tgTHtxA|n$Ng#mBC43)s&@z7T?aiKfUA|rWT>$o;bWeXfD?= 
z?hUoG%3y+HIM}}lWeXpxpjN#I?{!Va?w3ROWE~wy?o|cnUU-7n>1N^KhT&M4OFJ23 z8O~MGk$!3(3ni3q4Zcl`i5GjC_WC>Fnf(&9RUCyKx3&n)w&Nj4Qbio1A)we_na|Gg zL;Ji+ypz};k2-s@{^YxJnoj=ZtFo}|(Nw-~RSZ6&{7q8b&3D(WXWKUMVN_E>GWsKyKK8B?cGVp0b;zKmHqAJ%dQt=PjL+14Vuc=D#}4eO*tID z5(2rSXzy-)e{2y|qReq?(@RZ{u9aPDuxNqw85p9J`dNJg0NkLMq5>| z>Q5M(Eri3-;xd@_vKPp#pUWGs#^DvpO~(|!}A<_ z>0>&Ybsyxvd>*p{rwbsmC=u3*m$Brp-}x;?>f~5Nq0+!yp7%@<%;#6Yd!HaU6=uWQ zrVPShOX5l{^g+)*pQSXtgwK@S7kUa)VD7)L#%O0nEQEyypHx6#lcT&sU z45SZJq#t5j(T%dZJNnc`{_{qd^4J?@M2g{;vnE`5X2Fx{0&(#93e;%ni;s6_u%&Av z!8M3{cOQPTkboL~>U{zZ8B&N7L)!Va>-(8Vlnys~vLQdGnN2!$nfEy6VqpI?yqdC$ zr{%t2E+dPeJtQ86UzyLcAM|iWIeXN-aD?<|?sZ5R;&MggW|;zO$_<&#FD;yYON`y; zCgT3j|4DZ@8N=cby2lR%xHEgKa0@Nag0j0kw`^$VL+m^wobI}Gmgsekf){I6O0)bu z(06+kidX34B9Q{Syvi4}_Ef^A`2)c5m@6+b3&9(=%JEx|9HvdEWWQfT!&m(hQ2P6k zz5Bg}FZq{*@2RIyKD>qJPCCY(`ewp=?=0vb-`(Bcw|M;ZJOq8(QNFQ}=S+Ra97V<8 zHzF3wRHn1iJALr9YdJb)h2nvIKKwzEIwV6Slxq7!#zJ-Gv4VKUu~lgBemtf;+#^*r zCx6pM3Eb3hhNz)8=p3}hSUTgaS6Jf9ZUvztTL6{+Wp^SIaBXUojvVWWZ;Xi5(_0rW zWNWi??!=hr6v52vs-RyP$b&b9Vbzr~)Y6y1*3MaMwLu)bxmyJ5zrALIR&S*}_!NvX z$iuXb>wIP-vJ{nU7(6|LfIf#=zHKYd^DDq3T1jYazLKX@ePz>6mO#skC|EqNh;2Va z%uMRF-2M=R7oR%t+ha7q{C6eTq)q~N)4#&G;X|>hr3!Q9-7)y^Z0Xmfrr<}}oy#wK zD6RhEl<8=Pa+KXw`j}(Cgvo+X-~w-`cQdDt5iGjiE`8I>8;usx86QjioIq2i)D{5M zDkAtIrvyr4lli^gkvQ&PDK1O=!w+m+%$&)0=U83{nU9~cA*1&4u1=a~knc{q`GugoEZvb*~?Y$4&2QBzmE z0~%eW4&JBH*ciV+Sd!`vest%mtTKSYm%Vw(=}9Q}yb`_Z2jU}BfhDH~L&?wz5Udnn z`OOmEJS7^{t4r{B$rt`_(kgb0e0MT^3qZU60Sn%9gbzo`?(Srvu{rV7Gj1@Kz&y}z zOo80WEer~Y-Jns7&+o;e*14J7f1?bn`CA6>%)%gQ>=dTxqlP`bD)H-if849D$#0bD z!gbp!I7+@d%>fO8hR$$)otZ6}oGd9*rFn+-Z zG@DJ?o$my2p}X?7x4K~ScpzV%?~m(;SK`$lYS{Y4k8N%bg-8S1HwmP^oW%_Oad9jf zv0`*7dCLdA-^8B8raJeX2^lc^a<`MeofxO_k+dN(!j8+G?tz=M47ZcKvBk5@CJ zlFxi@cnN-66O9(VD)=>tJUI5QfJeuIfvPv;2p)+2ssGod<%=PsWjRweAh%c*ynF8s z?(dff>a&QMd7k*XpBymVJ*=sBhAsU5M&G#r3ut`lAXS+Gcx!?LXKNW_$%Z?^XG1Tz zO8rD1KP?!1VK`UsAAr9CM0mzp8QH!#wqQa8xc4gqle_=e+pY!NyeS^%y)4B45}tGS 
zX?1MToi!SpHE>e3U1~tO#$s9P(WHrp6E+Q+Ah0zPkcq0WL@C%5JHP)6CE$v;yNji{DdR=HvBiz=M9aws0$_Cr|s%7@;G#c*P) z2JGKDjvv|>gyvZlSeB@W>jDed=e1EV{%Hw$0Kc(k>z4D$_YyHyT!2c#Bi^D|&sLM~ zF4Q9%dS5=p0`6bq9`|z5mV9@!_HE-&KD}awdx`*O#K8g492T~zH-6hyhF{)=VdkL_ zt~*o}oUV$XYSm;ozh9S)jL^XV>f6@O_rw(wPD$r0n?M6)cR9ZV@KEV5{2613HT2Fu zbd5D$ecs@FO|emM$rWYObuHQUu517+KaLG{afcF%PKAKjLW*pr9rXYo@&rGnLoe4Li zjuT#(bi!oD{-3w(^iN;*Hi_ZEFyW$+;B`vD8sld zz0j|I9uxJChwp*p)$Z(MS`T;d+S{pkIVTrQrB`^_n=@=oeKwdKONYBp4zSt&w0GB9 zfNT8|@lARyxBU2n<(m^9P(BKtw`MbiDDuJVs=$!nf!I96n*Ucb2=XV0!P3qLvS)M) zBPxfXS}zIab5~sdHCMW%WemJ{MtgS`sRy4{I!<-%IK2H_f@X?lSav^CFq68%13Ahi z4-A7vW$&c@$#-{Rf*2=V9E2J=RxI>hAbg|#eO+{42yM;cInN_;DU@Pj(ob%eQp;Q{ zi9^s`0AAjZ#7Hc_Pe6n}U0UVcg$Q8?;`Cp(2YqS>;!RM?a0xEsD;$ zQw&w+jgY2|91Av--St^x0}aoYG!1QX#F3QU!R}GmM|+KMe2)i2tf+#h;rg)WxFS!# z=!=(XE3tgg0IYlK&K$!-!1r`HSk9D#!lEkL^NYckiX|wU_mNLhUB|AlWN<6X2ajd< z*tw_2c;T!}OcIc@0hoYFBLQuv?{JE95l82p8*N`{q zXIjJ0f8@rDvV^Gj>QcMw0{$$M;1OnmISyQ?oaYHS1Bm0WNCz}`>u^>unS2H!d>5jM zvknBaH&zqXBUjeT@T1@Cvcpg83^yXAk1uXoQz zYp)C}no-Yxo@-?`X$4@}k_bO_RRGMJMEdt z77ZNGUyMoWlj!;XOKO}n6kK1B+qupSp4d(oysw#}S&IZ$udv6|=|>v#Hrjy=Wp{1r z=Fk)qE**H&1$`*HyM4|OS9m`Ygm7=#TNcCdE=|x`JcdUq1)|x<3jDU9AF}vVW@j1+ zpVyW`TiS1OgDv5?^AfN$t`M*LKIP-=_p$kg=`g@Qhui_@SpNQt9N*=jLslB@`B%r! 
zyzOA3+l8PdjfZ_13n>Kt$KU)c#UHdsb?HbPul=J0sS~J+Y!?6lS4^06k{0$oO7lG* zFI3)hPulsy2=-BSci{lQ0y!p>8d_j0Wk4~Rwy1p3(^>bN1H8IOyEMl}!@ag8($Z#k zG^XsX0}SwraUXVeurG+ORl-W=fiOOW@wv6sMebdJ&yLAsO0RPEwmcegGfQAz`DfM= zyPEGUl< zUV9L`bIu=Xlq+FJwHk~zpG@BBFjVh-Lsj`-jIUb z>3Qheaf2@$&6&6{3o;*MK)p!=JMQ>^$14_KWknLUUtYy;%Y0#s_7SB4(NKM}lsWy9 z$HV~@cxiqx7F!EEvvMHt2bCc6dJ^<~C&PYL8sHTfMyr!S+^R2jMf8br^pLy_lWU_iWXj$8Nr+q10Z{x z2&@{FAo4{#pOhDYqRvvZZ~e;;_FBZ+p2maKe}(Wau7f>ax|??{NW*Y2i#=RVdD0^OgZ%_-&|IR{ZbN8&|kuDe)!F1ZY-raehE**;UKZ>0?VaTQpE4}~t>zfx+kAd?egO@anF4eZ&e?jSJqtAM$$6(D;a zWp~X{DBoCunxDRMtAi^T|CI>)X%DyCzLhm;9_Aa;Xr5t`jm@^Fcw75*R;HK-samNZ ztGSK&RKMoocZ)Cw;!r7NHlOsOH@t5wgXqm+U{n>vF053=4G%>)D1&@=#oE04st$b8 zt%3#No=|>CDkxTv?~by&YpVrxQdO27OtFM$y6dJ}TNB6cLX-L{Cv>E9a=Ha&cUS9# ziOu8TSV|S7T_y(cOm)8hlOL}0u0;1e>S(pcn~lv3g)@`O;fJy;{9ZVXf0vKN^P*z3 z|N4%v58TLtdZ)k%+MkZ>xy9@jHu5=^)W1t5-`#~{{QIyLW>k|8GJ46dqI@m8Hs>SH z(l5cg@-g^pyqGs%kfSrO9HyIxz?hS6Y(ek-+~xd%Mo zwOZKyd=yG4yE{C~5&ev2H~rXZ19p_%NxO-Mmp(?C`h}rKB;{Y+7`r;J3BA@&1hrN% zC|hX5_k2U{xqJ$hI*6zCiumxUQ7r6kIK)F4Tp!*G#!s5h7mka^yCaLx{lyCoZ+5bh zuc?q1kP8ovTxQ$8oaI$7v+12X9T$5a;MW7%S+-&!IIASWfg3gKL*-9?I^asNlsS+G^rd~MtSl@dXvI|V_cl-jeTySux)>xBB)h1e}(2Nnhj zXO^HM7B(h=g`#4&0`L5<_Y1&vV4vA**6$8j&?|&*TTR#>NiA|@Ky_m^luZtVPBBfgvO$@7qxjfh=Yej+?V=~?`rwwvGbrEDN+DioPDK^NZFG|0BS}A73Ny~c0mRH9&TGW*`drqf_azJy z!GkU}l>^&U9vsT|0~ZAuV(YGs#uWnWh<8Qb=k=n4T~nZtvAgv=d(hK8;c)tvCDW!e z|JJwZ7&b7Odo-HB>U^d>xup*dC)z|STG;&0@;9ESs$kd)HFDx&0I1Mv$euKwX~M#3 zjAA6qGN{1Ou_N))x<$nLKmus3DS=z*Z-~F+ZfeJJKL*a`V}$2*x<0CvY);IDcT=*U zpzIj=c=-v9TvvpLk0;~soVC=Z{3q%8UIzXpF)%}~ki_PX!*E^|PAdsPUcn3sFD8MK zI`d?l@rK(rKRB1WT3Gj9fHI{{IIn;&`Y@Am;wNn19&ZEV=P$Fj7n_O47`vOcm-XGn z#BqDY9U)4IefBUN@IK!svRvYY&+=IBXSo8NY@bHX#sq`^vnputlw_GRnUq%=jSl7I zSla!E7WmeZsOw3v-?A9wy?e>o&IY>FITJ%v@^Jar3v|}$%OpoKANF`J@NCJ&OJ9%I)c@2m9ah~-L*0q~*jiw8uT z-{vwx^l+z-5YJQrs*JN1B`!4sd&chiEUclsJ-S79iXA>^6XH#8V^oV>&Hei63^+TV|0f&qE>k6Vn+KkAePa)pSH#EIPTA;raJpXu+?I 
z#P?|m9Kk{eReL}z{Z3JRrEJ`hmV@7xx6rerZxh+N0>H>LcpOqdizc_r*T$@1aP`;u%i*6nP`!yC^2Xtz(E&No(rZl)!^q3Q->l@D{Vmke?0 z2IloFbih!Zhb{D`1>9k0yyh=+24B^#UPYer|x$vrls?AGyIY69Kf_se&L_ zk&eR}X%Jvj z0FF;?5NTxW?qLr8*v|AP$|q_)-}amW%~S z^C}MM zgqHJTXTb9>%rALl8pOYF7aek%gmLWXeP5jG}oJ}n>P)QGj_LZI`gw$kaIAdZVMT= zSjI&7R9L9HNc6+T8QpF&kMA2ze7bI!JCN@K(?0S*e_$eXOtPnHYN6=QRWWbZSUhp2 zl+2Qeg+05=;B4x55`2C=Rdz^0YwIEm-uH;EI@Cy_>a*ba#~f%eMq*ragW9t^vVccv zs3pFW=FNFa4!$pePNf9k0u~VO#8KGQU4e~d5vb-GOxMp2j zvQ|`~Wefr5SZ|uH9aMT`IlPdtMrFqCHl8%Y`RUf&^bmm80YZ2&TMw2b-xLM^@Ib!~ zKCW+8!mb8gGWC2Q>`kZ!nF49{T*lF$;YhqTwF0LE4O5@QWu&J%5kwP8z%A?*G5@oV zI#^`jRLy*RqkV;@ExJJFtK~s%bS6BrKTJ+P@1gUwi*enRB$hu?OPwnIkf@Au2wxoy zZ>loMT_s6uxmSfn{9w#DXhscq%$tqA2W8>I@J-c{Y?}t;F-d7 z=5ZQ7VkX3nif?{0!v=@D*}FN(1Rvbsb6);V@bL&S7xYE|rquk)CWAXW~WqTNaC9o9uR zrxIRW6oZfNX45rW;&3m^y<90X`+v^R?(S?%YhmYn)ER25 ze~);2G2b0871SngC3#Yx=;fQGs8AG#i(F^ZXjyUCv!)WVFNHylq#KdmC5QTa9_vi- z!|;bvw53TM&Mp+dkI}A>l2XT=mp8(VjNRp_*yAF-`j)MdRuINMTgkHNF!!B?$To$b z4P$pYYxME@v`d^&!GkdnK74#=UfWrX%eTld-DxB_8zD> zu*v=%r}NwklA`#qW`+WM966nKH3Z}A3ssnXQv!Ef&LVZ&qG4uGIXFN6O{x=D(Tp!i zxKoAcemr`pp!+as5@bT(Kh~=ge}UY@EA$4JkApH9*c-l&F6O@?b;c#28Jq~8o0k&x z*TZxg%eS?@9f?U^vGm1hX?Rk=GA2$1LQ0elSu#Qym)zmwxK0IqaGxTEYPX4+TYF!jMe2lY$a1?@NuMvEDxLza-aY6nr0eY^~z~`IB65&=~ zxU-*~@$==urOT0an1?YG0_;#p!&j}0-K`iP75q}j*Nq4Bp}B;6JQ{1985c|s z$CE3(X?*nrptE>zdy+qB=1m~;wy5FE1Ock`yJE)XgQC$^Q$W3rJ#(@SuuAua!&eDQ z3}a{f+?D36_j4R~rHF%-bA%u)H2~|s%_2=}cl@CvK;6G8ICi-*iS-D8p_kS0vCAUA__#8l1R-GDdbG5@osz&UU_XuU3(Nkw381F z+dW~yn+KfmurB&L2(d?E7R$eP6Lk+vgJq1}d0e#yrStkNap|^r`>GJ%yf?;cb<4Pe zInJ=SLjZmin$S6B6t!yi!7I;sc<Q3Pbm1(G#^vY0%Ib&j_AGf(Gaswh+i(F_4JUvmSEJ14jg zx`w!qvAZ#&IUKG1ujQ$=1^i;OuK8-_zw3F?@>1x4?u^}~FEGR@?MFH5AUBBc6+l9t zDpdWONN1M$Bk1wa<%=xdnCMTQy$FZJ>P#o+Ga3fSJbGnyJoea^qJ74DsxVkbf|AqV z;M@Xum3E7usfeyzn8SLYvhlx$lT@ea0qK8S2=sIcqT}u8+k<&noGH z1!G`fR2BT!7z)8&KvZNVV$cO1PM_q9Ly}@NUtSX;P70u_-x;iqtmM8gHOATPOnWk5 zi>E!qTGr%S!z0G-+I`GGBF#?Zy%^ArvAZ&PJq+u<%XJxg!Z{HiKD#S{N1r}jGdTz= 
z1FKQrR2nB=OCSwiQQ$YO0tzn;5skmg=o?H#-B-o9KjbAfsohU{gEN>9G!HD4FB9`s z7wIG0JQUbv;$M>ny7E;oDe^0Zg560FtXoT5ivLpc<>hGV8I9r28PqUF5@Novo?DY( zcu{Oh?zSl4pF%#a4fVpGiZ4at8+Bm5rV#9Y&VrcX2+pU^6z?*2r_eD2YlZ9WTQ=E% zC1ZEqzox>fv^k=X|D3S#r2wOAwXopDFD_)j8#YCg;vI7#w)q^n(s!MXeVC7T zSl>j=wq5k=h<;MqPy%0Pvo86ZMZ{?DNbEXLfo=T}_}{89de~q*?7LnK{i_3DQH?4o zOHe@{DFM<2?zm^{IZ=(g0oaxbLFYRM{VOJLM=i{8JG=7>)>z`^XQvz{owf&Y#_q({ zO#xc7TXa6x70=``E!b#vyqv`{Gwc0eIqTs#)i(hatn{QywZoD5&#`Kb7_JyKmn?k| z$3Sl>OzQhUo(62AyPmN;%)tV*{dtEzsyR#E{>uindD$TE*F>HgchMgyg}5;y6$eIb zp}QqMlLx;_p~Nc=TtbD!bBQ=EK2eDuE5fkyrVH(SB?oP5c<|<}FC5<{K~DZuM{cD6 z`}Vrv!0XMTc7YM(Gj@0Vg&j2MuXN}xu)=4I-3fzOX69R6F7^U}&;+)RH_(UrgO^27 z9v&F_l8+r3%9tCWMa0|#L3(X9{HMouLVnS-WqBl4B~;+j%_DGL>k))8_(mig|wi|{w=cexa`nvNL$MTFDK zL330L=!+YI(jW zrP~CwUodU|d6t>^xw~m)-Avrb*j>~pQ#=-y!-cmw!W72tjQ49p#OVRiGkMm}>&nNM zt&`Ep)`DDm6b!Mf_hv9m0u=w`(5g4lIFBo5{lvd%Wa(=1(kdBZ2Z~^P(^GP*_z2x{ zCKDSkyUO(q>*1{!=38hcct*-lq&ICk`21yi z-8ELQ_*resqEI4eL78-F3!f_2{~vr zaF%}Ae1{mYo}|T(Q^E4+Hj=#j165s9ie~@V&v-qDt_c%^nJJaf=nxJnp*~`qhJcA>|#BH4fg1#FKTHmu!I4|?jDDk!>JrO z(Y!kxmRAVzxQzkES5Pj$#vRJ!1W>R@1wt>Y(0gS8IOS?J#srMV{LWCKaU%knPgQ{B z=8qk$`3`C1@ShPwzDCBB5M5EP9&{wN7mJ9(Rteb<0IDw=CRMdYr=D$Ao4T z!TLMN&@Hu&++6g7zPwe2ac5#M)1in)T8snL1oli|2#7DTA#3+dLT7nCZj)lU(}jOT zzFAuEkFmQqJDuRlkGY(|ovB#D*qvsr4SudU+WdRh3=n7RZlcsQ7M4$aF#|J4<_;qapH6P8oo!zUMCN>Ck4jB-|U?o&M&BwcLo@koWCHnPF7hK(i zu;c&$EaW)*DP}mt7|_*JYdqKL>9FLAEv)SjLdY>=c%@z|T5;JKqt6Pk(N+`nTt*YV znlF%RJf`2D2qE(ceYHLmKMz-;@v$)|t6522@MFO{vkZ1-eO(F@ORdJ=e0ACimVMFL~5ih|ICaw{}wH*$StNYf$ zDgXc3UH?=I+|~Wv!Rna@-wPs zLESfi-WeZ(GyYYe)vZw|6FQ%qIvWo?dZn;C;~j|}Ur)d9NJE?S0_?ealiJx*BBVLs z+Qqt%6iv-g|6e}Cp)vgAONFTLKu5b7oLVa6t&Ox#JLCfxOJ=&{?;}m&FMk##=06-pOAv-Vo5Z+ zBns!gEJxw>LE0<3f~O8I`Xt#;Bwul$TveR}cLq3wD$P9f|@+`7|=)o+N6GVjO;mWlT6sr5fgnptgVy zOZ2>$9rziiGmh>3rV5e2VHWz23=#d*ng)iSSWjW{3~1)}G^^jX!8FG17A&8NN-yVg zw|kwyhOxUb!CJ51@(*Jsp>WWK=za@&2jJ8&Womav1>UF$V10-?ELj2Jj?ol74`#^a*OQr71^KAMUK2-q%wJtZ#iFmoff`mGTvi&&rC 
z96PKuSkSU+zZIx5b~oH_24-(eLCr6xVryU@w?@9+T0M@+*oMHx(Mb?D8F zf#_6Kjjhk5al?WbB77JL#aspa)EEKP(MzeneIk|^us+%Quc+zoz2y3@bg1{whp;_Y z2pN5WX1>iu$JLp5(*6h?lh{N0C5vI5L^3RGT}4Doe$%U=D1L8{M&>!`S%p1Rko`_7wE66+?)PL4o>a<NsAEeLcf%A1acrwe4Y8?v0%iAl_#7-OsI)vnF zMI2msS_(gtKaqx$Tj-P2RNUiQh<>y0(J6yXq-$R`gnrJ3q24n@y8bRbs#b{ZO{pla zw2ewH`^fJ6r4V>B4!-%!A!9a+VePz1-1>pBJ1q|?VNuuS zfEeY99y{wro1aX92aMfWm)k?A0h>4)H)QS6*065rE4G(0-L(Rwtx=b|^E7nw?Mc7CjwPz6-ZeW?2wI$FJ z`G&Xz?511d)6uXgAG__YQ@hMoQk#+sXY{jRYx*%_{@@Ai*-(UXN0RY*(ONow{!jAl zXBl`G#DGA&kjRvc!(aR=Y|jtDq`(>UQ2!)w*WknA6W%c4&v!2CkrqY{2(Ulj34_1z zM3*=dh+*t*U!)DJuU%x{DlrrJjNQfWFvTyA;Z`&>6Z93gA(P1~>`DX@HL}u0ekbUooesn`$<0NX|iL zVGGrhzfIce3&1BV4IX&ak;CH$s3@!yN2kT3z4$zuw{VuHGxIvrRzJzX>szw0K=K6L>-&g& z*DHc6K`C%|-+Iy{^PO64D8qRMv5Xs+QOoMFaN42@dS-^gn}_zq_5#~yd}e)j%Y86@ z)d+f|RTJ{B3SiL88TK_U=2|SK;%vt5+JkH{%4d8_pv(+7%h;Xg{%No;(L?mYeHIQ` zv3Ee4E^6F)%*h}3gm3FuhvRBRP>C|4j%$Ok)PUtn*-PP7>l6}&Q6RWk4s)FbNr_$! zoo|u4TZDO?mK{cZnF9U82IJd1!V%15Fklqs4EvVgM%r$5#Y)V=rnn;{+-VFgBdajA< z?+tS~B|h+DfCtC>Cc+a-d&<{hIyL($Jd-gNlOL242f0{Sw3B5+hJPp7x7X9dkb-+G zig4VfM>M{XPSCy4WpS&vbI|xwYj~m-%oe`aHF@Vhfgs|0; zgMivm+{C-)c#Q3JH+{1}*{6>j6#O0F*Cy7-FklD;iw}u5=(*vBP?qBmq=pxt$&ok% ze`uJ_gGZ(lVB?Svt=kli=unBROGe|4#{WoSU_4w-Duwxx1H`hVjy`ryL&IYQXb^v! 
zHcxLMU$b-I*VJt2Imj|ITklhoBZa8&AO)o&Hc|eXFDw_O48}c<1&!ZT#Kdz9E_hXm zK|`VVH^Grkye$t;ns~4<)fY}1j3H0gL4|=eqId&irUPI-Pa1Y`*)PMP zQq~&({bC(fN6hf;2Z9inGViV7GkY|Bd*{libCg_LMmf-3;)ajzy7r5NCz7fF?Kg9 z)&wQ|1YCZI6Ie2K=lE9(jvV+VYHIbywR(IUEj<~h9<(Nw5p342tb&Uh$3akH0lgU& zgR-N`@k88Cs;sk?oZFlX5qpXtUay;!tvp6|P0GUPl3dI@-Abzmu8}L44+jg=VS3$e zQa=6-^_^dWH9He9<>w-b<|83?Nd@$qM?%h_aB}jk49je(#>VFXSh-)Fel$>FyiowJ z9=k)n^9Anp0eut}FutWv@WFUxQNqpXu#xet`wF zuFMl4fb)6ku-9Lj_LlqM`doGvg)w$lU)oR@cR0VMi&hNc~7# zE^Q?{cBewOaUneUaF^6doTZ<7voY{YHoo#cLl;@zBLhB#u)cz2$dB1dl9fNvq1&Zs zk{5^eGiFm&1#!q&$KD_3!yxmWD_M3x4vPgmmY?f~W6w&_?^GRDEfT=nAs2|#uj7s> z8R1{X?j$GM<0-=(E%pd208R z;S|xJl-Fq~(6mpVBNd#UKV(-^-&{ zK8}Oj6;+_@90K!ItjLMD$#}q$k23STkqW+wLcFvg=8pih<(;7aTRHcx!UVrEcDM8U zOw2YI*}ThfCZsTSS31)atnR0ayfaz1^90tjE3bp|tKMYFTbq&^x<0?MJ{?r&08u!>Ij!MY)pim}VKhep0XOn%H^yTRYNFe~r^ z3GTc?x6jJQy^EnT5X0Jnf$0%d%T|R2Hcwk@Y4N+T%9voxr?$Trc^QC5-|5YKpuR*J5se7VHk6x5(Qtx z@z8%f(mfChn^nrd?Ad1`qq3Rq5KqNZ^@Vt4+57>#2SvYZr@)dr=7*GYfDVIBhmEq9=*!OdTT9Hb{{1*kwSohA#_k09 z1~A30MKsyY9Y-;C_wbtvx*t*|-+cmL?TcziZygW6&ji!feGz!^QU&rRjKWCqg~Y)y z0laUOfbda2kvP4R-X4{Xy~+j1Z||gcA0UaHm<#84S+KjLk&Fy_Ocw_i;X=6-95G=% z)!Fo&oS|j#s5=Ix`j!wW9E%&Bt5Et)2yWeJOHbKOVrK#Ct&a79s}27+WeY8Icql-N ze@y-;*y>r{Bg*j?~P8(6jJoBe#(8F-MfyMCC4YvKaA{k&PQcbX6)e(Hd|Pp@dZ zq!)f)%15(cMHE{#m5g{D1euc6aO9UH?8{B1Vv^BlbEq6Adi!s8g6hs2B%k}s zA!kVxyxo^X%xb0ZyeZRKMF-*L4ntc1N)c}C;7q5&(Hc6DU2m@FqDHQ2?$d(x5c1o&?mqqsNrkzTieYuH7@A_8l4pZX+w9Oh1Cn z=>bGLg8A;o^HBMPKkD{Qre;m5aBwEmj%;@W?Nz6^cSeTTSHt$UgAQn+|Fva}j0OB* zv+nMA3pn%SWy`#y4p_a8y#r<&qUGpD&MKVE$gTo-EUyNws*|XHoj*R)VL3vlWKqJ# zk7&IKhmX>gpz1an?p>Zoiz-+TpKU4beDjVfNbex^bJO6-+5)JIxJ4owMRe_w9Q^nt z3sV$M(c<<8q~L2In4d_2IMWQOkW~{W z;-MQnL~CE%{cbcxFHO)rC4dFZ&hRjA75A{#7%MNc{>az17#`r#^3UBGni#u_R5F9` zmv*AxO@O&xLQIv`!wlv7+!+&3Fgw8q@lqwI6gQwd3G-q4vz)z8QmFqqo?Q2ff{GvI z&~j#ogwJ12^LHm=%X5~uedh&LsM}8@iZbBra31WJyiD#Yv{UMkhd0$SaoM^Cdj4xK zxsXx}=XWGQ+QyY+uH0X0Ra1^qdeOK-FN4-?kYs&0RdCuc7&0PE$jtK!D9>X))N(J> zd-y`M=%x;sXt9imOS53NFosipZ;A^)unhIK8Th`a+5SK&?SF 
zT(eM?Xzlfb@=zXpnIs4Ej(Ad@M>w|RR^lgTF&wjQ4#|7Xb`i@)PT-G9O96wfwAG#gU^mm>pP2Ln@8M{lVoe4v`UF_ud z%*5*FLR|RI1WoP~aQnA9Lb({*LltU+&W{069r4B{Up`hhPsY>`OER-37}|GNLC^vT zxUZB;9c5y$+@8%n4}Q^?606A~n`GGCRm8M6PsxivM`&YxCSJUdi#Kn!QPae$q_H?3 zZof%~r#5?u`EVco>r#R}l8Ja-ekmPWJ_63AR>0q#kudgm6mcIZgSN}7vBEA8xnV6j zqe>b2pR#!_$pgHGI=Ii``uH%Bc`kYhrY<)SxhJuFc*gFs60M-&>5`U@?RJ=UQiz$` zj97;47Vgn97dW;?0IU1dA@JuodjF>{Zr{v9!8m#RtnW&mCWb-FkxKA8EDlGG3FxfQ zI9x2ocGfRG(dy%y$sN~JI2BvS`VzXx@v{xKa`S}r7I_?gH)CP z>KD0!=JQ>gec2Q=+{bc)?%3n(DJ?Cdc3Hx6#_oRWm_y?`8PTl=9CjA7eTRhs%AGvN zx#zfpV1fV?{Z(Mi8x^{!E&!eGRpS-U@p$NU7zw%)0sD8e-p{KeK}vNI&0Ly*-Dgz|6-pU6Rt4?zA#W_0LJQwdeW#Qcq$LRd$kIAh$MPN9P4Cnr>B{#cM8Q+(smrk=2yXn<`7WKok{wtCSl1$J{tFWW3$B{k$=4wobP2i>1|HX z+%%gT`EV+Jc__p`r){um%c16w`!k?xfbF5WOraqrOjPlB7M@mT85C!9Q10SO?u5M; zye;R0-9ZJ&ylz7IHo~0|{p#`0fS}leFcAOwDk{ zPa(!Ex5naTZ-+mBZNZ7LyVKRiU>8>-+SBQbQyK;MpQR>7CyJ5Re|%v03J=`>%0tpF zLWLNLLsC^(cwh_;T33;Wb7G+_v!{x-9M_IW?38c0!i7 z!xNDmD{j&0B?Z`2nuhdF9raUxPc)oLA#z_l%w0K;tc@RyS0}Qa@q=)jdB&fH-H?S* zQ+V)sg+KW0nMisssA83e01I5)uu#Gmx z{$GU{Rh)uzEH=ugX9v{{< zKax0yYhvaF0d9(O#?H^nMe2jbpml|580Oi+-asvfr481o`;C2{q|LDYnKS3AGz(UM z5L&nD!ln)PMSJFZ;^jI%I*wGrMs5mebq``1Yj(z0NP(Dc67AU-#qKBNco7Hbs7)(~ zZ+{}(I>qwTPQM_>jvS!2hcocs)jT{lvV#U#wG*GhJecw<1LiM0L;`+1qfMDiPZXDg zfdNU!9>F->*{Oz7dBKGDI~ zjNK__&O)jGfWP5mM`#2#nwx%$N(KW-CZS1@CDUI(~$KV0E!iU7Qqs==#vS^D=c z+a+*3wB5$MA1}R$PGC5@8&$&Uj?u96&|IoAEgth0Go9MQ0qXK@J8>DubZVCiU~2ds zvi(3abqUD9&57B#(Bd?$zS2eF))zwTuN0_=-c0zDzfgTSHtP<>;*JSC%H0!(`}dg6 z*&_^8c}`@+dX`VOlZOjd`l7efII5Me0cZ9IU~Rq&*qN;7Hk~uV5|I!`2H4?Fqxmho zCsuHeu{+0?X0S5VT=Zdx;L1?uy?&v`_+clvq|*b$9`a$*N0z78s7<4`1u}hjHBNaf zjoI5{$>^3ym}g%Bsjb80Xx36%9Gi%~GfPnH@+&$#dmqvNmkzQs@*z^@D!DIrfxZ*V zL-CcFxbnndD*f*%Ns%drkT*$?etZ@CJN%)(?&TP8A{rl+WYIf^B%rsW3RYbVhQNK( ziM59U288hO(Ooa}So&Th{!5#k159HlbOimuRL)ex6w6*SmS8dyyZ4CfTqSK__?Zyg zSDCub?*hWG#uhFVK`Pg_c9oJ6T zL(i(dChzh~K&d?eY6lk+!PgPEHM0W8MMR=YdjyTEl!2>ztHH@U5Te>ONX!6Zcc1y# zKh6V32s=dYiuK`Wwh#(u6G*91=el#J<7dY1bpBf6((Ww|UPks%e}e65EQ~-(eurrA 
zjSHsE7vSJSbsS|OMQnZjKxG~e^j68i@f25@cQg#I)l_1Rr8pkVpG{0+q$-XCPXyL;KJ6ZI^~IeM4gv|_uC6RGH0vz2yF`AF_j z)|FQq2m84>WbOko98pn;fp5dmbBsH!v6F-OMLaNC<_CN4OB167>iB}OyZ0upSf8{@ zwCwp5C^{m9yM^{JXmr3K?T95lA7pp_v(r)Htpew5OBf$vKB0^HK+P_S3fkTAzz`op zom8+>Tb(@o8UXgq)iCn53|Jb3(-zH0g#TE^g#SofJ$*4zSk1J7HB5{A@HMfG-%TR~ z>3HjEJ|>%7rx71o$%6D;*fuVU<>4G7#eGlc)gwh%axfVK``6H0IzP#{KV{J99RnBp z3P|_yaTs4zg|XQo7(Tmvyf(^zU^;6@C!AzETeRKT1dcsr zxyKwp(TB_jHi2d4)`xkPj1#GT^o6e$p!Wiux&*;G>#E%$~W7ejYbWvb8JV_18#vMdFF$Txr}J zQH`;W1Mx|?E4tp>l#JtZTf*)z<2<90BIZa0RT8v=Kd9X9k(S+3wTO8oJKp zxBM7mhenLuSxhj-nU7a<9#dUl*)gUCYtw+#En}!epf75kV4B$v@@VjA7TGjD41T?@ zgx6+cU_?n3eRY{-W=<=^PdC5N9V<7H;~gnbeYOxbF1=4$vQN>YGqX{(G6!o5TIj5` zw@JzF0(d?n4U$jRk)vt@)FHbR4U^*0wdz0Wn>!l%tSVvKl5kL)?@Qv{Cg25Q9xk%; zN4Fd~Dm7CLento&$j*)F*BiLaqo?5bjY9lu=YSGZdRkq#3z(${mMbR#w^_Zw~-#d@rcYfECL6M z6!<-H1KFVZomL$z!^JwWXpmY;*L#nJWUDHWnh*+IXYI+`p^3Qn4G-lP`>^lJkm$)J zO^CfM0GAMFSX;7~o0>5duQPV1r)`VnL24~M@-u+@EQGl`r@`T;KB6w2Ss2gQUH>#) zrmK3w1wHkIE!+6uw_Fj&rN50V!rGT`(2JgEHDPEK)`=!)HWIPXygdQ=>w>wY~a#_QNjqM8ITX*J~j zm_hpCKGP!KibDCeWI9Gu3a)EbL&CfunElL%tUaiRu!@gXmppOO>qnv|-ntMpQwU4r zX2G-tUfdr#4aa^FV(~9)JbYT*LBZY@qB_~V*k>v%s9PessO*fbodOgb(8MW{Bgp>k zKCtlz51@AnNE}bAxt40cw3rL!;OA)K&H!ars;V)4s$5X;?sB=8Zz(rwVk= zV;#%VLDc8P?x<;)Qxcu=Qhr$pC_`FWY3~Gk3w*9bZ{75%E79v0wWi=c>Ql6{}@CR=@9t0as zfM_K@`g~hBdYM$>)3c+oukAlFyCfbKCzOKRqxWQ&L>={aPs6$m1t@_t|oo{EHm?aCI0&ziu2rN zQPo}Ypm~-DYwUa>aqJkP)vJLq`vj;x(FNm=tP;7K7(>T-=B0791Le>(2X`lHe9G9} z!fG?rNwwgP#sQ4+X8ZaQJ=p&Awy5NR2impsabC3&a_@CX@25a$&8&v5Bx#m87Ed>i zjY4Ck3Ou%Zm>w4{BU^VQ!Y?V-cX#O}G2gzAzO>1}x=~D@A$EmQ)r+K4lJ(t%WWxE~ z4McCahnCnC<3Mi`yO-C}tkHi-W==V{W<|rs8CgWfKoWPps=_5D!T7RZ8Z`)2fW8bq zJmY!6XZbf=PPz_Woy4+sv>ox@h***SB2ysmgn-&Jf!xe%CV4i<>t_3SLlZRZtm4*0 zI>C;&0#N&|1@zSqk!ZU&rt9)?(XC17cEy?m&kg~p`Bk8>dK^5iD5Uq|nD6dW8S1wF zq&DVjNnH!e5Z_q@yXv2i8HUH`eMOc<6Pb%0`&y|(;dP>Xh;^FgrGui(i*+!986UwOD$RgPt$yOXm+tao`C(@->tLBfJLG-+2HdY>p|`@4_yV%Js@ zcQF-aOeqBEoV!GP#~C{6Wj1nKv(bnRoWU5^uD3r2&27e2QF#JTvzAnZ0i{tQv 
z!E8D=SR9NuRI!N=3<_QDh`}mK_Nh!U`C3X9Rq7T|(s_Bp^*GK`GH|x<7vpX=qLd$Bp@r zrFV^dl4_%~wlZyDXC@wQJ4#jGc9Ul(il8zx85-}cCOsOzXuD=Pu5gaQu2Xq*zKI08 zUsb_Vl@Lh#X+<aof257aTKc(*@4VoJA0I9mDL|ZqIX%597=v0_ zcJOiuG#Z;t)?Z@#jJR?*)c%{K|5-(wekbADk;Qm~{F=H4Lu|afbp$k5#XZPwr4ZQMr4C&wF3m>=ez^plev2iAxIXVbZLBy=;tR5&(=i8*rZkh((?QAjt z)yfu;b-Fu_*A_7Uu?l8&Dv{-Z0dT6T8t$JR521HL=%XbO_~v2-F1kGu*H2nVK6oa; zp=TxkN6~pW)cn11T&PqMZKb6>HKh4DPue@}UD|u^aqkt$%BFnnkrAS7)jf}FWrvU* zLfKo!@BIFP`gFVJ^PK1P9*}?eHF1F^YA2S4FGl4vZ^AWNHOQhq%yM8&MJBv|dx)$b zzE2gx3i0-bBz!rlk-95?BV7lXZslGK+_Ekr^qe>z3St=(;-Rc3-i`(o%fr>b)evRr z3z}JfITu?^9Q}lKuY7k#i|@5UsbXU=y3P7pg|?uv`-8);vKjdD1A8`mGpy_nveCM|g)q%*wn;bOL2NE3FY~4hQ2*0K zxVbL@Zw4--5+Z-eyZ$opuZx1T6$#{8qZDp(s6vmbAk1mjr_~zD@MI?s%5uG6Fzr6K zt6dwjoduZv4{+k5naqD|27Qd($*i`9`Ns@9V>jDj`xW+mduoJ-)s}NlN?qXiDL(w$ zqycl?MpKz@z8LzX8aHRkVewK&!YdAicIiq`RTqcRdgZi+i^aW_Y^HngnL1alBYw5X zps830(-zz#*#SrCe-|^+@-WMoD5rEl z1>iibY9y_U5r+AbxZ!Zt^;*F=$QV$*S4;h><1x&p1UKFvqDrdUh-_Ud7_QC-Y400E z?6i<>SelKO-)G`x@niJdt-D0@TLCn+CWH43)^|7UGyN4`ilg#k(XOe2rgV!z{f`Q` zwl@^oCK4j4B8P9TSEHzfANpM$L*LKRfOm}DV&C$P`82+y1wp#KgulgN^Ec=0cF4v0 z$?S|@&_ch8JS8@1tg~iwA~-HyK|D1^sLjeU{HhU+XGPPg+ZIWdYg`GS9Slx(CS?63 zrbXuQP`St(1J@4-x80cvy^P)AX(vb#iQ!&+G-W*uY!@S(feEQ>rC(tSyYI3dc%d0xp4_uIDod;*-S?9Mc^HK(wSm4plZ0nW3USBge(G&~i2Mx9gwkM^BeeMxc~*XvE`7xM?#$Eh=FcXo ztoE8bJ6jCV6|9eF?b{T5yi103@AC((kZQ=AFbQg|dePIq;rKO+?UlK)IBRDO2^@-p6H7`!z55;6 z`f4-Hx}1Xeo$2gSdZ~9yJGr8o4Zj<*V2$|+a;4)AwaYBP$<`@&GHxSXp!<=$nNSLw zoZ~=2lTQp1L~-m1rc?6^!@O0lG)i9<2I{M!_NgC4C5|JyW7To{Iz9@|v-eQO24Q`> zA($O!e#sVl=rC?@l&hYO?|%tU!owW3Um9@MO$6-YnO6C=4ip=93q>nEvHH#beG8MZ zWwjPjn-vILT@@Vjm4^N$(e(83NZb{}-k)Kk@MQ90a(+qzwA&TKl+Ksrgeu$sLLjz z_7a%UQi*Ea!I*l=l76|T04tq%a8BO`7WE8sOO|ON6&K*SyEE}?WvBJC2b^xe)3?CWCri>FW1 z19?4!Uz!IMFVf(h;cnt9JxC)wicv*80UO6Gp$~YYpgFA^Zf=PL{rgemvA8t0Evv#I z>p&EHqDdFcnGA0qvkci-PnN)Tk^3#Biz$f$gnoh(W%Pt1>E=+)*xj}0=`go{NoV+F zdlY5tjp8H!>|X4F0rKz>3|X z;Cr1v&2cS!#H6Le>37S52&#x)V`wEaXcQ4}u# z;fEB^UA394RC!NVt}DThx8m@V>s+duJQk*vR)Eg*aL}3JMKZc4Vs~^kepum;^wR`- 
zMMVvU7`xjr&kd&RXyVMO4RASQcNte5@UU@5XH&Bk++lOPs~XcHXHF2RKjP4fvAg{y zdMNVc6z5Rj0d+Ebxa6q#agNmkGF9ES${mi-3ze%Hh;m5%4rxNUfL8 z#`@#MY@hy$*2gvxHPs}=hpH9(=={eX0nQT62qdgz`Nn~vyEPI^ftd z-SXe5;+;|)ToJ?euZ6T7#=)WdO0Ya00)PE%NN9~b?v&?YV!sdm`TSE@x<`}k0$67A zf6lO@Vh;D? z>k}G3yq|=cWWb17F4W#QN6rUaqOOl~(KC_dx#~}L;f&pNFR(_vqkfL*BKGj^f&gxo7%@(=Tqx{y!I(pQ3^vuk!*j-x9shjc)TL^; z_C*%*&k%a|Oep$FGA;5BG29VSNv_S0g^j_bkag-akzM*9opdf4^#=+K1Quq#ol5?;(37O&X22xKejR;vSb!* zu#3Pn@e160W;ENm%qPjGLF!JhZHWmI5ibJ2R2~-V}v(ADFJ^ z&>w2vyo`vxO@NdmMWE35+kWB4QG+3?_#YbZC@5d_ovtJZe z^i{wr*D#QcbtaXYWYN2+8m}+$W4?uPG_*t=7VhRlZ6@noecQm*o;PHE1*TK;w#O?b zwVm?Mrb8!Vck>=igQo))Li4eJe?yqY;eifr*m0H1@AYJUaUQS$A?UrSMF+MAqDNL0 zF1y7%J2aLUpNfPAyK*>u_#b&uv4mPBB;dR0#rVAa1sz__a{KIw$4 zMdoC^zakcevaFr!-pKPE7Ha?10#$j|5nMSFUh`A9Q97pBGRQP``ZjpB_=^1*8Cy8b z*j+)r2}t=>3SW$J#(&>fo?4t1KG^?>>s#*w%WQaH>ZAabhpcGO^bkB+$L1O_3G7JA zBPEk#U_h=6ysv#H=S|iy29$*I*9uW%-9sv=+(x!N$$(k)Ik2_qG`U%JnL0P;Vc4ED z+&+01y=6E^Tnmd~*6G<0`EC&j`ZWscvRGeFa3sd>j-X!z(mT32H4F}vB7=ecaAj^aG%c9~$L6`w&Ld$cv8)1HOhoYye-2Sfh=bVs zCGhXgdvdOKBjpyQAPr@mrb)Ny=anbP=z~n7b~6il?Ays@dW)9W6rf*Q3OdZ$MEhpE zC*!+V-saLch%;lEnUBZf`HBh@dl`nFgYI&&s2cbS{2}7Rc#>4FhFHnRr}}QV zS-VMC{mKCDFm_j%?EogG&5q(nt?&kWHjAU?n3%4_ttA9F#_l9h7xw0!6DIU{V9OsK ziaDyFcm)=brwy|~eQ7b2^$ZfT+@179 zO&X5B#PZbCuFxO9yGUhb4rGdEg6Qcsa`ejs8gQr(`*tUx<^45ukNJ0^_PZ2XJYqol zVm?`NavY8>uEfu&A?ToFO&5HV2V)~1>yq$+#noTAGG#67VeIa^gEM~rED)A^nZUgY@( zhRcGX;Q{j>|C0dQuj%w)O*EdIUB(#LFS_CDO0uUtk>%zWL0HRU5;W36x29#Fx?wI} zyl|H0Ms$-w**xgfO@}}B&E&cA3)(ul7}e?%(AJ=y?o|9oVztZR^!rE{uNhAsEF6#e zQLGE;P9W~F)us|#mBI2d)8I*Y!QtW?+?GZiO!60CnG>L4y^V0&#%WN;*qz=~w$C_R z(3w2mp7qEJ&|A_7-Bi|a(dMpjs+AALo$AouAx67G{jmQ~H69<7MT?(KwW^Z#U5P~}lTs8t^NAkXvYv34li}&{0&wB=k($b5^s0Rp%9m#2j3=i zJBj*nQ{Z7FV|Ui>AQRKVt&%XnIgH(HwRS{j<42uGo?606cE-yMT7c7=vBGOw9FF@x zyW7h0lO0cRU6VW@%z_U?`l^t3K$(V*X5G&3tFY5p22ax#GOXqOn9Mb;I;Dy2j)4_{8!$p4|Q z?lEosl_;FKBZ=DtIagf{@dO#vu zy}7QlW@ygX-Sbb@nARrlxY)@Kq!_zPaA)~lj~5FE)LroJ6+Q+wX<)74D3a0S3rSzA zq350)H0L-{Vi=02j4E;SN^$H`C?$DzvEaLbNXjR;pLZHYoQ 
z>(1%0#IU=!9D5=if#1M#yPoR9D$#?&iDK^96U@g^a#Ju|Qh1`~N}~!?Ix+@LZq6goTj?3^R2*KLk0H)Csn)qpQWTlZ{Kr|) zsd$ViJ?f(cmkMzITb7S+x1NsD`b?hKmxA%(SP(feD zp`#l5t^Gjil_&{*sgB0Yd>lW)6$A5E3C%2xK;taaa)CXhM<+Y>d$3Fp#_rCSOvAzf zOYVIVK)yHgHRS5R9JO9y)I(1+KgUDk5@jrq(jlo|1HmP|3hZOXL(YeI$^Z)5D3;^D z#((trvU(D^GXd606hrdK=S1zm9%|#5jy?Z!G5p&l8e(*g9Ft~!cYzu3qM?OENk5@O zjz#GDFcF=XucYRaMo4~c8Ca!8L$6XMxnv@V)`OL(m>-NZ(2OpLRs_*Z9!QmY!_-@Y z+`^oxSgOF@Pg7>%nz=E;FH23KfU!HfDQur{ytLzFkuBOkWWAtTCK&Hq#hr|EhQZg2 zA-!XFP@V6>1x-F^Kb40nJ@T0F+M0CKhCt@LN=U082df$jXn$f1O1~>b|HI#DnA2L~ zcq$3}w-rLdng>L3MjMq-%|u>U4(hG%qQ?YR$ddzk@H#V%=?iy~CZkvM*~Vhr(>@#D z-(5&Imx_SivU1oe9?AL~BFKa9(pY__3Rm2lh2wIk(85tF5HgkzzneW^&ABt&u|Zw* z&S80iRbBjh7p|DdvM;q8i*@ zB2D9>{qbui^L!{w!uAs$WbBx5C|_IwTiV9LzO^;fb59(~9V$U=f6ua~Hj(=mQ((Dn z0ql#tMNS<&N#zH#Pk=GW7l+%F=Fpi@ zqA>Jd1?ve8gC%?1$&%xfaOIL}^tANHb394%-_eKWyL=>nV{$Z|v9p5r`qJ)w4p2cs`dhKZFLbeBjV z=549M;$UeUUmi)MnOU@kSI*v%qd-4&F}>G68(&jC=3M=AI4un87DW?b@(4QeL3wYT`% zz;wp$-bb0j-rjWK;!QJgxeVJ0+|ok+$ZM|ggEvq=)?2Nu2!dz}YSb8v?O2HwbrSei zH;Xj&u-#rv89X`hlPs24P2>M0;^cpYxN5~?DlK`CY~7v#hF@~vitr4%QqoO{dmg@g zm4?d3d#IPnOH!j=3=cgLz(8&ZQ5rW2x44&M+#t)jeH%kHMWi91gms$M2EzQGQ^^v! z$vE(c^;RG9#G}713m@*#fnLV$wm%`TY?&G7EozSVlieLVr{m?jiyh7K>>-JOi`>3PFh#1+t&kuHQsD>j2vT(%Ag&t82!@-^kEIlBKA5K-1{}kfj zfqp3jpZiFr9@;=#$ERSx!UCK*yN?zxI8G{xvfvEKhLO+?a^=)bI{98c`g}>jduz7P zvxe`;xWW>+rW6kiA#=&{_hXP3UV-hra6D(>L+P1`aKW`2te^Tri}WPoc5n)cX7W+? 
zK4W)J_6WBr7(nti<~Qkefa#`}9sl#R!WwqQ=L#(F%qwZGV=f0W|7Uk=^jL0Kr_eCl z164Hn=yOyB_q|mnx?!`x=XMo%9+?16pF-%QS zj#r#^LaC{{R879)6GyA8OF&kZ~_Md*5x-voAvQN5jHw_v->g+FUcz5 zr(PrS^?eX@{HkOgT1hzGlu9RjWbAHF8R`f}Xv~A5+%s!^yt#tq!+&$PTXtI4qGs}lB@h6);goC?81uV85 z12L~^ss0@Hds&yTz9h!(jJA=vixow~u+Q1p%UOOBrgpPo1f3(f!nV|Om2 zbWppoj}y%Bf=P#X5R|SA_g(bpZjT`R>RW{;2c+@|B~Z7meLpd z**@c85pFm>Kwk-)$)56bmba1%jc+fKpH}B-opUaFC}!Zb@)oK;@hRDxQ3M(56G5(O z1({$vLhVoq5q*+jmoZ&e&b`@fp|`dB))y-xd-WyYo7340pBX3IjTwG4~Z82l$$Jd&3B~ zXT%5Aj^}|Al?NG3Tl(}?2yRTN#5&n=c>Hz&8QB*DuP>B>#_@0DSa1VfbRh{HiV7Lm z?Wd})hloLFCJ1KbK;H6GBx1o;8t^m^r<|C=UiWc->~U0{0H61LAcvw&pOieOo5 zIcj!9V7GM`ZOoeh3FoW8sBRX}=_(|?NCjmS`8cx01DVuX=o+dAXG&S#XFCU$a&p{d z7Yk&1c>Hgk6_)pPIqu)!02g;Mo*Znz@=kXN>vy`LVG$pjK1^YKhBBn6)gLYgRl~6O zB;ddGV!F_9G|jBQ^ciDu{gE1C^*#=&7L|Z@=R30N`)2y`S_;a4&Br<6y;P*Lo%E|` z!}HZya9isH(d)iL?`Id_8jBR%AFz>@TYV(TlS-kD#6hVDpA4pn;^~tWOurq5nMJO& z&r}x7ma$yq2Yw*tGLFPZs$tZEj82e4r6!z zGtIFq*^sMhCaimzWs(i(KumhK@IBuXt6uZ)N+^2|9?~LvLjvJ!Z53#^OGEqKX!@oj z5<8;G@j$>R{7|%*EYf5d6E?*#>cC5KYuRpk>s%WCmz0OC(mixo=M33+I|q`^WBP#Zy5Lap^;pg3}X^z29;^a{V|Gu(jhO}(b>Lh_<4l)nYxnT6zW=Uh7D8PxC zJoq-%2Wb5;w||Wm=8hBKhTfUj`8!7_rEUth1_TgaX9MxMqW1D_HYmc_-IEU{xc*uJ z_iy)1P#D8}4Jlf1tmvK4(bETWe0bP=KmoU}vLXtvf?@e~mXBD@`tHJVXox}##@du& zknjgJHdsxr0pmM2SYO1_M`WdPD>ZD+z}GYf*B(Ai&GX zMNUjs7e6v~_v<#n&K0`CjvRAX!`Piw*mMZLTi;1<*yGM40$jDq5ckV%;(j%_f;eM$ zQ}3(6cWVi1FXGSizSTHyWD@>LawBsx!+`9sfZna5@cJE}D#XU2;iys!x$uEDUf)3a zeN&(yq5vL7-63XcPEf4KLOtKA&+DG9dgc$)(1^EQ*k2Jh+R^(82MBM$dk z&!vwt$HLdL3V1M$?KA#(lDF3ekx2Ka-q zJIPB9_`t+@1cAS7!y5ZzRe+0bTRt_J8B2a6$kd9hC8|{u3qbPkvYqOik9FH`3+LH&y zN>|D6QC+mqCI^ch**ok;8=d&CpFF86gfB0WV8Nrcgg^B=HM?7i&+B9G-`zr*fqTY>nl;X_#C{HU;<+(aF?J`>Vv9?U zHgzODn*pVa-5u{X1&`0!pKT*#=Z6*a<*`KE6IO&0+n-Pv*-zF@%K!xvmLqif9Py94 zM5jE<#qV+H=;Yl@V|1UB`VTBK^Kt^bF;T&>+SENAS_X0A2fJrLk{N6a49 zGj=zeX9Q6XmJ56Dy5Nd7*1M*!fl^z?lDM&cFy~@5guRmm{xd=?E{5Xzag{h~lNfpz zRg!ayVxc6k6y_iLO!PMXM>k$f#ud*Ca9r_Ss#1NFjEv0!(~4}k7bhg;`)<&U75R84 
zHx)aNZlzIn!(^v>3GCS#55Ed(iEhpqEL5sszV~nps0yGN4`f((NHw(04*+4B9Lc<) zihkaV-PyY%UDqbOzFi+qF?M%$r6c>>Ja(Kp#}bFw8GqZ;5(j&KIL1ujz>cxIQgMA) zG3tm=q0=3GE%_+FMirITD3X_l0zh268uBe=;7ju?dV`C=RiYKx+c_E&{>~@%=i{MX zsst=she(LScKYK&Do%>e$Gy|9)6{oDGVek*_#MuK`8suQht7@2V$CuqBM98)z4gAX3T~de( z?k`#@d@{}$&Mr5UO}rn7rJJ zX<~S|_=^(CR2z`9vw|Spf@P0nNx{C^iBx%e6fS?qd`eA!Xxxcqr1L`p{Aw$L@9PH$ z|H@u^{YW}`ozKN}&o9uD!1H8$SuU)*pAM%p_LCObXLM6;5qgBP4v^I4v`_yxIeNSd z*2_i1y`w2i7b=Ap#H!HxN)V3kH>Si@3G7z!;ID!=tbF!}lX*QAO-u#2H_i#?)&&V| z6U<;HV|Vj|X29@1(azduwrJBUz?^-?7`CmB+dbI@e&6TArU{yGR^_*FVURCw`^Nf+ zJmqn2m_0f3Fa!i1tlMmzIM|C8)7xKSaL<`i+}{3`4yHB|HHl>CcV)WOqJHuvknKB{ zWn!dC4p!G9UEg$-6v^d-_4`!Ny1#>@8NZ=Vdy3K2d^Y~qQb!k#8V!{<%VG1O2q+pI zLUacwpvRLcoNGA?=YO3{RbQ$=r9SI+cJqL{*V?(P1U(!QFwP&z;U#%V;i`QWFoVsy zx2IUa{@cBsb$=XCma)5c9*o^x+{=9!G$+*q!h#^^w?0tYlKb?`%HIw(lk47dvQpST-g{XW3Z zWq}{=7&DG8u2P5aJ$xukafPV|8o1ochG;|uXyj&(-=@v$+&4HKwlj8j?(Q_OOtcgh zNdRUscK7a%4$gUYl?%V?2|w_8P=;AtQXxm!_x(0#pa)CPe{c|#Ue&*r$e;#Nea#8q8pbHEj z!?O|sM^{bZ0xQf>iSe!H@2$|hZ?mK9bO(5Hn4J}RhM>4&n^5tyD;}E1$0b+Pa7Vrr zSrg_DH8s_sJ%19kZ+D{-C&SRazJl#(MA2*693qku2Znb`K;qJSa{U?e-PNa{YH$Hs zhu)?wTTc?DLslfe;PsXiv?$m_ue-h{&Sy$s?BY0()2Jb1UyjB4 zstW9Q%6`U0ce>hR66lt&S$D2KNc=aRY+j{?8>;x&H`NU{Jl`SAerEvotxS88<^a*= zdmMMSTVWS_wu*!1*zr<{d+b8sM1cTAPwGNL%Q<1tbq{R)&BGJcD(F62jg0)B1<@y1 zX69LG5J?WFss@o*Bq+zunIb42u!!tmKN~JCE{1sFAkpUSq)PMC(DOX&B9ys8|C2gR z@^W%u#^_A2I^0ISkA6sdj})SPQxeMctf2z(oml)Wg`#ruoh8hZkr;v+ zKc>?!zvUsqm+Y}Fm&gRlwXTl`L?sN&u5$bp?tlQ*`{s}yszfuuBU8a$ZtC*U#pLHVqlz>t4 z8T8crXl#uu!|+|dXxG?PB$y_`MZ+Rc-}0DDlRQ8lXJ(+SUM|!2oTZ;ryGgr39;8i8 zXP;X$5z~J`Wv3M5?1c$9c5*$9)cHsJb<1JnP$XE2#FMh+<8fDX70$XDh((jNX~HgL zaJa&QRxvNQVSR(!zexvwFm^Y?0r0-GjWA*RG}zAA-HIvJFukRy^X4R$yUW;J!dN2= zid@6xS-ZmLgDflVm^w`C7o!zXe%RQ?^tw-F(Ls48aZ_SD*Vh$LI8_XM+bZd$8?h*F zREnxcKhZM>){~3ZlELF>0SxE%5w}Ih=rEUs(nZ;58r(@I9=b{T_U6NnX{iw3xRo5X zeM>v?N-!@fp7kuvqYo>`K&@>BD9sHAQx89q6Fd=lrc4`R5rBtGC(#uyQ{Z$IV|V86 zFiEY2TPS0IAO91etGOdqn?3A|9<&6ehliA>7O**8RH&`bF&56)-FiKIQFVelsptWz 
z=FFR|r3$TAl<6n2S-9bC6;9KY!M2A%q-${mh&?ZdPfnwOsMS&N@w4&d(_&0(eM4h? zcaVXrj5(U-L!s$4Hl9+d@;94h)nuadqr=o~_&(WXQV6of$Yl2N`uu|9NsN>H>A!my4f!(@_w$j}FNXuuNU1otI05v7XCG;M70#S$`S+ycmUfwMo?5 zL<$~Ft%8KAATZcrNM@Z-!mKqstPpx(T*iH&c!V}AvlW0^h!bpHgV03YvPZPq_vswY9JeYDk%A~ z?;7=S%SZoLsc3m=JB>6RBK=>Bq3%sQ;IcZxs~(MJU&`@jN(A1$8c5T>On?VJt6<%u z01)j|BD*3~G1-u191XeS){tXDys8JM8M{M&4y+VKI88ZAJi}()yGJci{q}7~-FQck zV(c!kUmupa9u&$-x#RO7ruQ8`1-F7M>5mLxT2_{etTGWi!u)C4?r>~2sK8k*W3b@q zJTjv?9>ilxK#~rVz4}|}4F6QLTb++XRyXO*-cC{-lMVk(&4O>@$4K^YACk>pCj=3p$uuB{?bv0_;BqXPXuhGO3p+ZiVXB269K9$?<`1jm zFUIb^k8?$5i&a7eTO%l8?9S4jb$}!!I_CLV<95dGMDwTN=>#i|U60_NmjL#r>p)^m zuh8k4C+eN$;f-8nY%e>CYEx)p?C!9l4QOoUcTBIa#UBp@s4>L^S9Mo$50jk1l(D;;uUO}L(syBCvk#8f z;^Drt@>o4?2GL#=0^K#0EDw7ecpWRCPf}y>`%o!X?EFrP1J;teXOiIJ=0Z@bdq5t! zwoym*Ok5D0gY%bk(P_)Bklk&0urMtRjDIzecH38U#+G7yd~7xzxwMex@kBs!MLFb+ ziG&|s5hU-gG`=}og@13&!XE!A^t6}?1c)-7*-j7GRdy zyFL%%FCGp@7goT-y=?c;R71`7$6^1$61=$oJ?;CliG*EFfmc%tz$@SusiY^V?wc%} z(3pj78Yk)Zo3}{`6o6bV(<{esB+f1$Xx2~(PMI5rA>-!IQ3;~ZvY`S(SBJr(MeanB zPQnNE)%c&OKNiJFQkyMmpt6Kzy`6UjpY$zUO_U*yZDTpDV;yj#MMI~&)^tc?>~3&} zIXrl-DP+~yD96~Hv5zi3H#yH)8+pRqH!NG^^kne*PlL`73&fF4Rrt_X8s9fW61(C^ z*icmtapy)sT=HVN_3>vjygFG3Hi=1)cYHOewf{k1XqVv}hZu~h$fXbMB*1fZC5)8~0h5!{$(I5JTz? z9@{<>pG{!*v#VO@YyXBz|LP6@`SRe`6h+upX+bw`2*#2snji&6O=>{*! 
zGfn0Ta7}-`|HBYe@2nGEUg?Ss z+xTearjErb;v}fm58~EWgI$&^bojYY3C%Dx?XJKRyG3#3<7%>SavZekmO}FJkHoxd z1GSl$f^G8)@J(PJwQe|0R+q9~X9w1m;n6{+U%N@q_vhn{4=H$S{ubJ0^NzR|m%w|u zc<^(ZOG3VlL667^EU5^`Zdo7d+&z))T&rQ}1Aj<*$NKJ$PQjOq-A%adhGyIL2pd%m z;09xNi>^9Arg@Je=Wm4-?2PZMut1Hp37phI4wh93VDd6O81=nVs9xZKdK!HEcR&S) z$4@4mQM2H2Zxx(qnE=LdUyX}W; z!S2*6hl%rM;4;SU7Mq#jsJmgDSG^N7>a#u}MQsp{dn(Lx^2U~hJWL<2gleCSNZywq zF!{-{gP%*n+%u_E?<>=1>|$NG2S@0Fq2**%St4}LDFR-@QzAaRp9XwS$9u84_*8hF zeoeYaraxx;wZ?Q1_1Q~STMW>t7mCp2dIFYMEu)oF{*q^p%RrXT`tE`f$b_9zxQj5~ zU11Q)Wa(2&6J;2)hX+TKy}(ZQR1x-S;nmmGw?S>pNzk^;G~v3#{Q|vuP(Z z+o39Bck#E4uy@09PQJzk3>drHu~GwO)Q+YL{`oR5X*C{-mBWS}M>2yK3Sp9!aA2Z1 zgeR5LVz*dKE-giR>NCwN6(e4hw!Z<4w-%E+^B8|f7z_N!`~9bdj~rgo^r&gjNJ)x z^-=TGVa`0&9Xy@+FmcorwyTn-CIXSa=ftWSlmrTJiGdV?f97Sf%ov$6JdCd$1zO4B~xC1xW9u&6m1w3PoN z<9$9;x8zcslopGxQ!D80J~8+*QUNO4Lt)iTj{MYfY{g$PcxWlz+M0lM{Y6;RJV0+; zZYJNW(!t?-E=+iQk<>e%r=N_G)+HYCF9tQF*z{RnkjTZR{v zqS4}F8eO(u5IKjZ!XpZ;C z6lLEF@N?S?^!2;o;8kl2DvaG_w;98`#JR%!F4lK9$j6RyOW%DImWO#=y06rQqKDjoeIZpgKKC*p^?23UmAEaL^$V7Lf@H z{c@mZ&M9(r?NuuKJP)rLG9Pb$6LqwEP3B!HhOUCyFy+VsV!uQL!w;3?@UaMdsu)HM zN+-az3#`+$W)|=jR2b4wK`|vhj@#gY42ufgqxHb8OaOn5aBz2xEGO`?KofQje&bo8 z>#fs{94J$j6j7Q?TrX3{gAk52bXZMfY5ME zNw2^i)5hZSGc`o?HY#3A+9udx-3*IuLg~~ ztnW^H99c6_9X;3caa*S=UK(02OgL}Iddk^3x7{A(E!Q}DE|`u?509%I%vlGfA=h<) zfJPj9H$T(?*WKO1d-FW8;uX8&1WZPin_6T>WFYv=tAd3f4eKvOQ`u9Ics-&V`#eXX z{=CJ+S0@4fOfQD#TV9egn|4#POKEs{b{?Ak>t;FQXUL+vIbhV40h<~Ql1~zk>HNV$ z9M(v}8|zk6hZ#SKuU8p-`4|n#UfCqZLjpIoR-$89FzU~>q-S3$z!PU4=&Sp{XTM?Y z%6ctSmk?m#)tNYtpCf#zZ3;5a1@Lf=4XE$zv0HnL>EfRXaQu)7-ta5rj9O+w(rEU% z#%Y1z&^w`opAUL^^U!IJ0(P}ok=o&4_`H>6W)@1YJcS$@JDK(ST9@J6gFmRh&1zEQ z!n%;I6@sSV5lPo?rAhl)cVtHnes4NW57ze(`RY8l@F)$W$L}U{3mo0S zbpT%{m?o<$EGaUFeIv{lK5IId++NzL+h>n1jNR?3H^d`Bn>fjhu3))}X~BBcz&lie z4v8|2SR?ZQf1QMX=DLxaxnUsKTLB8|MWIewK>y8-V;Maqm~ip~-T8O}iJO%IS3?V6 zuJ0W(y!`~dSDl4QW3y4)p`F&;?j;%H3!wXT3MdtCCbcu(Q?X4YXmBMC6ZGcN?FD05 z4^RcHF%Ad)N1mjxZz2}Pu~~P4Kk6QtKt1%;V1G6r>MPtpLAQyMUub}{n+3S^|VGO5G03!hhGR 
zP+`UdOmqz=Lyse1?ACJ7IxYgIJQvcQhS@lHs2D#Ud_}9On#gnSG*G*g2ZzU9C0k^= zXr_G*y4q!8-03zND*b?@FDiuSXGxHGZ7m5i|4!ZSm*SI!G3ec1NLBsD!H*)QT|5v1 zmt|~-^)h)BRpQ~z8$Ku~{V6dW}PBDF*2o3-GXuKw@U2DlEj)X z>0~oRLqkm&ELc54uKrs=SG6aiRY(!uUj2k>NVbrEOV)R1m$fB<01<+S|fdYaO`l%CZX90!&_R!+F3oT*ug5Q?)fN*%RpKI^G`K8M_nDG=dD( z6~Z?UUGQuxA3teoAZz6#DiVI+bfFp^4a!2j2++lsL-CbZC3-f9;fIx#q+@9;_y?52 zmZs0dZr^{@;BqqldRlk*OOMJ!7cq11}+!_#a7h<`j6eQzg zLC>~~=md%5Efsd>4+_Qh)f_#tQ4a3CW}dgek@3mk=UfODDgEU1?88)V6+0)CaBPfV ziG^Z5TpmgV{hl49*zOHAzgmp3CbKbea~%y69}Qu*$|3zg1k?=#lf!Q(VC&N=d~G@l z-R?}LDZ?s|ZNP_9P9E@lMLWw#(!-27Y~BmuFuOrQ=-kG<7;M&^rep;rH*a+gjCRBw zn*{iCrU9N6-^YCtbqC)>mLpU?1up7Lr0*mG@TYAxHZPutzny%^m;aG;mSItLT^qI$ zQ91-fq(QnR?!BZ-TDrSChY1uEyIV}`M8uZaOAHK9umcqvJ5U}KRKEQl-(P+)%*@T+ z>%7)^#YI4y7Z2drNJxHELr*xcd2>w}a*dy8z{G82zkE7uZY_iXle|c%p|n1D8-#yUek+*_L8Y%GGUV`b5=jUMs{!)>8=U+7*v;q|K=U0XY_l> zUBwcJdXfyk=d2_0VP9U;V&HeY;w zz@VaC+&T+U8EaI$5crq)Vu;;S_^;$~uLZ53YlH5M&*6_9yav4Aoau)X;wk?>nh z*Ue2qxogF^jsJumwQVBOy;+b`$1+dz&XbPS9klCk0dCr!foBJI(XpPrq`$lr=$S-# z+qr=Jl3-`uf=Zn27lmuGBB|z*37~V359@S-;b7)uGGmw;djAlj#()>PoWCp{QKtu9 zd2FY!kU*T`ByL=_1y222jUMl$mola3sS(l|pFci*AsoQp7Ie;RWXZl+g5z7XD}GS+_?4_9PnlS>~* zVX%;g1y91!eV->)43LA;ase2K0^l@nJbAEQ11kg~?9lMQb-Q+mV}BUJ7RK%lB{{)D z>pjli=dAGvdu^@zEKo6RB4_ABz>2ZEp`&_G^YN0{^{y8-53*U0xf<3~Ymi05gW>%N zKDg5f&|4G1=KoQ+M9AhDj>B%iY#R> zq!cpVI+O)TJDbQfxhFLIWHD~q#&T+BSJUtrKZ*OW3fKyS6mNZquML7%ZZ=_AaUIWUG=Cir9M5dmI$B#)ljvd$ zxu=ib5pN_jZ>Pe+qeZYb@jj7Ue}ay4$zikpT&!|$p%v$Dkp+he+21}L;^%K8$2>pL zfub^88J>W}GivFs*&~?`lV#w95zNWvPrgUV;zKj$ax)3UOldiK%V!eoi4j4tu_q`z zJ;;fa3^8dVbBaxM#^aWcTSk1c0+xq|6Wx~Z=gnwwxe14||6_Nn^|AZRDej0m>vlF5 zL6o{WeET?&E|CgCn~!{Km@I=^|AdfVOC!PbMJ4DtjsTHK9epU1h&er_7}M~cM#k(Q z0k_g&*Hku-ncPVd#v)Z2p2t4#+33>QOqUHjBDUtmATUT}y=?2qG>?9Iv#A`{D#xLc zWEnk~AOYzPRq$p+7;{oP5f^zy)<4X0FI6+J+w`9}>5(Qh+!4VycQ=SRIiG9e8RK!r z?k>nWU~rmp%K{zNLCe^k$x1V*i3$)`TTRDVR_weuNeAcNc+9>2=mSahLa?f0`JCHR zsl%QSe5%055fM@-)s#y9`x*^PuT+4`raxr*o2B%heKPvgmf+EaFKFwB1LPvjgs5ft 
zaHXS-q-0&DT8Hy-Uson>^gBS6bY7Cu%_U$cn+y-8EhEwvf9c~V75MaGG;U5yp&zZJ zAVZfAFUmvUPpuI-%q*O<)(COaDc1En^^sU3UI$DZm`^QWI)qF4aQ~Io;eYEi(K&M%={+<9e)I|8Rfhs}9B`)pEW zpbG8gNT5b&IZ1Je1L^v5=x_W%7Oh@K|M;ikW`$yWocM^w=N}_W*0b*8XSuK~o09Z1 zopiK!A-;N*j&^6*+0o$xG5A#qO>Yv|d9053){ek2|5f6KB<3}145mK^q``HN4}%W_ z;ru2Q@+n>&!>5YS;f*KmQ#v6Q-_ZxB1#Bkm!@-?(!?^1ct?&vv>zW_2!Z$bXIajAT z!}>MV5PjDG7S=Y36=gl~Pp}AiV4+LjB0i0Ek1?RE=y6->)KGf&og59j4 z@oO!Sos|G>QDq=;XUWB zt^T%HztsmzastZd+8I`Ow=04N56+x=`62Hq)s6pO#HIIkavi*89k#9TuSiU-DLbxxPrEr z3=&as1x$#K0q;jyM8$C&b7obcNk%9ZE11#nOxAIpD`YuiU#JQ1<&ejmnT*}tlygCa zx)|}w4W@AKLp7Mn*~5I-+Ovm6)9?gicOJ?nc)?r14bO6g;@L~##X!t2sKkSE3 zG=<1fcGfkpBa4>I1fyA1%)c=fhTScqud`!WzJu*#toun<#;+kIms7xPQ!zZMcubl? zn`pjPHm(TD!g(Mk|{uiv5ejQ z=S9{_MZmW@)`hfl6g%(Eriw@6vEfh|c5V4Wdq!>{H#^h7Wl9meoqmVdTt7|iKjq+m zt8%bk_B0KBewV1a7QxEwG#C@KiP#5!rJFyMp^YdW$M;oJnlT#kH?s`fvT)Y@>`4|} zltY810(@^6fLe@D7wyu3Z;ahtZ*>O`qpe(iq7mE8t;Vt;N4#RQrlm>W20|FSGv8tX zRw-KI*nI>SW-woNYh^?;^DYbys)yDR&Ux(&RxoY-sJ`G`ByvXv_DVfcjsaG#Vj;idxYLq zd`fa!ieXKB3i$6`MIyX^(SE%OthA5CEz$XO!E{OJU0nt1Cd>rcjW#5{QVE+}gxFK) zhfedpij6I`!1S*O&fIo^iGRwtd1!)tJ!}@!V~?|@jyZEY!XC~ucIO{x3YuM6;^%u@ z&{nz{7hTapRo;8fVZawk{e-YeSsBi6u%t5ELUCbB73%UNu_Psj(AzOEKazDoZvIVf z8myv+N2Q?VKrw2U_Rv;?Mxwkg3ub=KgZ0}kkh?3d(Z_y_5534hZo*z#XZMD<=`f$# zbk=wGgE_vGh9S?l66JfMQ0_=94U(P!9+fPo#t#Pdv)aUViYk71CdAo$z45J}L;SN* z7rsZa-LrcH-mfs_F3MWqe~jI!G}&PJt;NnGs~lnf8OD?Qj6hFmgZS}Ucg)@*Nw*MhGX#+9u96BjaL5zq)a0o=IE3|`Mz(2bk$Q-V7<+? 
z%rW71pZ>S=Bss*(frHa>VfnPPr2fe*TKcpQBR{2KSNT>-JwB7Ubi+tF* zIuj;4?#6@p_j5M0=0!uVbrLyrKnkZ$XM1;fA^6PCfG)9` z2#WiJa3{eB!VWy-dOCD)ntL^hzW|y^PZtk#n}H`|cab%=uzQDjOXFb&Z0}$@P@Plp z#iM1MS)CgsP!W_Z(uCzZMo`yL{@C!Cbvs8epZR-dvT}A9>>OJKKP4q#cughE^NB;3 z(sF!o^gEriXFd6|lzDWdi@`PaAvw=KPNi;TPw4{t`!Wiv+hrCE9mHa+tPE%84s_n#<`gWs5;clndf*y ztcwUbeycfp zY-Q~3!A%F8m*w2zA8!k0jNN$;H;2VfoW<|11IjUWH|2*8K2?0cxli|D`9dLVjbS;( z8hyGyC9;LU(6R2uuzHBk|`Ih}_+hNIB* z_H6R&zjzoiw+u|TeI`vxTj}F_Y3#dPh$kFx(}IU*Sf6+<99xnD;iFEG`)}@1_2MF2 zFf|QVS#F}yf!|1}QaL=di-&0!g(RhHG&+fS7&Sc{<89q(m!mvfST2Cv+w816YAk72 zo{Z}C?C*Zk9nT%yDE8|x0>4wNhj6naOt)R-{9>^U?igU7Nm~olNSew?93!wit{UX; z>w@W;US$7_M6KBE2d5y$)Vh^J!qme22zG8O}zCDQY7CR`n*kc`IcMh*jP@^=T?u=-tjYJUp%Af^IWu!4;qIzDnywLxuGSUUwEY(?@?1rfy;7i|qZnQl zJ|$pxgl=xk!ttl`Flz02YF^(?dPFP>)y+83-`!-ReJ?EtD8-J!MC`x2kjAYX1}S-! zEXx%IwZdq!M|}b+t>WW!!(dFPnL=l;QiWeHgzz)K8yZGj;nXJUp=4?`7PS+UU#%-P z=UKpc#_k4vZCL-x(w4=K9q~nTHJ+_A!m}$jbE@0jA$Em`Wpy=RQ>i4a9~XdvwF1^b zDTl)wJcv$7IGo$dgUD5*LC2(;zDteAtAk~jf8Z-^{#;L(mt!3zbH6ACb)X=obQ81g&gBb=9EFgtEK52Qw><0CxW@99w7f_2Pd)A5U=cGZhPv4KDMV?>dsih5q6H( z8*d3(uVuueK67}si1j0@>0{@SR!(J>7u1gz!4q3GSem9z9~=$Bm7Q###atSFQXN9b6nFROy#`)9(@uXbeTYDLzQ zE@VCDet6FQxA?=EDRAPY2%HwW!imaiF8`}Bj_hXV!G>wL^~R>N#h>gziLtv$XbLC8 zBE{3cPDds6YMi!R8>7y><`e^cp|V5>2UaM9gs&-$@D0VXAIz(gHV(ZNvPiRA46LZG zfD3a6$$!!-C`XeKgG+Gx+-G!X;vu48mjyohY@V^}5{ctqrPq4%F)T6@b4>QpJua_E zcz+4JznBE`WtI>L-648tbR}Lgi^e(qiPTM(_1(qrVV8OcEK|@U888teTiA~67$3Z8 zc~_k4qzic-%-g>l;F_#GC+T61rx?4Fudqd~A=J4^&Jp_BszEDtD)6G0i-*5-!(WXe zY*k|{>FFqPcY;3*YZI`HxjbxSl?*Ss!q9ef6)syghP^*k;lXzh36e`W)1 z?o7p$o+3<%ctH1VI8KOE4rrF-LY0e{1YEgEC8`VYdqz5*Ubl^=g?%K(eym4neF7AP z&LPLEMxu&3^AcZ;nJM!Y3j)E+NP+x#qK=v~L|9``_#N8tqU3`40kE)iDG9Sq&#c1)4bPvIvcR-Ed+5VsV3_F{CqgcO>5d+OsD* z-?(6l^BB9^+;4_gA9-+wrqe--vAe>#Iv}(9fmnNk4+=K1EOM_3W}Pu4m!m=$zu`lB zoD_s?Nv1*jqjAgo3hbW$m!7`2luZ4V1los6V7KTc@%^x$rk%~iwde9tbonw}pWjB> z1^F=MP9_Zb9VC+GFX-%&5_}byjPc&fXq4L@a`Y_gc9x0(Qkh0{Or-F+Bp>zHdWJ~d=^7;lg-6AK~vZ915> 
z>>xE>@9C_rQe149h-%B~sGjr)u)J3Z*83x2!^KeI@SI`c8mCqNKR*geCa0v+hDUYapF>Tc(e7MjOWNMmZbe1L6;*cBx10Fo!9U2My-qq06Tmn|jV(f1F zCmL+9jc6&S!zWq@t2J(ug4bthc5E)1gyo>{=L!0F939cuZg$JVJ{!nq?9S)9Ioyu77XMNJ^knSr-ZfqHN$liwUU%t*YyJrzC%w?Rwv=rwoXTH9cy=1mz zCbUm2fWX_=i1Un#bct*}D%577Ht#U~Zqq{oRZ8Hrts(nXM^?Fm`uG z*&ZvN{&94kG7TgdyZcmS0u7h>;#gT%eEfsu1ZHTVN!EAHW}hF(*$Cmii4xqAv7xJ7 zX5#JH>>1xT7C&AnAj|Y(nFpr=I(GdehS963-GUTcez_PE^PbST?oGt~eHQR)^WakD zdE&F9gRX5VK(d8(JAd3owP*Gce_ko{pGbtpa|=kj^l*ftN(}Xi!Xv(s^!Ta?Y^RJ5 zV>N@J$Y(OyG)4`*`h{rL?}ZVym&KBc_233$cOf|hMz2-pF3hn&j`6MCudMM-*EZ*e z#0e%bc6V0I2&PCkh|T`GvkndssWkX4qzWc2yeQ^)JH8X?H23IK=f-4otk71KXs}kdN=~P`3S# zcXp?-dG}`8n)rn@UM>Ucns^Wln@#Gzk3!{Y9_l;{N7sd(bnQ$zxKJSgJ}&^WqsJ5Z zt<2*oWdDCAdf=+`9b&%$Lm1w~{28%MaN2IS^V>_-xRt#&$(>=N<7@H~kuVeY5FELggxiSRU^(1)js@!zHtymN3h zHHrR7B1crfdZ$=;o>NHHULT9^E35Ei6zlyyZNs`~m7vj72yqwuVEVZq+{>w2_?@x4 zxhAfd&J&4;#jw6R#_pWSG+4gP$Z7j8JFIwDjV@uPXcCgdT?}vmRAqTyOKrF}?Y-D+ zk1vi-5TbaFGOn0yPHO5yq5D4Tz4;&sOM|oM^CdBCwpW3d7Y@+zIxES(OUZCww*(H? z^pJN3hiPn47UoQ09Uz-7(!ttmBvHKp#Oj&AN$(@RZm;Q`DW&*&RuXO=SVYgk5V0^~ zoc(1KR9}cE2RDt!UGaQ$xf+b)y0xiX^F)}@DTLlX-Z1V|7uU8=7pDX z?#N?MxC{BB84en6crd6q2G)MAqK6*Fp;(Xk;WzZr*Bu*4&%IQTJ5mG?a-VeUK0yuL zbMRkoE?%*1p-*qzB92E`hlWu)#PYV0<1;@|hte`U9+H5@TrJI=KN6-m^I#b-0*ItP zsY;YZTMO2OWDtlNUu3Cb&?HzCD}o?>Pf*@^kSkF)#9NHrEzxtvPd42x`}(Ya<>5i% zo+U)5jS*k9}jFLK7jaQ|HvO(DKo{x$XWpIglC^1_V z38Bv_A;NkDpi><^sE~*nPf9Uh^?MqUy@Q;-n+|Uc3SpsaCy`V^syHeS*YD28vUAP! 
z!Pstc$+8%FbyA`J{yL%(+D|_wvlfYm+7A)`IvY;6OTI|pkVouJl|RZ zCF7IffYCB?jr&XQ^;F=6bJ5u0nL?L3NWm99J|q{jTvDnLk-DUUYt{;pHv8c24-dsw zsXB1gp&AbROos_eygB)MW_YlV?Sj6r#pPQjIKK&U01d|OT5XJ3UTl%r$;=I#+eLU~ zg(j|_H;n8)Is?Stn1`WF0iIlQrtx-RctWoVXA32;qo|z3dc?t(_2r<@@Pq8!v5qba zPDLp>=9~$6M2&>U$jSO_c>g39e#B7n;7TW@zJI#pZu)Egg7?OSRPF04wr%5fA|o8HxR0YD#SKT9lec3IQ=>6t-E(z-10~tjx%M*WqvK3xuXWe=Gt+4UNedh;R&Y;KGU2>-Z)SYh>D=B%RQm_bZhfP9Dkv!2% z354f10~9lrdikz!vR4H29{;a%{ipcVQ9o>)EX2-}ig?u1j`XdV3G+o&5LG!APJS<< z@AG5vT5maaF8E20Uc~nC)8O~?O~fSO zEA9NkyvSAY%o9{ih54hwd@IX-EDVP^ex77(yBroR6QGlB0Cs7PqX!OXz}3Yfz!rBP z@3(L<=|*_1sTySl9I@YibxWX$4LCD)SH9i?=0DdGHykGHcdte@(8II)+c+5;Z>WCD zSj7odID21{hRFn@Tszp{d0P0(ShAW z?P>;0URnSJ@7qa%=Xp9}UmkL;Sr|F*2$k1)NCS3&9E0}V#lr2z>%|1m3Y|yWB ziF01HBlt6RSO3unEM{&Hf2?=M^;<=Fe%fT593?@{p7w|9>jdB$FArB2xzVGB;dr8r z$9CpNz$v3LyM3!ODYn9-ny`F^V8LrM@x+~mWCZPG9-FpM6nkHjKci9RQX0{NiS=aB%2c=~Rm^r>9+CwM zk5kLL*{HuK7d;52?>f7PtDq3Hccg>-#_i-*3S(lrW%y=O0{TVIrB6qXgf)Ye;3doQ z8F7K6yHW;UDG6|FM<8BvQ=$v+t3!~p2rBA4!7t)Cch}1RtyZ%8)k|kgv;ExipV9xb zJ3luoSl;oj#kSoUofx}Ym~4RZSr+-bI%u6zq**5eQDTyS%^PI!?yUe4 z`8NVCkK}=w?nuy6o=Zu z<2W5F{g8wYD~7y=RQUOOJ?T#WPM2nuqjo|Zt~cUQ_t#?}W{3x=8^WMuHAi}_74Z38 z0cuS4$M*pv>47XwP(H&P6Af;#!g>W4&^VR(QdrMZhXdX!aBO*&Vhd7?-OU*^g9kC3 zSnoC<&#M}xKkHyi=mSpQ!v|g+7J_HkM9A5$PmRMuFyEVxPdlYBdQl?DPl<+xj}>52 z_K$eoTtbykCE>|?C9D_pCEf9UA6ZkA3Cq6a!$9j5awMURTKeT9Z+sS7&p1R|9G{b6 zMJ155EE(4NFDLQggVbe11=h>N;O2!H^#18_p!lT<;^jhNQ;RXteW{FVX0x+yf-er= z`a(SNi#FuyRKvGJ(;;bWG^ZqKhIx$LJ=@K`S2mqa+_Gt~gt5EkUB)o!d5yTK-4!{; z?y_^Ipj6}^Za{Jd{2j}(m-`hVTsVzRcrgvOH>QIjmgr9~nD) zEsegFf}3-Su`aEfE-5@pj>c!hMz1`Wm2!@pIeLSxeOG{^bTaUlcqdH=dq;F`l!8Ha zB5YqXk94mej>;!k7Wr@_$~+9CKDE-Y{~8}MD}z`Mhbr-`Q^W5Qnd@zZ7Y<)a#XcGO z;9FS@iTgRwUN6VJo@t3J506d7*7*4PMdywKPOyNnyGgEw&@Q=0y!n_1E-4h@+b68s zSyz@6&;Ym=Ab^w~vQX;lOCQCt=Wi+x)wD-p#f#bG(Lg+0swo4@m7mE;-K{j@Q5wp9 zD#Rd@+f@JU88X5!7d9=-fpz^SiFyA$dbYF(M;fHzTR(BE+Nqt zjK*J-b$~dBW2CY>o#7%66IKe~^bLO)_gaFC(3p&$>qR){s5_p_+bFiZWd!>fyIa4` 
z5pt)kbQY|%!3FI9*CY!IEb5uc%{Wb91Y>t&Zt8-fUc30<5^pShBg8IORh%oUO#+jH zA!-&MqOB%Cq(m%Dx)Oy0Gb>q#)-YVNeIdDNoCL(86b{tBA=}UHp|3hK&@!d~4PRWN z=YlSfq^Egs>P!}VsB9!@`aQJfV=-=@n1YL|R?+*xze%!x1w4Hh18bymN%Kre>}syU z=u@HC>t;n)e^&xi4|dj7^n;kL51h;%EfkEa#>p35P_8CdJlot9<}!A7zsw%a=BPQW zZM8>l#_rO3Opy4NaI;Rjz|b(}FAQOQycS=?JH!3(kCzbh>Xq>C7i*H#9|~tSGiPS1 zB&xK(Z;d@pRM{uSBI@4r+rgk%C%zHoBWh0dmN$nk7l99i99?rkL}&5AWqEZ`wycL^RgaPP*lmcHkXIE%5n z&RIryp=&c|wA&r(mb2&RWexbUN|Mfy2|(*L0+f6!hof6PNJ&LF4BNwljf+PE`c~81 znen*ccNy|Gf2FN|>q%-v8q5zWg6rn@NXEHSRH`-yfBt2T35_$F~p@!9?z*ESBQ92P|w9T9FUdiIbBmugL0&r28G;OukfZTi$ z2n#$wde08-_bNj)-(QU%PdOof+Q}CEbJnnvo#P9~Sb`u$Rvh%5L)#*Dhf&l=`lXeV zn(GA(QX*Jtt_B@*)M@U?Ak@FX$FGLcXkHURg5E{~Y^sD=`-X#k#(bK%B@tZ@m*SOG zZ)x_1on&8d2CTYJ02e-7C$CLfY4r3wd|{Q1r*}5d9;3&kd}%Qlbf;+t&F)_Tr%mDa?MQJ^|8&e%W1rc2ZJavvn)?vy3$evQ z@LHq{W?826dO#???qfc+_;Dy}l|?Rl#lT`=1q@XTlF5@-(6_D0=o?sqc@@uSkHI05 z=#T{|I{9F;?h<*m;40nzJ|9PgW#TN2ee`h~tB{N(jXL&?Va;7!Eb$c!~z#x7~{##dy4@0EA6-rUvqrO z*j;szEq)LBJ#t%-wwzU6Rp{&$jibpwUlsd)Wy5z6{Jpapx6lSJtpP$|rX zb}KPCb?+u^nO%spQ`7O}{B1NloVYWKH_@+!+&jrGh5dye6Sq83-38u4sBT;S` z4-5B>Kpm4hBEFdbhewsM+3pA8m)SraZ>8hgaF(-^?4o&Ul+5kQg**GQ!3NpfM*b09 zJh~WblTvYf?>Z`z%DhU;%0Vq64z3TBlVu4KXg#?KU->ZSXc$Mg>{9^0j{>-Rc?RrI z7*5Q}HPPdW2xUCnu=2!Wajlv$oMh~d%W?pPB4y`MSA8$Pn58vX0ne{>}d#-}(9vTww_z<{Y!iT95Qmn5wndTmj#@%mO7ZU$3ee-%L zc{Z2?C-yVPMBz(fF|eN=p_$ks&d0B(F4L*PHnP7uAH-dmpurs^cH9fvS60H#9LZ?q zu#Bn&{~?Xy3J8;kfx@UXa?wf(^Q8FLiXpgMXiVcHRDice2n7{td;s^wkZGz`1N7|;e|Tpd4;8!~hQ`alHge>9nc@sGGG zaRv_d36O8Ai07Ie$>;Yop~kfe;^s&|a8fC~J}eF+#N{}4ML)G%y^g$4NCiu-7#h60 zNvx=uR;|y*Ou0O)i9|Zsd4n8MFNEK((wWO`2U!;Oo_5|U#WT8zY}K}oepeg;;~!MQ z-(8V#a91el8j!}CSA0CK%lhutsnUU8Y7jJ)%|7kCU{L-HS5TslX|vgE(}%;O8^($o zS}Y-y&2?W%S;Lr)do7zMIOC;FY$jl5i09`X;Qo{Ggt>_#7@a!_;!|X)gF+y>+X-;9 zP!>OnW{~Q1HqUV9!O`6#;oGkoTI`a5oTv=Rs!w#L-8QmbH66H?LNJrLO|*ZUr6mcu zC=-%{XWyQnHuCpL-p(R8{3aEq%GQ%fF@02WQaO5kh{M7PK7Ie+XefTl0}tzP`25U; zB%hSWlznVwUFnazcS%r#rIX>pei3lt?(pyH8qWBs5jtQszOiw{5{FqWg}-bdbbxWm z%jTf-#9HjA3fLUNdNMBRqUqL7PO{e?IJBz$j7_YS?HO4 
zn3}ltkgKXCFy>w|97$hE?EMF5ZEyvS-WG#HkFu!N4N17(RRyt)q0lhif|zG28 ztK^FMjNSQrYT>s%-?@K>{NRn15JdV)&}U{thdgGY)vPMa`Y;yvyeJ^+jbfo*ssjF7 z_mg;Mt)_Pur=awuVw{rjghqunk*ZHwP&X?N^s~>Cwx$jmd#nJTZOp(sPj*qK@>^>QW?8@ z8}0;e9dPt|KKDnweY|<5yl(1qV1qi{2-C_-St!hPMZc>PHQqQhH59z=Re|km zNeC;~-tLs8;o*~@Fpgyy zv^+s2@gSF_Wr(izY`1!fGwM%!)Drm13RoTSd->toxr)7(}) zFIZzLf;MS&C^1&0=M{r6^&KDQ$;lw!F_fgOiv-?Nwv%Bp0)~9+s7NIdJ0F#zYu$S~ zgSUg|JWK~;y+TkL(MgW#v0ZVAJS^OijeDD#sg_DNnQ2oD!BbM9_R2c)Ut&L%JW-BY zq~jQeDy38MB%sBa^?nY_gmO(MqNk~d7d{Knzi0-g9r-Ih{YDdX?}^})qZ`;b%;#$6 z8e_mscCHxhfG>*VTXvY)LCV)^*i>f*(&2&Pug=pEEUU3xRtN28JmEI|=K~HKgixBN z0!!qKsr8W%ydulTb3RhI?{g~A{~Zl$E>^(#S%1jJQOjrwBx9zi1f%j_&^pP3B=S-w zY+jHLdm7tFb=_rpzd0XOu4Q7E=>eJrFG)Z{33Fy8L$%s6Qs?)Vc0I4ay)Dr=V30!1 zr%OSF0o%LF3xS;;MkKRc1^2HL;VFOWt^kK9pHLLH9R*nhUeXj#F5r+_~n`iH!jk|AG?RKj;R?i_p1Q@paOh;=S-<% z7>?Dg!t3P{$Xi}c!hPbPYi&83ef}UVN7qs9uvDy+E=CXcNA&xmW8~SEZ20gf7rgx` z@x9+kEBy;m_GvnDn;K|&*ass1QwkD22{4pcM_w!+fz7`wQ9mjYC5nTo{8$-~`p1XX zF18;RuR?@5>gZr1LeIyZEa!GyeCC-xm@TY^tAqn%yCG)-T`RoB&bm)`T48zn181uO zXJ}eo4cS)>pl0|Hv5J}}ng)sR+i!KO+AL36GXkN`S^)dU$bvz005v%lf%ZCV-(>wr zbdj0EdIXqbBCHICR(&K#XKtg*BGXZ0Q6U~4dyBptY9VXWa$%Zk4(#eaPBK*<(3`i5 zaBfd3Zuz@`^&)*IbKS}zW=9+pIq=E+qA~2}JoJ7QhF%AN3NFe+<~afU(DsL>?9oJR zXfmcA6tU;D8>*D95MT9~3Kf@FZpO$Fc9q6D8^_zCCu4W#;?42sV;k;cC4eGhcfxR8 z7_E9o{OzkZj%pR+rI?BAJEu#kBtn?WfDhMP#zT8fBK4mXjakx_XqPlZ54A5Q9>BsJULQHH@#EvvO618?Fyb-Ye%A&E5FJDYQ7qbrc*X8(||C3%9tReevq=5apVhD+O zOd^Y#=q-b6+~J#tTN7Go{psuE_vr#ijmrS<>pRIn@>^=XyA*2=B;v?T^XZEf!{PXv zO6dMC61+b}5JP!sw7JT5$1Vqn877y<jJj~B zG4UkxZpyLV5&=ap-OCP!Jv#}7x;a$42$(b5L= z8M~t^E#PT_w)kx`!Lsyfly%U zFn1w(X87Sj$*+2<_TaxV|@O3pDa5II~ z9ob^Lqb~Sxd^P&DXkqhz@3}`K{lLga2yK$eFzSIN{k}I8qfawuW{xEO-j+k|cgH~4 z%nCR>`!`YXTtx?^Qc$nI7@Hz`nD4KVENskTTr&@nYcG(;jn^nIxBzE&XJFEoJ+w9C z4H;!v%J!<0An@KoQl>Kug9BI=`Cb%;701$9>Jz|`&j*>}U}zF(lO7XQ?0X@^BU`*t zZKz!=+oH?zuxy{Coq)$06V7>(1)8$i=j;C?=`5qF`o1nqrwS<2B_K*0APAhjkQV9g z?(V!748%e~Q87>y8^uQD2X`+)1rrNJEbPMW0^j`~?`MZY;hwwC+Vh!f&RrI$b9t5B 
z?J67A>s85i(hb3;exu;eHYe=e%JSi7XkmVdB*~(lP_>>1J;CGQT8krp7KJnQ$`GTMwQ~;j9`{+>`t#fkOu>I9<1y0f<<-m z#NoUKF3GGwksfC>ezr$YY&sPt?qE4W)E1;|&f86io{2H+jNg?q9j8B$<9;`AFs7^$ zRCoq3a7-ZhUFphpCMr<9Q4J5Ksgi{$KCt;}IY@6B2X8V1>6@kyT+>;CrH6&FFJ=k( z84<(Y{RN;?{hFvPXW4ej$*A`w8*e}Bqn*=E6JIV9PNb&6(;Y{Nq{TgYH7*ZtKa9ut zw)M0&@;mcpGj?|_0z4Y?h}}y`{1(Q}!A}9G^4gkKtW|`5F+MC(^MKOnBi#649X$H9 z0t+8F;MtBvf|%9Du#T}it7dC>@ch2*q54@U!`PjXrWyA7g>bJn+rz%em7pu82i&>G zf*)S)n6Q|Sj=z*}vyU-(AnXqnU(4X$H5quOolLWY!_jgZ^KMrEp^~a~q-kLsl$7Sf z+w{k*ueFux3a8?Xz$`Rd(?i$Qo+s@uvp{-P3V3O?5c!CwG^#Hjh0ev|EAdsd*?E-o zJuilnxnW@UJ%+qJC5zJ?+1_1(A1)j_g*t!=SnlV;_@KEg7iN$X8`Q%z=Sp;a4%on= zN*2E~gIdP!W)@q5Lc`3?y7t*<%-G$DGe#&JTF1rKIl|S>3Q(=o2FE8Nw0*268r|ig zxBCRVFqI?j%L4(#%V7Cm2?)Rv8tog2zq5-_w&4psd~O3#Tb}@q(s{5028qwcHX3v% z4fS?r;0AL_gCAZaKPqzIcvBKgSh1bl%N?ds2CNHdV>C+7TTJ(k5ry=T5|9}a0@}oz zBv;CzlQIv}dcAS6{zUdnssqPWK?#!ME@lupF}u9d!bdrb23^WoTv zILIJ%L@(hFb=+KxRkGoDGd-D>pO%50AIsqR7=Os$Voa)EE8(Gqe0&-9zwDJK0`+gY zFkZhB1bgj4dwD4LXq*{t{!od*+h^haDK~9TuC)di#_n!68N&>nMS>gG9B{$&3Uo-) z!QSN$2c2>evyQy%+ZI7V2-5C#NS76J428Oaw0F^Ud zknhQZp?C6-lj_bo5`$4Tz7zx1#jxws0@5)W1xx1_f@Rfv(mAt<-guD6@-}nujpj{y z|NDPL&Nu@WElG!g7bnRQ(E%D>oQr%tmNhh?k=mqxB3D(5;GbC({Ho@YM+?RA_~}wS zY88ZjKOE_K*YOZs&jX2zEFb=yB$=(Jgx zW*J1#rI$ld&Y$_bErjs$;pJqlc`OW>7C>#`3sQS=4<$E~F(D)yci+57Bcjg`kH?u% zaWWOgryU|&ErzJjhdlhF7>{Y`^>kPCFOun93^$&$Zs)h@Br04Q)7r}L)iHnU(Vs#8 z{F(>>E_^s8HxFu84|CI6C!w)yC4QxI@Q!GPz|YbY@EPkH&#{8*Plv3Od#zB0vAflS zCRlJjpKIx48QzTDt@D@!ce6eSa>M80Z5KX@uA7MJau(#;FMrT%WSN;!(%^qRlNL{o zKm+q)l&k(pPbSop2(Nf(>dk}nfQRI*|6!_dg5?w(%S4S*ws&{x5-F(4hX4AL!FFgj z>GOX{r}!7(oUbvs^yG3XxlIUW=9Yl)icmNWVPs(HSQJ@bj(Xa@_|Z{^8f;UAf6w@! 
z?d%3us(Lvyqsge9ScxxB6HIQ<6Kt=V1|J!_I|CNba;dH}^ra1IGj{j1jCsD~wsO|{ zo#1dS%jfOVggM8g>E8)n_`06Wn~xPx>X9=!TOI`VyI3dPVsQvht)zmCC`|cLh)Zif z(!Mc`fU`Bj#!NV%-s{Z+)MP)a5asK|J%st|NCtf6$eWitsis0u7_` zs7b68>v=5$i|qj{XLS}?v|SO`sqwMw%sjOJ@29}3R|m|VSHQPQ2MFX1h>PiJT7NDMZ+Yb7?9@k;&T1vicB#PA$%3Y( z=Saqe3$*xM79RCwdr;%{(a(|3h|X`OzX@Vt@~0Y-1^;M+bP3Kk2*ZJsF_brb3^YcS z!`AVBV0=rTXn3e#4&|fRh#M*#?-$g#>cdtSwx_xnz(jEtx5~$y?JZa0aJnUKUl(jw zqhSMcdnzF?#0VPh)Co-AIbzho3XC47jVYSqWS%nXyF0^!{QmLK76r6^Fc7Z_m$6;} z34HXfjBMH%2?K6L@TTkw$-lFaUbvlrv+m|%r_CKY>tq{QppXvonHiw2FCco)uhZPB z9F&Vq!nV?FG%NQlIpSRiPOGEgl|>bCS}KYiTBX=RgR$bD7hUyT4os)7^YjddRYcSx|6=<#n4KAPMuH(D;&kT;LXm;zqUfZq#4W zip9W*hO_*sM3OXH7JrQ?N9PVdOo}q5f=FekUC)PgBJME# z^DvkzV*d>r;`<>0?$|RugD(l9^9$%ju}HM*U^CsKZ`6H911VKbfE_k@5N30a7_K}< zcWzF@jB%NGcP`SLhkYbXD+g*HCxPwO9b`-V8>)4y0AK6G;HI2v+NCZ6F9%ED{PqyQ zYJUn zOC)AO-}!;gU*qjCsFC?~r%lDKv-`Q9iZ0L`T>)nkHDKRDdHPV<8(&-SP`OwhUvBXr zXVZcq#-S93n?%7{Y7zBuj>doGZ0~OII~wG?jf~Swf|cz#a9-#pxh>L3%i}WevsXI) z96Ui+=nN34JxuF(ngD9Tn~7BFXKJZcgh4MO@p4Ex-8~`>MvqG2lUWcv@0vsI3&!K# z7S{7r;E8_Kk~Dm+7OdW10Y<(~@cGC(PVt!`uE9zyHMhapr4^l$qZVNOoB2Y!%ppJA zLcr4k)bV4Pnp7VZ9$w`>47C#p0zIZdf99JD1gS8=1zZk!74#(M@sq|vMG{oH~gW(o`sC;Km)@CW8PB`;0oOH+e zsy)!`K~Un}Ebwo}fj|0kgkU zurFZ`9!BDNFM(CG&sy@y?Kfq1V#X~Sf= z&)8je41vInYMf==G>l_>D|wK0J6~+J+cwu0LKwSyIL;6ZYnlZvlFsNpzXEq3*TiS< zWEpGp0(Tw{CZ#Jtua*lX7lY7hG3&dN5y#R?^U0oqCq5%$f=lXSNcDbAl&)mClH;6l`KxAuk%S@iGIm$(YYR%; zPP?hsXCjaNY#TOAL#;=uoM&?Gi;zR+;A z9I9JbKKxKHy=@(ecZy3e&p;U8tyo5G?Tdko1qCo#^^$x&wTrGjL^Fey2I2E_3W#NsAvs7gJMIt_lWoAxH zfuxUnNlo-~x^8j-rj^E`<^2`ZAp9RuFf9SGd!cY~aTIy9Zwxlamg9`mzPPPXm!3uy z2*1t;jW2HCXmgEw)TWPazKm}f0B%vV6hyb1g91A%tR*brU440H^%NUaV(bpz8sg%> zbzH|>C%^+0>|Uw`2_cd+IMWjsAK+nF?|4jDJBMtT8U%-)ltPuH1Uxb>r?rnGF>z86 z=I}pLiQ!Fb2cLDCw&cQU%K_ql_5_V{PsjI385pP1Ne>L)AdPJ~FiAHFg2T6wlR59` ziIPJ6;~tG$v=-4n>qMawN0x=gAwmNJqAI{k!v$0) z9pIv-OvR&&-Q7~N!zkOq&hH~LfcfEJ@5Sk$6elUjvg2^mLbg}F&;UD%+qv~duCUXj z0(esDaNJ#$<@Wn9J)j(a$;e@9s6W}fIRu*Smq5p45rD92>Z2Zm3+@!4Wyu@rx^xFw 
zbw3HRb#q|*mp&3^dMLl*0ayHIF6eLI!^C7|I62Lj z9y#uZoLo6-JIZ3o*hE4`!eB#pF^J~>CA|}Bsgh$H{wQbNGBHnRs>%V3DkGMHWV< zvc0=hKPWaZB*0#M?6x4zDc&uj;R)zpH_^g(Tn$**yGZd6x<{9VgPwjNJ_|&n}^)S$zIfgS@c*cH+yIQ7B+_A+)?Jez}z*`s#-hBNRsn{_RlW*|8~Xt$!k4mif{^3F_w z3j4zX6>S%E_O8IzFY4IcIiAF4c!Ti_cFqdPgT@UnnsthGfJ`byi#4KHtyM*uRz!o4 ze(1QmM4VGV|NO1ZJgd(fz4kX zQOa_);IOw5TxRTUzP=6HBMP&_cuO>3?CxKPIf{f^a?C0PeAi0I^3jLq!To}PZ*KSt zS@%nz3TE%oC!?}{a3!W3%xuR%^YR#)qZ5V&V@hz5|35l(w}$*X84JxLZ2vg#89DiL zFYWh8!EK+i@MP-+3i;=Vtwt84%t?h69<3xX;1RW%n~z^E#v$KvH60W5ht!o6L%(l0 zeB-AQ87~=>`M`K?s6PswFr`)HO3;|ihkMcPps?u$CtstB7a6+)kvZri8zvZOF@)U8 zjxcPv5q+1h-b_;55{@kC&u%SP*LMFJm8v% ze|@^B;`uAgG@K10Vaec2cM`eWSG0O>0k-dn!QPss^w{oP2uN-*qP zPzwFEVxaM40X^3dg+F!|;_HGB^yjp##Nlot%vE6-@Duw<+^=?;`aK;FE>A~%c8VsA zxlJV8b74$OA`Iy^62iX;q=Bb@Dd7uQ+5_)%ZiC;;LP-lxOw>O;s?Qc z$4TJwk9qLU&ViBz1)TVG6FkJ&-IS|VI4450-9E(%CNg$+motSv_jEz*$vJ4u*q!l- zNjTqfm}`-m2cm9#h!RzT$v>vk#8!XoI>|CK6Q!{e)5+7Ja7gnjhR6B8h)rNUZI+A2 zJ)iTiWbP343OGd09!-U{&oiN?;0%eO7pZFi^ZnmR#{5Tn=);&7B*CZvlq_RGwU_nt z7zrWqDPcJ{p;+u6K~;3eLL09fsxp0n7pY4uW~$=%r+n<)=!P>-UlM3`>q9eRck_A( zd{HvteomT(u1x#%*k*y>&ez)6FSmi!OdGxM$Pk=vY!t}ucEWF4DlpK5<@7d465TVN zV6%<~?!Mz;;DRF!n;wL4hV38Mh@)Z{pAf?+_@_|>dn-SYzU%5!n0$^f;x zevp3F(Q6fm=5VK(6F9QP{YXHmfDG+{$c>z1>Gyavzy%p9xEo z(m-+DQR3}zkA@}Yq20ZBblbh2rlo%;DV>bnbw$8>eja)MP7)g<%5dmy0198UrtdZ> zf|LXw=tK{opZ{_;kvf?5jQ!s4IN(9vA_3Q63=bH)qm9;(`uUFS?2WVV0b>o(%4R5i zJ%rQPZVzIUng4K58l3=}GpUc4Nq6{1&$#k)JI9hBi zM&FV@bo$gf;#nOBlEwMZ5&4)@PidtoVyWokmxWsv^-!Iy=Skh0Ea+IyW?i`!GCS=l z%P-AGm#$cB|Gtuj`Hzy`mn=UcGYk?3V#o@V#p6!Q!w}<#e(w!vqq7RQ9N@z=-??!5 z{T)teNDoC^SdY^qwn9-)1n%$5;22{-+j1?Tr_Qodh&f*GUaG{K0wYw|P{)<8a|DUb z3V2hZ4UNL0v|zj^zQ4n=9UUj&pesjquL^{fqGfRNn*@wJD4`*Nk$5|^2=$kJq38NH zkbN5ySg&**_)i}s%6r>r`MosM-kyPPbtpae_8K{`AO~C;8N1_cC(kN}>CUMvJ9u3* z_Si0_c@soo&c714A{he4+TP^zVmaKY!ow@w-U#9o>4azMU_{ug%5#CR!Q8|FgRvu)E z`?NQ@sq%1isT?z%d6S#sA)p~t3Y(Qgp~Ye`O|6f{cZ3o08ze!W^jh za*a$>qx57G(=+a;;qQZObg$7M;fph$+U5kfd~XA3S@?wxWU$=);7D|RTSAY1mVj=N 
zGH9&}1iv7T+_Im5VeI$1SJ4x#FVzdRprwL%9QrW_X@`pRR14g*xg3w$nFR!zlZ{ z{n}^@bE6jtKHqY{bL{)58l!`YZvEvJPw;>S_I-V~MG>kxtZDz70E~?+!?B+v(KVZ~ zJB)w?K@mL6{Z2>|WA}scxGX6Tf4H$8%vDFp*u*s0KPM9`T~3o-SNo{yhitUgNJg1$ zJL!Vl*QEPK0o26BfI;yR!rLv3x7tfkX?FpDwN;%kNv%b3xs$^)58V;#d zpxqKz)Rd=!dHDw5#m>3zCJr(;%X9N$rX%yiV|wyTEa>gATXDh`jLFVOVhLBvaW$XM!56O)3`F}4&}D~h3v@ItahBpTc+3SoB9 zd(!35L_HoSV*AS+tdhG)>%}{WhFJ!rRk59uJ15DI!T|kKnu|ZQ6ER(^ks6hLB7fCc zo|gQY>?VFJoWQMS%ra{xJr9_b#XRcME|!o5yZfhr&ahFp_OP7E2n+ zajlv!p48W&CA(EYgw1m^?cLz1b1!E#Z8Em7d9S&H&AO_3g0SV&K!VMK1~wK@aej3t zzO_LCn-{+q7~%~t=KVVC1b5jyDc+$8Yi~%?11etF#OBR0_Y_d}pEFUMAH?46rJz+Q z4nLMu(%m^xX!WfS*Dm--@2fYG)F_q@KQ9-QC)_5FADp7xvUD8yk&Z;@KRQ;fpNvt? zWqwnZF=4fpBxQY|?{^jAdO;N0y0-I*S=#fPM=Gx^d?SjOgf@jomdK1M1va{*Wv?mYI1W4x`=kQ7rrs z-QwIzoYx!11#qu4eaYGbwFEJ8>ee;R(>qgWp$J zK0F)Eh93l?hEN&CmrLN{G3BIoOC%h0DFV^VFQoeAMjCLJd2t7FG1cS_{d=yBG%Kcq zR9Xh4PZW@qU#?SwB{{eyG6|2RZKL^>Z^;+mLP%Q~4Lc@R5!qT%tkEfDnUBG^?2#8$ z8j*u#(|NEW%Nv3&G4Jq4bsY4nz|2W5csug2pyl)wkYsv=bg>;skh^xP8)x7kJL5}D zSl`{55xc*$IC#YLjzzBxAkDT-@VnmyH!(frNP#-0Oj9N!7ra49h-LXG$iaRmU;522 z1QY+3;NT_^-05CTQU{|!Rj3f8(}sz2bu*P4Ou{1H94vZ!mG)RMc6U1i7Vb!c?c0wN zZ}YqKm`olH^9}S?d+^&zbqfG zIR|xW|JFO%qoJISK(E3KbeTTbJ8KqH*T{9=9`3CFh<&>pEs)M+omd4=C-+|dRir=9?c z)_I^{a*ur9e2iY%nua;DnK;cB>GQXJWa^|GnEWsa=G5*WExB)K=0E`|YsO$$Y&EUZ z6@l)1C9q~o2;4~aC&Q9*7{GMbh3Y=&6sk&BiL1j*rpvxCaRr5SjNMflU<}i3UlR_0 zG)f9Y&QFI#rt5AHVqS)GcRGVq?C=HCeJ2}DMHD^2{Ze%S%5>p{(Hih|yF6W_;feWr0bMVRI#-_Nv^Y)>3XlBBhpv>^Hb+duYjf){z~xY5^!_!cX1tdR}YRPs8vidupe z)8ofo#j$wApa6A~Sl8C;y`*YF3Iyq9L(d+@6-v%hN7XD0D^0~! 
z9tY{x=piD|Vm`ILIF^mIhFnPbO&bD>@kK*8=50x(`S+yZ`^_>a-R%$Iedgp>ff71I z@L9KuJ08FJMlhhO3mS}rEDxUpWy|8Y*Dj{`m9e`y5>}|FuVj0Cnl*Sbj*^mU0@eO?Tfw4nW70r-(|nsE=M7}w1qd#6W0kz_H< zv~bT@7(T*s5)^!)YnXMosj8tP<5STuTycF* zk3e(lWQfaVy`JF&M%2`}_v@x%0pnY}H)o=I?+!arZ(I1r_}6RrxzOc(L72aYWfzNyv;ML9OuLMNNmmNNV$la8VYY=@ zZB4}2&bb&d_7?Sd(@qw2rNcc;hx{k)M0(;a>gSq^VU3A6I(Z9iS^j}|_Z32EQ50;t zF`x8Gieo}mDU!=UXk+R^RZ|q;P&p5Z(!HSkuPixzgz-f7e`}I<#_D~|0<|%QV8pni zo`)?s+wZVDe|sjTv7hbHnrT=RrN+&RBA~{8_UD@>!?agD0+H9Q*ul7_i4w~Z`m0GU zPVfZ-_PhJEV=T-c4W{B;D1KsGv`L%oHf~-LFHO{XweU{TB;Zpm`A`dxg65;SPHN5 z%do^F09Qp=P({s&FrD#Uk+ym8RrD+8#Z5vr#)B0!9q{HTPf(p<0@>`DJ7;PQ?ZXPT zmeN)@z@EuIM^k)pIgay+ngd;oH`iADWZFz5bxZp8013 z((t4;l`3os#{~h!m=XJ%@_g2iz1QL(T{9oVLxxC2z(J~1o{A@wv(P5zEUi9tkqqc% zL#G1EFnqk19L#)9zfCE?1BJ0Dd}Rf_lk$&Gvj5@i+nO_vV7llmViA^8~H*OvmT(8F)y#laBv=gLs_Ef&E%Z5a_|! zUBx^4wX6^|U86BbZV~Nn6oqATN}($w7y=J?kg)~w=*IjLQfl5vR?E}7aT;J9T>+lT zE};HoKNny+6?K{aqI9Ait|xao8%1YA3_Ih0o}CW)k0b@B=WwX`KX%76z{4lnxs~Rw zaFqEwW{9Z6*Hl&dL(>QAU$7o2F*yt>@h8`tLg4IOwtuW80+p%NR98C&XWTA8!?ZWl zzF`OXIg|uPv~$4Zc^?sXL@G5l6E(M_q1@VI^yticByV;eOjb<*zZTZ*%KJv={8xm# zBqGsgRRLXA#xgURuO;GB0C+#PA<`y_IFtEaE+l*4hZ!RR`7heAi@jSbra8ixjwKwo z+L-AL%**xL8uKc~bqZ$B0$KKM9#d`x8~uC)(XRG5%6vDjVtV-c!UN7v$Q|U_yS*(& z86JBZQ`2^qD>b$p`F67CYLZ9-g~ORgxfmQ1|B^4dwe-A894;x%$A7+0=)5Th$lFUP zFtjKO7O&|cn|JimmF-!qUor(HuVe{m+HSlhYnBkZoqUI!SN1;04t-!HKs9pu>D<`W74v01lf;|!EIch~M& zsU2KkKDIK!6ez7cEKt>V!ALeMr@m81nfv33Sgto*Wj?r#zvMvSlNUYI8H{VSSuXMl zQM_bYMM`R;f%>rg`r^0bWZpJf5S_$)lR0>I_&Rme6p-EN8SrB~%SNVcMBd^K6&lRN zoI43vI<%4I6nr7KT#F!Yb0kdpTSk6YNT4|L_ielvh^Co9v#*bbH|!2ksp<)$CgOxA ztBwEI9i!%hBmVkUCs-fIGBcTfaDuiCI4lUU^GmfvNp?r!{LRsK(2`4C1n}IY5~AJp z;oSOuLFaEbd`J1H;H`qqJ^EykydQW)m&0m{F;KcMhSuwc;e44AyypCmD!i#7|8=k& zp+EVsEa4exmfS}Td{Xe@`z)N^bbInC56#O*x867mvRF;8 z$NwSg%Zj1PGaSsKQ^}D48QlD_41)vxad5pUeYHRd{$qDPv2b^=4}ZZKuhGQ}UHk3zFpPI~vwnV+*@mJ{UE|WpgUHIzG#KVRz4o7j-~%%TEDx zu?`S+H(j_>5rucoB2$_IU`J^gxF<-#UGF^lqb!1Th!mk`%nvHHc^w(L6%R&h^57!5 
zPexZAp_XQ8On1)2Fo!O>^7a){h1t*)lnh~qb`t%{SM+X60e;vKgEjo6bWxKq@HVne z)Au33I}}X3w8x>`<#L=veQ?83b?R-P1{Tb3>sjLpLLS}RRapbfWp{eTKOCOYP!NnC zod$L6&YyICCOkgZ);Z>gE%q`^;7H?CY<1tkX{I>CG3NJuZl?)*UB}VXVlR|tnu5f8 z#_samNT)_H3{{qb!ZI;P`m}(yU=-4wh4?Y)0~NC0N(2uQVf(~fh?eXp>QevFvR~<} z11lXxZl9uEYPU&?M=mUnNQ4^`8j0kxk2L;!A=YI@;nw0xdTOCKx#tmxY3I zUJ1xP#OP)+oL-y_pXepxk=ISH9m&KsCsXlu*kO9X z^C2nd$%C;z@o+Z1o-|edq|?lbF=R>v?jFjdv%;kza&s9x`r{9qG%bk4nu#nQf{(3{ z^H9C!gP?uxB>2TNvhg?v97GGa!P_SIj%jFnE?A*R%8!$0GOgeq)7Ye~S)S{KbislgE4n}3iGKk?epj+3%qcy&Te0=4OlR3^wC{I@KV?$P}}c>dQ3aj zGSEVelal0iuP1z}XIVb(FJo_HIthAZv#m;#9$m#gyY`OMOkG_U@!oTFJQWT{gb$ z?W2-jr-{8|CWObQfm8KS(%^HCK1?BN7V-IqF1&DfBG?_`jx!jmnEzG@->)(zS7rPm>QfoZ z-jac`$Yk0m9gZ^^nXf194-L1fBY&60!McKcSmFJcB+Y82t&*vj>79kLg*~+8zT^BZ#2C-@yMUvLL?HLY3UF{I8sZTO}mO3jr$@yGxVW4Q_VI*B8~^?`7g?T=}EkYKslCDbQ0 z5|5@AAy@u|-hH}(SZzrFO|d)>)f*%aPqfi(57SV6YX-u2N@xDLMq;aS;Le65P|Vy; z-mV;`!$yS|wmKTCOcqmHHBnF&E`>VL5SS(8P53o(D5l0kbLx%1o-5LjH|oGP$DyLw z1>!ax=Xye?U>##~Qa9~T9fmvq*!(}cbDc2*?svcKw7SQb-kM6>ot=-gCb3ESA`X8Fbp1h@aA5Q4EqEqvY1;O6qhb7Wr56abMh18ll`mDr-_;%(K{gpaHEXzlBcRZkuZJcRWig0b#^t@hBkGK4EuGsAD}`{cNO7Oow4 z&$gw>8cr~Fr?J);qPH#*jKAlA(v01G4AntU9O2wmJRn}25Bvs2*nHQTT73w>|H4^5 z;u}dES(!(K&PIS+M-hl6d?%p?*Hgm>@i-cvhlgzL(UHcZ#5RRx>$2vOTC3AU?O`9i z`zaf5s3hZr)jR2$O6Co?Rlszv7FjGK-Ue7rpb{dj@&_xf@0sjnP%rTG9U zRV5}H)v%v=5Zo5JqRDlEz@yXvuCjA3e-j4^n)2Le;&hB)=ivF+nFwcl?S?yS!Ihnp zWZG1?*0V>j|BN&9?p2`4B@O(rK%S&s_JSU^-+bqhJf!S#r!8s0SQ=f5m&b~szx+bx zw~dCeyh5-`c~5$Mn`qCoL>&7(2epK6(z%KqWa{(`@LiA&!52=FSvmuBGcOl&H4-uM z+h(e|pO z1qSbM!i|SF3O+qGgtP4Z6tTnx-p{GEo4dmTXS4U$E3Ijm8fL<2Twwm|@Je_>_2JUv zOM>J^H%x!Z$D8J=Xpp5#K4tsDaCte@YmJ33@e%ZPe<)gcvwaf-<`}rToTymEg0>-h zzlOdb8L#%x&x5R&#V;G1PF$q*yfZ}aWhTfTNrf1XLqy(pi2nV;KGU)BsP9=%?aP0W z1^&hG--B@QI+RY1r%2=GQ)MW=-yavhm`*QBD8V-7=~0oG2j3?Qb2;ska4GZrG##IV ziIVApZ}z5O@toxoCR%~ylbCkp8& zE_34Jw51a<$kT!x5D9>%EKlH%k2G8xmqnMFMxeAY(=*b3(!UGq$+p0F*xb!HkJUr6 zG50X7>SX?m)=YdD&i3wZT_RG;vq8Ks8A7{u6QP`!bSSt0+upJ+q>anzkyarPWjP4@ 
z7ly)L;V>dGYb?HFUMq1WU#t|@p%)LRg4j!zooMUEda`;s`&pCGGN}?HPB8o1CSAdV zx@n-n*xfP<)`NU*P3Pp#HW)6V;e~Rvo;VJlJPal; zzlK1=`Vx>^Ck$`aE~U6H2Ho});PTX0)VqiEY=6hn7Y1;*=|0N5@%}YW2xR=_MJtMD2 z^Fic9EOgzdVOetj==`xIXrmQ|MrASVCOrl=#+Ji6X+P*$uTQRpsNmAme4PEo4Si?z z3tIg3p^ve<$Z~*{%9h;t2y?Vz_u_LgmMFF?+%9p74QyrXZkdM>v<0meg#K{EGY6O# zOjsM0BE-pQ9ZwiN&4c;90w=4&Z!;R+@-49pD1^FY(GaCn zMQ*MWWqX*V?4H1Mg)T4JBq|Tjm{<5ssyB>1FrG;KP{%HB=9AWN!FyVV1_6+xut}d>wSbeI_jPIZGW6cq@}l zeco_nv>bF~<)AU1=_kPS{7P=DWBE$+BH`TDBGQ^KiS~MB z*lQPvDL?J#@6HMEm3h23w|hWcn-FPVs*Ux`1OA_-BRc(BA<#E5hP_N@dKNMp-Y$~2 zle=q)FF#k}(uZdF?xrgzuUyk#R`r!l} zW6DcYhEp5(F!XaS%ujv5HI3E7uZ-PAfjvH}^A-#*G6Qd>586zh1*BS`(?fJN<}v+{ zEH%b!>PxvGYe(R--GGg+wZXW4L@+Vi0|VIZfPpUCA&|EvN9&YN~ z=UmV0^?db_XH&DG{Z10NENCFD!dJ9oARnDo*%|M*ghuI$!+(Rt5MC1kPhI^E3rq@X<8l`3;UJsaZk)qm&16YY%ca?n!RERjf0;m3 z=Rn6{4O=W^?9NzcI*J?)aK9!y!<|S0*oRJH`D%GOUCSFyO?WtVzC61#dJxaNU|^>J zM64ePU3v@YSkGu&Sj5;}>RTEfzm5E!nglbCX2b6%SBdSE4!SBO9ot;fFsGxHT64Du z9AF(F_Y&aO<681y(I>h_yAbmqMB+mAGO96lG}JyR0av{sxU$fm^!F;FX%i2x&iBMU zE|T=jw#hKFiS3TK%!L8dYVO5*eRMj-yvI7$cz<~ zZfC~sB7*FJ8ym-+^q7VEEaTKZ!V)FNPPUnCVFj{`qg2Mvgw#`f(GML56l3h}iK!Nn zgfCoJ$2>?f;=`QrN~}ZEj1vC<^eQXGm|IeKlVp>F<`L{J%={Ud-^reh8)(a>cw8#V z#VE5udQI3u)V{OXU{MCRl1^ehc#$@DWMNk&^TQw7Nf)nrK@L>pL;vm=xUg_JQJx`& z_lk?L&LR}m`h)41W*LY$!n!NR`NEQm8sv_q8ruKlWA-B#+`qI-q|>MiLyTV;2NBr$ zpDMSz&KMUnzBR4S6xGi*+9n0qKow(m!{Yj2GNwTkso;bg1OhbOqru*HccekvwKkG6^#=XsmV8%TPFrVn4oEq*`pG-nkeBtQ9G6>%y14$Yo z)YLu{$K)5|MHMlO-nX2jw8Vgjmk*axUXXh?cT(?~WZZo$3zwX@NQ2@#iJmY6e!NQs zhp-mH3mK$O&gA0aHSrjby@4)T_nmo>*qh%V0)BkRA|69hm@Q;H*EIlhI5WCnsuF~o zu%DSj^T7DrXD-K43$HMC_e$9Tw=dv{qH%7_-+R{z-RS9LC2PX)IIv&xp7;`NQ%nrC@VKntfL=Z)|-y7Woxn zgFpM-h+a?puE#OwSRPz*dqBeCo2kDb75gV-;`Z3HRGRjZy?R-YB%1=2SND*5Ws^Hkoh$V^i#`d&dW#~r%{NTV?R+nX_hH| zmH>@AbKr@}Epq2kEB((W4F_Y=@z1YzS|W9g{5_cs=ITk{$894gR=uSr`~tLdj7GT; z3+eKPkr3oq0=$%9h*<4Gs!HXt!JLQjO5XT6N1m#uPGb8|0+^@h43ir6b8e>7aovBc zlR(}U`|NLZI7piU+lL1?oDG+wq(lr8;ns4Ny)M*a{V#3YA~P2_&B8)MztzBexhh?! 
z?Slr-%5ds`utW-QQUF@n#Ka%l}K;U$A+`p)jnt5J$%cj0KsQWe^+V2O2}u ziQv96zF@8a=WTARJLk4&32!Rc+6tlH%nn{vIdjwB8lop-ci%3UWA;-AT-crZ>9E8Y|m zyTC}ut}KLQ^S=;@hnwg?bON3l%^bR_cc{;aqa=I}V|Q25!BLBndhyG&Aut=STuWkm z%sXhM@HN>ovH+G|iv}=VLY#Msqs1`unR$eunG$n8sLO$sI1h$8y&-ReGVvFv<2WPs z=D+03W`svYM?UF64P$p#3^{lMe{5%&o8SX>*L_^Ydhbr(wdI7itcP6)700JRK0PE- z(|5*vPXX$@P)EPtiey&-`*V$WApcnoM$3EC52u2$Q=KaGP$v%RF+|6ENv=4GpgNO#ZqR zvN=#BR1B7qql-u53Gq_2zZHm@UOyoz zRq*;V9U`gZ2mS1U^JUgJ@a|!6qUm9{QmPm=tcIz>&s8KJV`1P&9wY=kC95ayrN#a! z`1y4v?yWpeQ!CFAY3)pSYMly~44X)Q_Cu=c#hjUE;?Pic9j(s!P3m|>(Bu{l$#$tE zID8Bmus(!GKK^*PXcqNcHW6;+ux_v*cgULboO9l+jT&l<0e!c_OR}M&npT!=VsFRQ zU)k@~$W86?wN{wS*q!)K18hS9ceKI*dY`iUHpX6jWyB(qDoI%z99Wg?`_u-o9#5G7t|9Yja_-@jY^7OAAe!Js&&lGw`_S zX*%lJCF0hZ1)P5}#BXaPb5;-0SxxzXFGM7>RU1AVbR}4#McyUm zNoe!Iuqt(0KT{26jTJ!XQnsfi)5%>`(8KSH-SvFsFtT1=)ID-GY+-Nyu1-@p*xA~V z)@sAPQ-z2dr(?_V22MA_39e@fV6TM+d`*(2N<1%YWj!1fFXZvWI#-ta4hA(r33ydV zz`+p}bnTfaT(qNrxh3CIN$+}M`6v;7DCWSQAJ@qBi6`lbKdjezQ5rgRpP(B0H;Jcr z4oHS3!dZ#`$nT9G=!PE!xNCkCiiZkm;qp;nysHEXOM;-d--(DnQ^1-H%)77Pg-drx z({*PxU}~iR7VVh}TJN@S;{^KXe?*8yPpz?WZe>TOgBd(!?5?WZ7#`f2Dmrn7;Ee*E<-^JV*eWfdvFDH z`WA!FeAzq0;{~;Nv5PFanGD}5Snlr71@dY^C&i-~7;-cfM|d2fqa*HK_!0i_m9k1P-0bppRmu;n#nqko(;q`2S2v_GTr_W4$pCL*}8M=X=pzZ!I`5 zf_cW<>|xyEJg({9Of+~TM7^_?ST2E9&Uu!wjj=mRlUZQzk}hgJZI69pSl(o>7OEV1 z&ABVigX2zosQjh`N#-WhwbdWj9xcVbU}?1bn?`=W2!}wgB5)4*NrZV-^xedG-2XNg zN6&gdN8}zPmTjpZdcfwo!DmR)-Cp`6Gz;^uu>Jqe-BgADoJ=*!hxM~!;lQ31~{&~j7uWMZ~!Qq0)yiW%(f_0X*6Yx|s z;Id~KBah8KRjSQ!OXoUU#cFG~aY6{+uIodvPpwG*&|LKSPk=8pCu7b{NwWN+C+uF& z1HZY7@Kkax6}Jw;ZY)7(;V2B)#3w4VqhO~>A@sz5Bo*ra(P-U7{92HM_kZ7{$KM?% zGnS`8rhYnzy+29nbg$8Cue0&n-9$WYu$4Bgd`C`}7r@>>QLyP{Ir%(U0*B*E@OE@C zJ`HiFVISpTh8KJDpYnnoCh}zX&Lq^$X0Dx9C!98IwQN<5g+35)p$tNn$inX4 zK)Uik2!3lVMydbAaM_xrBsL`mEc)}|ZTL$v+}cPpwUY6{t1K*P>!VE(r-+qv2F!|? 
z5BWuh$%Cl7G;@9~N?ngf#bq1mDk#wMD=TyIkV;TM~z&Stk8y0A5{bMKA20 z0GU!Ow=m8FoD2SNt5T=nvu6TS>SM0He-)x1I}9M5vAYZFtYE>|D>fdD7P#;|J4?nI z;_6GG+@k$<&^DE2K|gA<4wT0texy69RPa&b`9xH1HXyEY{?PZn6yA4?fw{b7dPF81 zjcbdrC*n6<>|9Cqt&Rho+&l=ie?**|n&{^-skp);6HAl2s9Adt;XY)7=Asm+{W8+Nx}MGJ-KXJ~ImbAiMQj(@RsaQe z)WOPJi9R~d+!YgfI8-KwyS==Lmuv`R|0xD3>5*XMznH4mMWdc>0Y=P!O_i#)ljYl! zU@<=%8g5=DnU<8|&UD;!lX;TX9iy%;1EgV0E|_jefbF8qq^SNg)yyeGJMT#R(p*fZ zjF5!sqf6o2(m;4I20HVbDee^e0cFhRJ>y z=TOERvSabc=veY9FAPpSEdp=yk4VU_p?|w#al)lMToCYt&ehvTF0D-g^XHjxWoZxj zR?|gAL75mg%(|WBo9KePN2IGX4|XqzgSYaPBy06=>ax8E$#59v$|cj}>ti6`E89gL z_JeO=Ko0zwh`$!|G1kK!4>mj!#Y;{FF+Cwnsj-7Fxe(4_iXm!!5Mo=U1xkrOu+iCR z1zC*Uku?TTFt|`;^3VZ2p9rwqe+vHc`^(8|cz}@vA7s}}fWtqnXwByUmOC!RJx?U@ z@Bzl|dLm#}8_OO9eIxmzjr7dJc+88<#iW^c>BaqrNlMy$Xt&9Lm(x#?wQu@p-Ipv( zQ%Yt&wno~p`X!0El@IU3VqkyBQZnhN7_Ju;d~MS8S1m$jfCQo8$xkLRCre zRyFLM#4@^k7qnh45_t;sz_*m`sa12(S}(^PN}G*|>>SjMFvT&adu-iL+pyhp_WqhN z9j=VpBRbUMgmR4Cy*WDx=WLfJidVcK%Z&#CcjO_V%bmW@48}u|tdmYs0{_#jAkkyk zT(_hE?84rWi!ocN-B2RNJkG}BpRUqj?RH{j#{BStG}w6ZI9Y9Yi&_hF@Rdp;`aG|r z($ybH;FLnJni>Tv27IErX%r5fE5X(oK}c#HscpC-n6f_AH|?HqB3Y6QnomZHtpaqZ zpUZY`YeaWm>VwuvwxdvP4QftnY@h5gL$(i(GgXbTHk~aq5xSU5+f|G?wQs!QDp3n9WQ3T1kmdH5&lhz& z&w>ob?s}swp>fZ_woSJz(TlOW@jWwff@1+U{h~dX{S$zJwHBBfd=TACoriIbeC#S` z_m^}tq9z#tPMb=>$6XrM%*v#dvm@{q>-nvX_(3PHuVNmScu+l+%Q7kVN!EfxRD`J* zvo8a|vy(PIxj?S0%7Xp9$&h?_7f}_wpw?k*Pi-g$pDtfPmmD1duL_HyTo4MXPeRES zTN#Y6EyDr@Uvzn{Nhcjqh0#NN2(xennVv4r-BuTou{-4!f=QFKMHj1CChZUNI~kh6 zw9cxImEW!LM+@ssOV!7R7wfr%lXGF#Y5}Zo)&MJoF?5`^7mlsupeXYnz1$wk9|zT3y<6A zRfFp!Vp0yv_$0zh)p`=N;ysPoSAZ9eMKOnLIW?)3fUrd+@K7NbX6m?-rT^qHE{TWh z(-=SeAwv&)XuuF-cah;v@OR%1u5zA`2e2Tq0S?r>T_(+eOx!j~S^g^h4Y|Qn@)7 zL>J?sJ-?bnEc{NdJTJua*%2ryp3B_#QgDu6%DNl^z&Ox?^dFso+LQUHd5rbl{rfK3 zJ1_-eUI<`CwgdaS@wpb6nHa#>UF#AnbeCD)K0(?NG}-%0X~!%Wz8EGlA7hW_l$jH{ zLK`!FJ?C7K-Qjr#+eOZq2xm){02mt23E1ld-!U&NJX_^*T}JA4mMQUw{{WPsWV0QKVGY z6GX@Z$m!^b=;E5l_%yA@+I;l&@_(##K$2K3nd%h;5aR+_!APGl$WMfr-Kb;)N*xkK! 
zFy1mBWELGIFXr8$x8-ut+CKpg?At_Z>%Ndrn+jRxF%py)6q49dNqon;Q&Ow~F?hh1 zW}X=jGe7d+-ci==ym171u|^YvF9>jpu_KzCTq%+;F@PTzg^=Sv2h3K;+HQSdj`&H4 zXKov!g`X>D=VAwn) z4a3fdMQA+tFRjsDOHPc5gOS_w;I`!xa?X4|y?r4CeGg}1o1mL!9`7Q$D>Gr^nG~oJ zJ3w9+Jf^L}Je<$Pq43WdsWuWw;sem{ZC^BjwH3T4>W!R^#@O8f0}J>j9N*C?H3xMWyNjtXz^7r$ zxIE4gL^lMG@kA4zU->J#mFIzzSZB{s^$93vY(q-^FlVMsDXdJD1m%1A6y+oF<bxpLPemW>R8yr|it3;NY#S|#y*3*P*<5$tR};|1I~}r9ZE-YXch-}qV@LS`?)Ow@_!c36 zVZTYR{GmL(r0b2hjCr^yULG(1Vz!>5VEArZ0@W)=f{evNI^+|LP6Y*M8~&C?=58Z7 z(~_X9B^x%}xJup|cF+xJ=~(B)TtZE)bltpLuYqrNw+|et`{O~`8u;!2v>t`_+*&6~TGIqE5fDVdkFLPUexk7h8A4a#TfbgO= z)$R7h;OH{Mt>f5P8%5$Cg@VeAV$gOTCd)prq|=14_(3ZlAGkfEZjyV6ky;A0Ov(bf zqL-LgpQQ?#nRq`p6>k_eQ(4{va%gHEM0Ll3`i%7?e9SUDIv6I%TT6RP@y)57w&z1_z=*Ls z`M=Y_!mUA+GSLa!_yYX7T?4Pp8%I2gydb)m2lc@U&|l_Er``&}m%B>WN&vG9Bxa4;j?X|YxWIV>%lU4)hwGQ zxrNs3dQaM}7QpVzD7KHgfVe7+!naG=%=kIDkmF5rPioEO{`jU}d318d3! zxNLYX@(p%~ekkk1d&ce}>}_DX^LAUw$EN7aK3nEeW1JSQ&fQBPAj8<*?KQeEI;mUq z=#vZ5uY6oBt%gsnCzGK`zTnVY#`aodV4iIVjdcn|?c8GgsUU`%+LjZOV=<6enh!_9 zUXaVrcG9N*lJU{uER^1Nk$%nTB&rn|tYbG7GUv6Bj-)}F*p-XVSHz=P>;@XP^*d=D zR|MJ9B7j`YB0t|rp+b2nHaG@gz+^M}Mqi0_Oz|Om|2#-r@|oMG1Gkcra%XX&boy`)S(3*^S8K;xM`e`ql4o9%xf|cu(`%mRM*SSh+jg; z|7r$hrxtWvu(U=gWS;T6`nXSVBljs}E(Goqz^xUNA+A-DmX&$pu{~^O_J|^uf3znX z%!AnXb_w`?9tD?{vdr+?Naj^3!~_1HsD(-$SwEBjQVltpdhIWlKDifv*2TVvQ+?sT(bNY)g**co5bHX95Fr9=rH z9BMOmXPvEwwPq)|(m5{BuP=c5uWC@RN0rW)#=bkAl%e-`Su}3-C)}P8kh@h3z6#Vv*DGpvtbts3o&*+Z+2GLAM}k<@_ON0G_HLYy1%*ebgAe;oBf0Qh zApx{jZDMoVue6A=f0us|_{Jxnep)ICiH@bPaVP-v8m!41s|mQ{GY_4jJh154A5ro! 
z>$@8ez+62?cxta zjrn=lX#SYWI~^c`t0|x*$b`6pF4B6co7yAu2z8{O&8K}-ruYdFJCFy@e#XLu!8N3y z;x9e&vIuPsgyF#har9`|SlBSD3?7F2!H1sd@sX z4`_VN1A|@T!8nPd)*gZAsZol*!X?pYY!R6g8VNpY3Sno&7c%MnCYlzXz_Q7?IAQD^ zs@i>&EZRRGUiGEJMny{G6fV<6OtUHy25BkDST&kuOiS_Q)j*bq1R8r^ z5z@}_Kw91t0^UlH`^uWQ`Jez}o;l*(qDoO*>tl|FZ0Nav$bL?j9?u3gG zez;-5JzEWM)JX_HbYSs>8=@r=Zg``EkC6^4jJfKv9J(L)M6j9lv~i&KB8J|c8HSri z7vuaH?5!lVni!smh5T=M0Ip9-vF=`aJ}3n>US^_p#d-R-{v3I)lL)BGz}O_EU7ZcOITm;bY^b3264%f_&K>08xdd@G?jWoLA@4e-#lJJXna^=6V&ILQ|d!*q&3*BTkAJ38uoS=J}@;_f9vgfi`UN0Ha*EJH)x*@vYU_Q&l z$DmU5GOE)g2KHNuLGgJA9LxzOc{62ksILrv9reLU`Reqoi5g5B$9mH$TtK_;G3 z4|zF4K>yFFhOfu{Xc2$P`$6VaJ7b8~m}C?M_xs$74MW+^k$D_>v)jbYl&0 zSRzYLFYvQU}c#E{E(}l`Q1_IzO4X<-QQEWsCv@!G!cB{ zbD-wcHL^zcB&{7z!-VoQTz29FEwa2xKKkasqQFEr{Gk7{efhdxq#*FwqGF2 z*Rt8u@eG{Yl8XA=A?h&yJ}J783q7v!AoQ#v_H{p~r%4gIXtEBF9T{w2PZ~DXmx9D+ ze~5lyO77GvVT~)}y#e!Zh3tFLzd$XB5@TIRNA2O}=sd3c@k~r*>~0rj?CxXaob$z& ztV>o1i8GnKGIn>qK?|RLe9fsT&x6Xje3<@$-CqJsXxmAD{C2pMxw)iK zd{R1*d=n1yJc^*${U_N`QAIbXv250BwzoR@0UcOykYsnH!j*d&@Y(eY8G6}E4@G35 zY9HH)I9`oLTUw98qxI@%ZR3~Lb+ zXI1?7oR8yHx*~7n1(E%A9q@`20Eq&gYTeU&7(S3Dtd9S?4DEFYmamj;p`97{`ZS@9_Re2h;d%%h-q zVjVrDo`?Z?Ik@EQOEL&HDEH>Ur*NcLltUx@P#V5Tj~QZ&X+-tP!`s`45X(IF(31>VzjIl!*g4f5~K7OcyTcw?s>i> z-_AAC>pID(F_eX3`}?S08tbic%YYG)^PwT-FqxKdm;Ox8#f;1GtPg!7{juR2Y3nS6 zyT>9RUn`eLh)dz>*it<9AOPFbt*CYL1lTUkhfGNiSYh#pJDN2Gw?7x)mmUX9!wS*$ zJqB=uu{+;YRxnoOlFg0178uCb-Jem0*f%eXJ8{Sk#xr);`C1#+o_Z__h;zr`1$=CO zFcIx88IaTC{eiqIg?X)GK&~#C_9}#<;pQS7>HV7?46Gy}>*C;WRvt_@eMEEuny8g* zD(-U2#G&9WO3(L@@t-r{@`4mN@OUpdyx|EQaXSxHT4T}R{Aw!X{Ue9o7J+qa7(6PC zA<5UqBIj0yXZ-x|Or;*(7oq|M2l=ql!433HZgU*jj_Yc4gnexSh)QA($LNu?af&Be-eB1@o68dVtKoevH0+Iv*7`r{l9P zA}XtLh4e1ZhF|L#yYt>o{_K5Ci;N0z_ws1`C$pILO&bZF5+yLK;#_qy0>-1<&TIZKM#KL;^2%} zB{8l0OtB9waf$(v8L5nlOZm9m#T^F{9*N3irot}9 z?p{~ff%3XwPDalVy%@VQT4{m2ua9lE?6U$r#_nR58NkX(i$s3U9B|!Z0oHp?!DH+G za@Vyy;I}v*wyu}}=T+v=Ki>l|Bcv3+-jl@S3yj@ei~xn>%n$GSjjXx2kw!d?$Dt@T 
z&zO9d{&(^)sm`1aomLs(sCJ5s{M|<*zGtC?VlwLFHPRz>FG=%2K4=BAJ_@&`^bl!GvmknewcIU4%9j?yZBRYD?360X3AD&LaMW^J+$s1mv?8<}7 z*W@AknL9Ph3&!W+C1^iF0-qUH5Ifmus4OY~M~`>pa@JOQ@^vB}d6131Pp{Hrv)hTC zWjbhTXdj2hwXJHqQk8^`eVmOQm$PH?wV0hG>%VhZ5f3P-6a^O7lc-2 zj;!-k5qO(glntK6?Yu_7B8)ko59>fp^hJ@?F1EAsh>!YHRdL+`Z6a0T3*U;%AYMTR4sMU2 zQwKvaYF;sZQyGCf->x8aBo?A{@j0g1Fk6er$9gp`7tLVVSpQJjt2)5q{hx0kinOP`}7u!owYllBN?wL(DkDmyy z-T6@T$DL(KUvY=dvHJpJcW(RaS^wmG(PocXu$!?vmoQ71`|w@c#6e4xW9*J}&P2D< z1)Sd%dsy&S08(aJ@H_Q`Xh-Hev~l2Lc$pGbt~4V{#|FUe>QZQQlxFvfOuETD0$=N~ z{S2=k)NXqfc^VN98%4QLq<)_`tvy7)cBW$X?hM>YI_a!W7f9{eEb!@0hQfwj#AMYA zS{=b$HqToy6Pyrcv+$uM>|%9V^6rGQ71dH5{J3nd3+=sQ0R=*|^DK#&vsOxwX7X_<~D z2bfQ--Uf4A4|h1b-;^RZaBA@vlqK zm>Yv1FQk&`5#f-PR|L~cf0L)F>!|JZIQ-|3hl$3lTOqfJ>~>3qhZ8bkp4U0DWL_(YC~dbxfQU zLWkW9xO#n^Xpz`loX*%?&DY6zwssUTo8bv%ln1L%D#FkmpdViaqS)^ev`ik2xhZ8N zeQzX~+ZBR`<7Xn8P)k2QO~70IIhY}Kn|ggZMkFSu!TXqW2!Gi@dM96{j%%~g$v+7X zaNDSG?;CO=qyX-fN5l2+3rTFlNF1TZ=DJP6xObr!JtQv=wdOq772^%O3`MeL}6+suDS_+iWtFtcIF_TvAbor^uX`O zG11TyXY86Gz`L>PID3sUxpB*z?Z%gZ#6MZ!Z}(+cg%Eu7tr!cIu)e$EC1mI+n`iuF zdus5SSRdIzJ)f|7hFdmjo$jZ-@suojkPf43=YwOxQDPc)ht5~b#ogWsxS?(nR!z5S>rj(#Pk=L;NQ;vp(bj1BD}qQne%e(g0j8vCrl?hjq7%vo(BXjvE=f`+L<81(#j9Vh=kgm?MPGF;l@x=8h<&)eXfq z@^R;BWh^XUxx0L}o32|1BQ3^)eQ-Se(iw(5_lq#!;xFx*y_RIkGRH)H9vswvLiWww zPqq3}u<&3e8f16V;d5PN-0%4YQDcp*agq~mdw0#2Wb30s!&GWv}eGfK~E4m4gY@7?0Vs}YI&k^dnXFl@8 zGtf*9se?*CiPF!8XV;RzKdyoF*S(@4_w#YPLJZcJE}=gy#KGrLF}$k`0jaV6WP`dK zu6kXDhh==Q{I?2CQ&ES}MgkC<>;kiIpWt%W>ETVr?milFcwcL@NPb{82-sb>@x2MW zZNJ-*I-PmeYK7>eG96EzI>7yz?hFcy-HALV!L?y|nmfZAc}6_^7a@;5s-EO#Sup6^ zl(4?kkr3dtke&;O#?g5NXyf^oMwf3R&u1h-Z*w-x>b^?0a~<@*%ybkuq~WXjRyq`V zixjlvz_05GFmSe(9NYGZKG!S6^|zRB?tUrVpfVbyUX;LAjUW&?*^|?^6tUwV%bUb{ z;y)=#I-_YabTlM~+@*ut1}?L4zbnu_J~SUxfupar>46KrcssHTb!*1q&udX+*|SiXHocg!y%nr|AcRN3GoiObAaXKdcij&H{BN2T zo_q9#dwp&m$j#zI^k^km*lR`=BLZ+taVbjlN}(Gsn@}zSY{iP;yx({7;^+qYwmu%q zPvqh~OPB_RDAl7N%)Up{rfI6p>Nnl9^%cCsQg^!KhqRZbI&-}tiZloG&hD|0#TE1}kKpD)glmX-L z5W2=KlrffK)Eg^?mwT3z-zQ=~rZ^vJJzkKrUv|l 
zD%9GxkU#l@^m$J%x-5;y34R->Q4`~|vPFDrNHe$Dd}_BV9OJx- zP~Penea&A_cHWJHKT3HJHtPXVEN`Zl7Nz2_Y$j&UJxk^8_7aboS^Q7VE<*(t1nbq*rWyk(P<(?AOCGqw zVbaej^yy$VV|V%rfRXB^qOi+G!22bH!S`n1D=P1>DzL^^v_KaJ={uf1hE~puym{*Y^P4T*GS~4Y;c*t_A@k@Gjq>d+EiYEi)^BC z!J`VQ+%ghAIJ4f*=wL|m_8?oA%43KX4^_r_W2C7(tuL4ak7EVkD&-85b@y|oFdehE z2r+(?EfUuo9cw0-0^5g&mq%v9%5Z5>t1pL*3m8Y4uZL^$PIB4A1qP?HyYhQAIM%I7 z&l>n($m23R{XrHrZupbj10i5~qZlg2h%;yC5?VbY2Iuy({IT6DI_qo$F@Kc=#md=m zsjZI;XCnQrl!4o-=Hu?WFu@fJKbm9Z5BWluN#%QNhd)&)D7YM{Uema-Vx7>kgNz_^`=S8KRmDXhWAD%1DR5S(ZrZWLO98$s!Q5{Xn<4G| zPA8VHl~HaBAD3-(LtgG}(ZOX?A)gaM)C@Za-sHp`{$Ys!F?M(3lsP^qR<*6knFH30 z-3b&7z)E?w=(&d@PGs!vaJnXn9L30_eh)}{#e>$ZqQfV!e7PIt5DNUHu|5 zJ2DcCRu#f&uP?;(-zNGuDFJK5aj-FIOD^JqoU@2da#1AI|CgK(w%?VF19nl7woRPc9{u=w-4IBTW$;KjNPrZKu;5F70{$7$Ga+5VN@sI%B9y;O$ zt4fh;@(j@IW;+bB)-ZE}pY6C(bKK3?-8Xw9BmtJ()GB~UjNKVqu{}ip8>0ErZg{?( zb==#kpp}j;`}6$Za#$I(PZ2LjkWYAMX|k%Hg*bE){!2(-J)_EIgr)0m!W()2JMs#oN~@A3CYVS5Yhww#X_ zZ89)t;%WL-tdCsl&Vpi(WGJX;B(Zyk=+%~dT)R022fUZjFGs{+>ega-_$UN={DR3S zQ&|kWT!zVqm=h;Jo!ZV(gDEnspM>uMrfW}g0~&fLp358(uQ}YcT~1^oHye(#H~-?} zrr^WRTnBJ7sQ0gHRrh>O`tx=%bE-34jbyY~dW z>U5KA2*?33zeL#kypB9N{DFS?Q-D|FqOez8NIz~I1qb)CF1_p^$XVw^hCeFc%uPI0 zlk&pH@zQkU6%FWHCxFEIxuBT6g?qYOA15AVnT!Ez){DBPB*1)FW{I!VGGeB@L$AN}>9lKiHo#C5}6l5Zze**qeET`rnDP zBD6qWoMi?N+QYVGxm@7TOx(@boo<^Y#u_)z@eo*o0b_Sndb8k&V}@wVMdlHb7P78! 
zEj(oWhI^?#4^kcZaO$NJ?8-BtGteI+T9^ymT^i3?rxV2w;b7-p1p91%lC$fo=$y&% z*gTYrRq_w$;k5_JUQC5&0~ru(eTJm{=%s?_EX3YqJkhwD`qn%rk4^JIMvr-HSF9j= zoJQcA;9?Z(4E;Zb&cmMyHj3jE6-Akmgh<&Mw)-3_D|_#~_nvo@w0B86EutYwl-%c3 z($*ji?JX5rG`#gb|3Uij+;h+O{JuXNeGp0utfgRgSt;{|dBdR>8f3YfGK%!^P;ho+NNsRhRSL+!z~v3)pS_0(5NKV*RGd5|kLbn|VzKQf}=OI<`9C z(VcuOmYadf3KPg+k2|~VV_g+yvM{vJf!5juphsIVUdtB8E%$iji+KdN$`?SN(-*Sa zVmD3IkH@C0Y&3X&pPEfNLqa#Cf}ds@Y`NP?670IE?w2eS>50dqQdRWO;Q_K>c|N@T z9sy%-EFoNdG2EC~jP8Mf_;H#mjruzU9(ZxVrNsj#eV%cI%cc}n+Plf>kxF^Yj+A(g>Ir4||G+!OV-t)1$-46X0mkCeS>%lL^?j9^R zhf1w$R*@%6arPkFeTo?3xu>C_H#cd?wTbYj~Jr{(9 zb$)o2^!>aHV?54@&{y}20P7>$3C+p;DnE8VvIA(T-;zet)14wa4{pWVeHN))C6`Ea@z2o z1y(Y4x3ESR-6}S5p4HjIi!o zLB-TOJ{%q53vh+^H#$vb2YGoo4q`^LLH6fEGWYdqD%F>QQd`r|{GE`NYTspU#w=K~ zJ^>VL_7kx)A8Fg%eC%8niRb>7(`|-hS+7|!+#e2vz=Iw{b>C!`Ti~K^17mC}<>{>d zRG`6tb6?xvMf2b1s zU$>m*%vkTpGV6T!*r@{Txe9c8wet z?dtjH>hY1vo!n32YZE}ZC<`v1y+f|}vHrfJX&8Pd1)t@frte}Ok!zE4Kw(`R6jW^| z4z1s4e0~9XIECZB>>|2OYCNo!C;=kyhbg_*u}alIO_*9Y>K z+Jnj24V?X5x;V_(-Ib#jxMizho6j;6n9tbV+lPj*-`hsG;w#{Ld*&IsrpY=B9&s{b zT%fF$-7pv_!HO`JyUX&yP$)&M+KCwH5KU%tL*UknLU=y&FNx7#PqS`Cqute9G%$Zn z1oI2DX3QKt^i+P?zdL zoIJv2-Mb02{-q@3{49ZG!#;38RF7mzOhvtwtmnzj6~(k)3OCDX!lKy%XkKpvJ+gtE z2YQBB!q{E)GE*GtduOHCU=GiESYJ??9_$Y-7iRa{u`CE5^PSW&>&}0i9olYiMU)3d z6J(ID;+ZLj-4{(GDUKE6?4Xt{#4T@Xc&3q?q_2H_Sp ze|qGIG?;d?8@wVm^BZ zpL)&5eXZB6C)~7xu?N_kKVuGbFRc}x=~{@MseH^hGaYX~pF(1KJ(#D53vQiLAXID- zEh-Ge=^@3){WTU1Nf}X>jRc?kd~hTKq=#Qc=MTo?xTjgz-_u1?ZO#!l%QSW)kqQcX zT8Q8=uIq-qWkQ=(~n5%yucl z8L}eycyu)hvx|nm8hJ2k_>Pz=)Y8AN6EVy+6BX86r}4GSVLg}*VTY5UTkQmCEqzXN z|Kwonm{>fgwuROl`bDaO3!&m}D2(%E&P=WZ#tE5sWREY-Ts4>eQ&t2SH@43hab=nM ze$Jg68n{WCy@S=Z*t|AbnB-#sFBrRP_cMd96Q{OTzB0o*&jpy=rjL9m;BfETLgy$S z-WX`W#o8fZM(!e%w`JYVc?x*^$O7_xvL8rPFlUUl1W2T2&?<{Ce5_T7KWu)|n1(H6 zc~mUOoy%eSkY{At-Ue!TIT;rpN=Fgni}aYtO%k^`6MnQOLU~0Eaozcz4n^c)%*!au zPhCxoE{edJk|NM82nN6EU~^ZDP5tTu*p9hQeox#-gGAGP=7Irgs zx2A?*tD1(8v)>rP8M|xLTmZMuZEXt}V}$oU$K?W5|9f|Lz0O0RtT}W}_Fi&(bbzK~R*`M;U-DQYQ4%$t!)8e7QCL3xW 
z^^ki?XX&1;skr51Dh76(rIpV2$T5v<_|X{;5C7~Tjx~d{tDfDs9gjeTV@qhuVKI2M zq8QvI7`uDtMAl7Y-OkBeG>`GX*0WMnH&_*_bNS%r&2H)R4{)km=CHg4+kI}g!UKy= zw!M2fAH3NdZ}7$#_5{lb73CPO%wUecp;_pc+|Fq`-~^Y%`Oqja4LCN_>60!m+}cr! z0#Rw)ViiP6MT6nf#v<6gSd=+US5lSJQLKMB4_%>;&ii(V{D@D4uZ@{7P|-VV#H29XaVeYbESmu1mz2QVHGc40 z(Ucs!Addod9FXggg@S@!+pl?>=NuCLY&KqSJuZzjNM%*HOCO`CFj)T&EOnk zcSV&3u=rZ2uu|R@vlQ9=VZlt4DtgcPp5+R4sXX`?rw9sKhIC!BFP42M!BwV`*wUX& zETcjpIlB;|XZ;})i#O4?4_F6?O)eVEd_k3$94CEl$?#Jq1DcsFPwM40dgxCED%vLD zf9b2z4(tk8mj&(W6gy137D7xBb67$ZNvK~cmFtE@fPg0ez zuASXyym7|*;%?!CcrDoC!g_R50ru>k&k0C3!V1RjB)m*esUX_=sF@|~VeGEQQWsvz zZ5Hkr=YSzc_?R>}165kZ$$+6dSO~cw*CY!+egSnEWVgt_ijfyJ4i#6FlBnb1Fo_f} z=l?fyM{_6T^~E7{W~1`A2UK3PnP_RG!n^P^IMUlj78rEVkxg0H<(YtWhWqH!mQUn& zNIr=2BVj||QZnbrSiC!%bvxGuVpE(4HJ>^KewcDWEX)(;d&rV5iRoD6!^cD^M|>ZB zLb&STY`D+Z-N9&Uc<*}O+T_$c>|k$vhr&EIQ;KjFx^dw5O4f08R~uZ-nuV|W9I;1- zk6S`jFz@(OlJmq9mi{h(#Ps3; z+VU;|H#%nFiN@QsAcvBruhQUYWeOA}G!cGMFCCT7K{>ZLEM2>eMz(w>#=8r^9>U?A zT>-hiVm!LeDZyGJe>B-;O#|-8!LDyyh&bv7yg(6Rw@VG5-{fPgwmnK)uMuA2=)pF| z?p8TlKzpU6HSIUSbjI$?It}rPhYRO~w+%cnXS>l5O^|o%6`GuLVKWyGoz6|g0~IWH zSL_2@HJM+^Xd=iJ#L~%ELh#m;LTs7yAN^{xfoRIcz?|K=uuSDO+2DJWrgkOa&SM$4 zJNgRUc<(ZKxiJIA2$P`m^-*HK`4z1!XZ?((F*xhldb;D_DA{w3b(#)@K;^zTvMz+} z?c__b`;ZS-T-2pIcvIoWE*`k|yTJEjPdIPYG_i-VyJjOBlvVT)+HEz2F2?RQshdK4 z?$ow=ISX8SM}UV4^l;*%m7Jqa_F%=>ox%e(2=)9c4CcAv(T`lbBQ1|68CHZN?hj?= zjMIdThhpV?+M*PW4Gjf&oAZNS9N0z@bmHKFK@RJ-?j>^`oTAxBQ&8|P4cCYuZPva` zRP?i;^iBc<`5q*X>-uTUi#)t76@|fC%jp%)7|?xP1ZUO;!L(04L}TV;{QjvFl_b2- z=7|z*nxO*2jNM5nJAu^>#_qOhqffa2_v&zP@j-DRKAQ{6*{u8Dhk4L+=1E(Jfi=$B zDL_xzIoR^=80S9&N6-)DLy6;bP*szmibkHOX2@>E0;V9EyOU{)10mF+7{>9(!cM=X z^gvJ~UdhTwOZZHqH|`^0hHTb7#%A3!UBu6=jULQS!zf5a?TyWpBt0NgnzAADZX8@a zy^~bcf2FcI>_)pg9JgQz^;8=N*jEg$$^p=*ZA&bl$)dL~g_q0$IgH)$_i5o$rMsLY zF&Aj=WInYzCHNqzN$213#^A70T)lY`c70{poe#las$B%id>I?}721QVxbLUa0c64!W%KGR~3iL_*#E>lnW+n2J6a!}_Q~6;DaOy3A6BWL2!`q0gS+9s!qF5fk$`$z!KM38dX2Qj3jPW?yLfOVB zj(&szUj4wbphKqk3{9+R9L>S=IqLxN(T6{l0%7U`JA5?A$07|4T&4blbL#pcc&*2S 
zZ~x?B--iX18|8;d1tl2JJ^{z>$|4=MVW9J`0KPhmknxu)=;{N^4}UrbcS$^@i;kTn zXGBuqc1AiR%U&eMCv?)kmol+#Ipaj-)$~^LdvbDB9%Su~0=0luWIYkZjr<~%)(ytQ zoIrZ@0=uzlDFu~(UeKMPO0LYChL)o|tm<;YzUv)A)8<)FoGk!T zx15CYaYO52>-BL~kj&Vf@s~M3t{fEd)ED9y#_pDFRmGgulSuVq54e!eTt2Qc(0$gC zwmuKQNNzFeeh|ZNQGlsOhX$a;GyDR-8(O z)rZfLr5-)>rFS+)tdGa854-7#i-Tn5gM7G`5CJJ$7n7?q#W8P9F-o2bK!cMD>GU!g zc)OSj7lS;&N-&XBUspxY$@zo#k!qKf@qt2F#2Q^ zp|6-IN<3UeF11BLQ+^&SChtkh*czI2FcJNZXR@xMn{?iWi{$T$bO?Bx3}u`K@;Uh# zJ$)btYnR62H@gZtxbr8momU8ZqQc;6MJDkZWw*$COK`?FU!3^RgkCaLfL;^UaqqVX z{!Jd_#C=uA??3p=-)o1LC31z4uk<1EsQ~UwHHSx4Z!Bwz%rNnd09#fYU~fkpXKb!5 zXiBo1*Ml0MIr3gOeAN|a1oGH@r2=;P&Lf+0eBs*760jArJ2jOwI`}aZJ<|)(QTI2U zvTifsPm2ZXk2#Pj{(^koP){XSCgbZT>FoZboo4>MMpTV5Ax10-M4Aqhm#g2=50ZI! zG9em`Hm;$yLcj{-3iPo~V(4Cf8e1`4V40ZA1#!8N2tOJzo;zN(} z3@H00PW#H;QKyFOGdQw1SKEP1`RNaey~Xh8tvGN_^XTUV5qQj^0PmT7rT(V7$elUy z5SN<`)9ybYGo@STm*7;)(@w(!FIs7sYZsZ+o&|TNB!Hx974hFOKvzZPW3^c%T68ft z+bl5<#ur1!zCg$}b|YHrr(m%s7p2=g@V&GweO@{pI%4=Rsc|9f{!qi&^=39c+athn zBG!1$?NZz3{Q1zz-gxu+xzP7WLa2D0gAhbNi`w_d#-AD-jI(G9iEK zZE~20bXX}JWy4c&G^vrMEbApl?3oW*G7h|pD@jeoFfG|vfG3B;aHU)xeIhym%A-r* zmbO3qjk6@T7RuvJ2_CK5`X`V)NWG@a<;MlFy&XYJLVYCl4kj1mkgNWaE07?dWHVD zl!0#+C!zT3IyzMUiact_g~G4VuSu$c&*j-4kJfODPC<`9S6B zIb_S1shD|!hqZPtth4Qj@X~rsm`s?v>JEUv3WxJ;pCPt0cDL}H32Nq8SsUH9fT7z0 z*fUWNTmse!4<~unm%JOE1;;?p34wei&qQ~Aek^65_Kx=y%Jby*Wkii|=8J>lcHz#0* z=YHyVxu29u=7ZSPNC=i*Mrvk{#rN-u@Hzxx_<02kY6bo(=J( zCxv5IIpPB=KHjmIj=I5eWYtoZ=Tl%ih5X5oSm;hqb_U`irD9ANGZqzNmXVyONI2z` z53hjb?)LAeuMZ_)bxjr;H{PY9K|<0{n+99EQ@}B&nS`Z3q+zeKv2|k{zOCO*uU!91 z^1KRQ&zf)$-Bd(Qj2n-ppNi4^x<8H#5i08^2NRxfLEYCKs_Vswjg}hz6|y_&mG-DH zvPEc+t_!)0-36btfQh?|t?g%-;0ng>=35)F+#+xcKLbqhU>s$h7C3C~5oU+G;LbW8 zF5az#DfY9-=9xb5-lh~j)=vb^+(&d+J(NOm#7ryDeA>9j( z(D7T6&`%~4<>IeV?Y0iGJ~#u6Mw39|+i{Y*>jmq$&qZlo462UVNITE{CP&H&A)6Bl zkzW$X>t0DLd|HA6dtc;CGobIsOl7?$JP@g5F4@SpoUKP@;sI^egHd6F3Y#KX|FHo? 
zF?Q#*#uR=&eRU4K%^6Q+_l58EP)?{A%Cd>@Fut1PxAf6jO?hY@7=>3{R?tv4Q4rr>1jnBSfeHANZwIAu zdn=pck9y%g6BSxjJq^yP@?qaqCvfn(zzL%d1m)Ljyg8}p5 zd&{-9oJ=d6Q^!67%I9FHeJ!W5b0IjCFpq<#Dm=L)P51VApqew6`J~xh@PIRUW)KKp zLW?0qL=4`TE}@*;5qN)BK32L7(8A0r0;l5PKgn$9>SVVVbI(yjGsfM&hjKv}2XfZ5J3xJ_gd$K877FR6gV)9rI z+~6QV2drkm*nNDk_~-zVbN6uWY|ue&D`R(DODuHX(^l!b0QNF=w^_~@X80KhRrrLl z3IX1ipM{zcH#yU4oT2p#4{ohe2Kz>J`nkv({}z>^Ggk^<9Sb36b_7Gpq9RaJ7J+-i zt7wI2G~Ra2!*tF&Iw)RCOphc&Wl1IsmERz_JKE{{Pw9C7Su%3f8ffeCXXO1)c3(a& z7J{@Y$gAd`G|99O&-R4k;+$04BOw8|_m_Znzb}l|&Luf=inzd>hsCaoaAf0vaBYVM zypUp9&%d^iESba68PLb-w*pKGF~dLAms)QvHG`qoER%6jA3|upkjt~h^Ap(p#x@OH zmNdj!p0Nn-`tcy>wE|q;JddVN^uv=|OYrgu30xMOMpVCt!iY9w9iBhQsiT|eGmTg@ ze#dfmD$nSr^$ld(`egWZKOI{2+R1}2H|XjOnYi!2M7+NBFik)Fj;sjFV+EG%TTAvD z!gmwF2f;;{Q4)*;mx8JF!%3h}UJBO&yy5BY+H!`^FX)L+I?`i*hZBS}to3kO6PyK5Mp1w(V%g<0-SSf2U?*+Ozd-(* zkQ((-ZM)ExC2_3(S`QgVhFLX#HJ=!o#9``Y4W?C4e_T-Yp$rJEl9P^HB@WWfC zru2c2JdB&f=7Rqg!RhlOoTJaxQTqwIi=pgJ&9+RKB&QEsp9ny-*BmtFc3AZ_nc~1l zHZ$!uz*&oYIqK(Z;ED#@S#xK?RkQcPHL9+7ozKJJE=4pEGbDfGd?8${6vos^!oi1$ zbaGfImgW{>s_ZBoledwa+8zUw>~rC)@e4Az_!zw)pN!A#Gw^^%2Q}`xM$TT(fcYg! 
z@c2?4nOpgW9vqv8U;Cr+#*uaO)3(1POr{8=6holSIEEa0F%fOum=jFI2d4#V(d@rd zS?2%`E}Of6cxyN3P^1>yOEO<|2jC4AYhiAp5#+I1cT9r`bNN7mHoEw# zXB}sx-X0VgyQ4p5fW%I5n)TET+q$^8Oj8bxdx1Rc_lI&t#-Q$v1LwkG>R%U*wl)RW zuJxT7sqY|}N8{k#?QG@*dq}p9IZfw&Ou-48)3EVF8_l25MciYvVEP)CyPLb8=+}Ls zM}wFj{(K}B4VKd=ldsC0%}-SJ z{C?7PGyy_fvY_z7ZPJ`Cq*v?H*p4d&cjY(HXTiN>TVFOTUlj*Y8@ChPy6;qMX94b9 z8jiz0MbtrlJZ#oxd!PmWFtd@6Sa7WEW3%oWVG|M&PAM+iKM_|4#F8f$L*Vq^LeN(DOEy`pr$N%pF|juneJ8)6e|(OR(>oZu zJHeQ7))jK-Q3s8U%)kvll2GE?QQE%r6)6eHg`N2^%rCWptlTs( z_ep}PTq&g1`M`q5`sB{!sc657hYJT?(BbnF;pgU=thYq~OIF!{^EMAo&;~=i@L7P< z7Mo)1lYT2DX$yFKhq*x>>p{!Ta^a>yJ9cBh$AqV9_+9ZgCqmB+HjD5eNkJZxJ*;T; zSwB?sXZKdO$D_>eJaSMzoOL%Bz&8D1(%xT5&&`U%0^=OK-}jhWK5Qfdc`2}NOggNc zjHJ=v7JWLJiDnZM@mS13=2Pn@568>DmO~>HEmqlpU6NI%Q{?y{6G?afV zgq{OJ88_muVmBM0?3#e0VcP(Bvok~%Y_%?mDm85nYX7WGGtjt2NwjQ*_Pn2x4;_YO)&G107u3fW23JjCw4yj3gXG` zuqVudgr_%!6B?Yc;}&CgCzLSli#ky)^9GTEQgGWb32eI~sMdjCEO#nGr->pMJ!UPr zT^0@F9rNI+I&)KN)zWO$Bpg&`xx2(0^j~v3>HM0`I*60OTfLqvKlqFe{l|9J|60D70H8^L#{9&Gr$?TtAQ|C zfNE~G*dP-v{Ps&9l-~&eJk8*BoxwQ?TXQUYEx?LqeLN*B;Ak7!LFEu1G}dUq>i0uJ z%c4c7Xu!ke;}nqIokwEi{UCL73Fu#+0Gms*=%Ue3yg9QFHRu1N>zXRagT1k!`7#Ga z|2`!{#~NtXf6U31osPQF?No2_O`^Xw6Q-|DgrqIi(J7ae!R!wYjz4gM_;J@caUQcUJV}6b5y967 zl!R8U#*o9yO44b@9;j5v zMSE`6=^YNC|5WN|)hwQ11hl@$s;C7>jY@B_T z9=)E5Cz?}HvAUK147f*pwX)&srg$*>wVQmnJ3u8*=A-hF2$ZQ_Okb#o!?(0z5EEzY z?urx1FO|WK0xlZYd0^U3DH3>)sN2*oo9j^qolynhzj)wXj^jW|KacOI-yo(3h6(`m*7FMM>Y6oWQO;ivcj za!j0cA#ErE&xxW?aCH?eE{(#Ud-8C>+&-H6y_yVHCBoRJnUJ@0rZke;M#IOcrju^<{2ws z-2&G8`F#-zZAXODKB6i>(OuAKqGM6Oqvtkl)j|+vE z%tDAU|4laWHdD=gF(^ZF@t52SdUkO=X)RBNaU=uO=XQ`SgV(5)ekR@=pM(uHb+ogQ zd0r;u!Sa-7pjB(gkyC%Ehe8n^mkL3}i4j!e(j;*AEQKW_-tb#li;O%}!t5R%zBY8h zPRnlLyHqXMM`sR<19v7qVlQ?>uE+&2_!{L=x0mwOiC7-l+ z(Y=jvSaUraFTHs{C;V$B7Xwn^)0#B+eWQ)+vFf7k=d&0_tWJapNLg>KJ2oJ zgvT$-h>HDKJfKsI8r6ZQ>E%X?R!jlMnOukt^n{OwvZOeFIzEkNeRaW(EOU86xa8Sv zNZ-S5OT?|A%I&T7(=+q%JlkhXY?_O^f+RQrf2?6KV|QzAXtUn%R-rF(LIra^F7j8w z=9^PVot_s&zbb|Cp_3uvxi^(d3PRGuyv&kg@Ltt2vguPK?ElVsKW+NSz4n9jLSq8n 
zx6eY(q1&`J1IY)4ba-$%1?mHv$n}C=DrK941=4Z&D0@4#>G)1O_ZGnA-(m1&ULJX< zF&=;Fl%TVoKi(*}q-G!wVlTN+RpSO4<|3>oMjf9&;^Txod;BxHT6oA_53Y0x;Bdb= zoY+0b+I`3b?HRl4NjAiy>)ssyF*aanE`Te3n((LmnedXdD>_&6u)k?4-a4g2R$lf2 zMTt_FQ8AG@u41XqjSxKET8O^ZqtrQMJ$a}Q1Fbu9;l}J&WQXrD_L-D~SjT!6eXr0@ zx2_P$iy82FAPM^K9VMCjUeU<OXys+k?B0aZS z1u8Z8AS3St3q#Iu*8J7R6>C{vPzm#<%@P-8yqOE{*sObUjkY&pGU~V%V=2*zln|x0^VaLTGPv#K{bSFXA0%6XyV)kzs3om1rQbAZG zj&)_ZJC9G)>R1)IJ2wFm>$2d>`Mcz|vyd*fPeVKFRE$~DOq(JelI9oLFn4@BoT=GK zR=0hnHXa3dWCQ#E*~F!i+N|5TpUu&?nB08|kej8l=+MZ;?}6^va!ZW%tEz$5K|Y+F z$KH6e3XWEyE~;J;Fz>S^&Z^33TdQUQwIc#hvNM8(zD7bZ0$A%IKtFRW9Q5wt{5b0j z^*4F&_W;}LYtEwIReew-w-krgOu}O_k>sUu2pktHf~UX#k(nB6Y2@l?RQ;Zd>6UM3 zpj|C7)Jg(#dFBzyy+*8#chFNm(@`@y8DUI4eZBSt5%JB1`jHqoGP;SJI{usPSX_vE zY(r6OIEA*2NQD*iKip+xJ!&aA+p%nZou=wzgm|8^w{m zXn=-n)>Uv|w?Q==tC?=*@UoY=??3B-q_9x9$-)jb7`waNqK=(`zc>R+SOSUj{k2fgH<&~Mip$fvO> z@HjOct}0$2Rid}(t(}?Zx-=2B^Q)-11z^rfC%Sv349qLz zg6RYg*c~T9MAKAJyPS`4Lk_qqdynwKDjm3STmWdbWIU1 z{!avL->)KP|BC`k_dIBl?;}TKYiLJJBIfWi@yeDPbgaUXFs9^)qDSAdN ztC=5uaV&Pvub}5^e-e)c%$X7v29pXhiO5(9)Y-@G)CPQU=!^+9<|u%e84qGT7lG;B z0nVzQ>Ud**!Ps(>&lwL*Sq&D_4sC}&)8jGxdBdfi02p=*+QO# z06O<*z*mz#;k8az{NvBVcOMk+bIv>>Ug!%8Zj`{bW(kn8OryUCL(wU<5V_NT(*?U( zHg!fU2>WutV&plwwzr--tx3j_N9kB^*-n2X2L|#B#@~-O#a*cj=D*)eMSts zMP9sy3KojMi@+jq6Axisdf}w>?j+n&R*G4n-q_o#Nw2?Ux5y88V6)tr%{|vSvx~H_ zm9e`+;{Yqwb%h<~#xThC87JN>fWOU4+b&gEq6%YoqjPle*6m8p>?8+h*vY(-ax>te z(m1+zl{>yV#KpyCvS_x@fh3OlL&U>kP<<{APr7-u*E|Bt%nI0PO^4c8K8!!U5OyA^;cV`ojqZEdUiY^Z z4tln?nUv0lCicd!tep!BgCvFVjT~H7D8Lld#=}Jmi1XKBNfjbR;OjM%73%qdI z^HPi`md1rimdOZCVLic(^z@cqV(yp&^Tx-4X-Xv#92lnC4;0|3uVL8pJ(sSZFadO8OV|y& zKYUndNx)4WJ0*E|X^b0&e)%WtlTnA5$84Y8ZU;A(lyR1O>S1KJ0Czk$$7$Br&XE(Q zF!)6P8WD!@<(iMM^Rx|S&tyGN&YGw$_MEd-$rU~dcp!MB2+zOj(4DtgCyY4rWuK5l zjkAfQpgRPrn+rjpK1u{}8|h$04BoED#c2~?(bL(-NU}^a+;hx;9VS;u_@^uM_LU5l z?@Gey_gEfE_=+q%l?&$wqQSp)9eK0rFWqyy5Z_gWpov!uefDS~G`p4p+2#XDsdLES z$W-JvFo!J11-}eD7Cx-dgrh_NoErcW_E>Yy)fi&`H}=_k+5{h@a;&TFS-=6t?%Krl 
zAY|)0;rTjye0zpD*e|KEtgk4keCP(E_qd>`D+jZ0T2cRCet;T ztSErr^S+bwQ+CjshvQI5I|mJ3JfivEnph813aD*O1Cx7{L|NaV%c8QdYCU6jHv6gf zy?%0OQa%`7iUh@f4zo*J}P`e+Tew+o=EYBof*o)n6# zcf@{6KCYiX9kYw%h|Wq+xGl$ppBZe{t#qef?gnC!VlnRiGX{gRml2P+NXT564^QWQ zBCQSksaI_Remj_j9S85yh6Ewo(WgP?ofKFS+f3?9AJVUHvr%_l9Nw^|e6jZjaC>#syc^@^DV2 z67GtbMHFWHKpd39qS}d|=oLwcMF_Tt6rr)^Uphg5JsI2-4Gx2>_fz!^IcI-_`c)?3 zVriB)3AjdEuXm8FunhR{GYRzH94A$EFX%{ME=Cr|Fn+dyR$Ti{PLvlym_;aT=}I8W zUrM6nbLP(gUsU*`PhBLYg4RkN=xuTZ9gVk~6Ad%bX*QeF*W2KI*+}7?P6OEZnRUV~ zGljIf@6K5TnqvTCciUd-p-xCS$G6`Oj(%nt-J|Mo{{Al^PumS|iSlsSCwW`~W<>k6 zAKVEjfz3P%W@!lD3@PF+M1b1Xac)M!)_vHbBUPkZeQ~6r5`FJASD9nVcf*VA-ww-njreoFPWUQXjK!+=yk+?rOaA!;`m?>0{ zQ!jIk z^QSG$Uz^SO@0&h8XYB5cpBc(OY-;sdWd==*-K}lYhYi*R!rC%hoHBvAg*R!S`{5yu zPwpZ(?#qLrCkpWHH^<0v(Ie$ef>{@LsDBoNa%S2d$fF`K(wx`8o$T zNIjz$_cW01P01j2FCAv6wvzyno78u6COZF4#NOqH>8jH#cNdWd#V=X+TFe^a?<;~A zLW|J8AQ&gr1k;{plVIJlQn2>%h7Gl9WCto^-YN^G3((cKzsuRAUQ_YLl)yV?gxM}0m_t&f25hsub!r5LKG7Gt6WV|VYI z=;QrUpv;#G!^b^X|CtmK3ud=-@qB!CU?HmM)d-ig%z>?p-Q60rf&#A#*1ML^M{hRA zd%iKofxw9zcKr{$0@gJ#Gz*?3whQBZoN$c-A4RjK;Zm4RE_8W8-IG#KNRtLP#{e25 z8q8R35vmP}BJaiuQdt-UA^Y-RFZ7XAkwa9>C=u^J&qRlSao4VO^g#7r;we`IA7nzHM<#~Yy`PBf?xm

W%L$wCGJSB{G? zx=rHCh4jYBG?clXf_D;{s7_KZG5DAb!^`5pWyy9jspUJ>++Bc&1mWnmpospSHXdx| zl)yzJe~{Zk$gV^=y!f1pB}d)REJ%b7o2o&`dFF2_wufb$HJtq%JWe9K!OFh0GKCmkE{EQz_XPG~Jzx`l4^>tLP&{bWai-J(dCAqOTD1z7FaT zlfiEEl2H52QRX#yMFzrh;bV3T*xlPeoDPoC&DRQX+T2ids7RpS2PC0Zz7z%y`G8cL zKAEdH6+>6^Fs0uGQ|q4y#|dY`mN^1&E4N{LSr3l-RznVtkG)0G^2}*wMJG}}Eb=YEr&q`0MWuXF zr4$ZbT?NoKW0*YsRY|>d;_#h84sPmwOeNkol4(ULu<2hKyb?j8YkiCE`Im{e$0y=R z--C2TT|b#~E)PyGih@R!6=W}G463~@!rym-Q25f1UTcwtkDp3mzJwR(KA1*o&rHMJ z>U^B7?1YUw&j=+QG8(6~}#_sxa=Dz3vpg3yK{4#j$F;jKtRv%mdx@e)u^OYm)}7Z5H?&o5@DOeR_ITHtU0p$C=GLsayS5 za$dXu7RyCIMFW@2RvU*+eZ`m`8Nl+a_Ea}t7JfBw;cA#WJb62Toc5T3r3YBYy_o~* z7F7sO@7IB?jNQ4WS;E#sdDcD3CK$-}8J1&=QLM|5bI2UPhOxUXV`hQ-^iJVWlQXt; z@{n7nga@Z+ki*Npp**h?8aGb@m9G)hy(Sp9EiA%JF%hhgSxeR~kA@@md4RI-h|K(2 zdRd))x0;%XO2Iei&W?7{@`JtcX~~c$Q%^3Od`8nmaxv{s4C+qWOm|fMCcCW*q2yX9 z2ozGuryml?=`BI}#225t&ZRfC72$jodk3prL96>SXYxZ0{KVMZkfSYTZ;29K{-+N< zjNP4cF@v6sCg+wrnxi#ici5nh@2v_sZVT+-*&rYOm1}^v_7~we-XiSR zM{X;7G&M^tZ;AKNlPxI7W^%BzV&*E?Drn}_!vMPW(6DjH8jVSi*1bf*Wyy2N0z?}8N8 zw3Om_7jKMBR--XDm0`gs58Aq%fP3W{C*5xrzDr_zT}OhewUmUzp2o0Zl<_U41yJ8~ zxGg=-3U@Gem*TC1zpqwtBGnhdc*gDueyak1y(GQ1zyq)4bMd094AP4Z?NgV~GtLnx)t8Tt42J0a; zTRR(a*T;i?|8DZ;`2e*$m5=jlBCuoYV!CgpIJ9OIL#s$2=+!!rA4_EL*^<-__P4sh^jHD^Wp9E?5A+*PJl_{^uOO=RVDAYe# zMHdR9@ZhdIoT1rABgWPczk`Wj)0+u@3OmWj{tNW;igZkQn~Y{N8tLC%Pe{!H)_1ow z7ToPC$jaVfsykSKr=!A9ur-tFk4}I)$t4h7<_DJ^nvlJw3fOJJ!|cIDI67uT2w&A< zh_SoP6YOAd*%FS@EPV`T?C#4{bL{``P3yr9QwU(}u6MNoe7qPZe4As7x7FA^Z^}&O zTzk*yy~=)<16igX6(K6dkQV0nV)sqfZ6=h!W|JfmnG_14>4mUe_cv)?yP4iT7=t^l zaL%#o{X$W1`VqWSUt0YDF3}irHnGMKr9I_Z>ytU&c7j#CG+4)LNu)2xQ0Ys z_)Bf47U3$15VZOfPS0JR1h>6PVf3pvT=}g@Jf16I(S06@=&~HvpDy9|Z1#QEkKN!+ z0{FJalJls@2=Dw9U}FCQbZRQI-rdh`k=q3@-bfb~ZKxEE#5iE%X+Da*o`F|>ij$k= z?yU2h3nP=|K+4>P4v+YwMsG2Sz7xmc?qcF}Ivmbg6u@lrujG%(E~?iYhtgNGG3)*V zdR?l8JPT&IyOn9+B5Wh$T)XJ##VmX~g|WM_`{}CgPlOwp4{c_VP|;OJUb~FN9KB+E zwl5HM=ep6q>!!dVO)j91Cmfh8%QC*x@q7&1wfH&W!0UQp(VN-ugt0qq5o^}z@Wy&M zormlm9z*JxW8%I9=ZFXg#xZucu0tCpoop4l*gN5DGd^>qtKhGXQ;D6i7mU0pg%ZEX 
z;PKa+E=mi+*zO{HGIk6qoLEK@h9V*0E9)AZ*H1dT4pPJB1XQuj!W&y}Q!gHpH%jSn zvONV}J2w%9WxaHleGWdCjKlVr?Q~}McapxZ0Im&(0g=xm>*tKeYP}Nd)Aq-fcuQKl zP#zelhv)m^5T=UJp(9pQQ8f%klsIcxfprkxGiFXsWcw^*(7xi}uvsd+)k$l+3b) z3ZWt-nW_7ItgOuJy?qEtNRs-#e~;gv;F0^f&UK!z^L*XC@ItV2k_(#cW4ENuia6A* zO+>rAV6{XM@G988>sS!<<_ldU)MVCp{B)teScB8R2$ zl421)toFi1YqV&}cSTsgwgk4ma)x;Z&$!){>NuXTJJteQTt3BJaA)@%IM3K!Y`ZDg z-W+J(KFt#QZ?Qh1ak|*;Q^AcNwS&{$Jm|PK9YP|7so<^~ei<&t&%*K;e8`$S_wj|n zDTT18T>>8a<V1>zf&G4%Q~@M>C@aw(b+i4*qvq$hZh<|1u8v8!1C}QQOaiBV>dgBEo{(! zJ0CmhXXCemqujd>ju08hV_7v7NDr7qQ%V=1-#nHFcAkLEx$b1!4S(=cDgf?<2+YnZ zrNQB0c-AQg&1}C=+xA^#zG*C+Xh?_h);pxhUqIK)Peu*PB;1*Kl1ijJB5rRp;D88o z4OQ(R+1L7MoCkXjt_i^%nZ>kIPaJf=unwtfzHsC_Ax_IC;>u&iXyfgU;YUR2RE_B{ zV;>Ld^&Mc_;3iHgMF%r3^U*iL3UlkzJ7#E_!ph%#@U=03syhY(M|(hd#_k;EYT(WF z_qk_u0n~S~oBA3hXtC3zc{9DREQ2xK<>PU=VHgoHWnJK7@ZTU|Zh;_c;B(cNzUN#RNRl~Jg|8Q;PuCPYC z1eAWrL-TZV+R);I%PUwH(uuK{^C+EsUA-7g9_NC;VY?C{DOZx7Ma%4?hl#V(aP5Qy+=q@oX^YX5G%_tb@%>7>9P}KVajP#<J;+sLDDy z;FlJ|a*>7L;wVXMGF4G%IS>6mJK)l}wSvRzw85~M@he?xb|+hFljCiIyV$I2bi)vr z`=~N?DD%TJb~hxi2}@!x3Tnj}yZc_kvQSfS)loG9?>!)Ye-We?OM}j#Ai8UNATC;! zkB0_?uy9}%;R#2;a~HPH5dTPOXEe~6O{|xtFb$oSchU8Dj=Zg4ecUe?Zd;SVW4!TK)hEE-Y|A|Z@dMJ7wxnNSEUX>j~s9L(I>NTxJ?phsl0(Ksvu zFJ-Kuer`h06_^iN!a;ETa0r?FXgrQDE5dhvp4iZ?PSw6B!Rkls#&+ofn7QH_XUfw+ zH(!=#5(c#2qb*oM3}O5qJ}AC4fqTcwI`r$Uu;UbC@@TpXI(PqTfh$y0+EP*2T( z^tOkjQTG(p3Qxj7HMZNmaE9u~b(5qk>F`=I7Dh*Q61Nj9rxu-q9}U9rz_Ddi$ygK? 
zCKQ0}Hhljvn)zQq0yx$SFB{guX`etMMPCmN+vPO09 z^BoVD7{g)qjDNVt2`lDVH z;f+igJUGjb+|Lex-M8{VVL%w(x38eBS>br(Yc@Wd+(V~)-%D2O#=(I%X|O*378%@u z^z*b7Jn5H+!LF@zwC)M%T#yMnM4~}Ccq<8P8K5Wkv1femVx0LPi`vSLg`3fZFn*dZ z+?!=ZTD;_ObX*CJ{By-Ut)qg8N@_6U84vDIJ2+vtjEnKr#RH7p&3Iyg^$R-NFCRC9 zPR8#11LlC~0&jtEyDfIjV1HLTb^LAjl1o)}fjhhsP`RrB<0k4*g?nBYD_Vp)2c+=x z!#J}3VGuMN&jZ24VX{AKJ+0jqi4hyK*!%l6-MZuuX_ilbKKoRVnthpk8oEsPUrWXL zh4C!YdVo50y&)|p*lqRa2+%xOL8_}p=(am~c&$1JBa9+x$*XZ-;Z_7^H+q4Un-*~$ zQN)U)ET?ASjEXH!1z&ck!+c=b{!0Mu(`>k9hvuN%Pd=)*nBtlku#tIU2|kS7Jss77 zq3E|~nD{(*gNxn8a8P{`yT`Gn$3lFuQlSuI?}_8a+yc^}6$00n z=fY&opXC1Z?KJ3MG_Fu*GvA%ZlpAw`O#hq+${UkmO(!L{U2an@iruZPVeHPRj)uJH zA=0ur;D0s@?tUvH7N#P&y(b@i%mc8f(1R{*kcEQj#jtK*5zL5IAjfN`;#oT$?v0v{ zU#d?D>L<*G?~L8u6xcwI=YKZUzl^Yt&AJ=%=AuMPpG``i4TLgw*RXvy1phfI__uLB z4qLLEn!XCQRZSvS*Diw8iN%l{FAKgW+^Nw6f2@};!0w-8uzJaIq7xqmdiFW+Q2h&O z!#YY2#^SuXbPU{bhlb^_zPn~N!{0~*fzL@Iy!sJc`7r~lS4Csn`faQOyPur*$%WR^ z5VrHrCo1w1Sof^}%g_1ZlU71U!zTg%MKK(4c87s9QBr3(9V?M}?0EJlUa(0pn6Cp< zudwgp220r0sArR6Xo`-E-5oYGz<<|mxxK#uKDe=cy`~0;On4wLj&sKR#uB`}P6=-; z)g;?>z2GHR1c&Oz!MD^f8pZ|Txsg1~pFBd%tSiZ(EfMhIOBOWBz9W0QnrPkjcsw*N z4Mi4QrS6YAiDww=yZaRn3-2E$-KSpBq@XNp$%(`(Z`aY_hePC8c^){K2E*lJu|)f$ z6xO~f#0^&7*!xtEHpwc2%_{aBTCWs6%khYD)%>H{)%XT8J> zUS8~JKe^ZfCo*=|@l+RKdl{Gc%?|p%@Ib#_jqOW@1oL#=aEDL{Dt=)1vZ3Z=O`8vB z`xe6KTM|&KkU__7T8zK%)#|dhSR`xTpaW|)sx!0AL-T8*|=j-IJ)Ytpd*38aCIN!OZNjHT-cXPJSu~Ef+9S) z#{<=WD^v4UW!N*F2b4NNboW{AjlnFu&DfpKat`NfDhNE(jDY3gA=uRzRv+!^FfFu3 z|NU(DS-{xcx&7SEhmNo&pU3>as*ochOM71|#B|4EOfi(hqk7IH#mpZ<0@z-+Ujzz$ zmr~J(p%`A3gEfY|bVO$s1=8{phF$p)6oTjhS9}tNZ8IULw z1FnZ_NZYeMS|*Z<<%*&BJz1GL5!K3zQDVp)d&DGZ(1K}D zU&n*#uN`2hwT5e~)<&~-##}S3u-m)3<7TJ{usl3W8EXjVJ?01wtR$$+*xmWDn%MXB zI`{bqV|U+}PpxDMwDzgdG0Qx0Y;F;%q)Fqxo**)APav#!Vt)8>Lf|lYHC2_2z{&Ho z(P+j8dQE*l`PLE#LOE&hEv}2~KY4+6^rv9_qXfJm+(M1^z96svWipRLBq)e)CZ*kj zwA3mOci#xcKPE|Zj-n(;)EB~n*WPd@(TISW0+w5q;B#A7jGojh=(;lltY!Id;>SD~ zmd@aA|ItIG4}1(?WRCh$^=Ix@nnM9&cc)J3!G$xqg8M7xVYmd_XRMil@*n!RY5A_O z+N%UsK9~$*cji(jc^}N#ScqABBvGO&nHUc;jQ{a 
z5ZdcNS8egb%R35~(^Cx740+^)LnyfQU!sMZlxztjt|1X}QyvAgshYiRO5Ys0NIMte5LZ+~Hk zE)T|WY3G?oDCht3yqeI}a6u64?}VG>d3ZcQ8PA5Q5buW`08fjcEJg-QV*KbPu|S-( zE+0KU3gek4D@bHfIEe4ehLuJiNtEn98b3Dg)P#I|LTYbs-LW_%&OE%(9ggQoO!m^>uOu-?y~ zu3&8coBQ=y4fi}}{*2>xD0^Y4KxMifD6?5t?v@2}k_oK;pc$@a?5=CQJ|1@R;mSL0 zp-_!^^3rEQ+=>4LWVQ=#DK0^yOA5HuZVvgF;>~U?i=eGq3QQH^scCdDrl#fL$AQ1p zcjI~@y*m=rNER%Ve?^Y&JVaflCZHlo#XO@ zw-K1Mwt@y68zJ_S^Py8Z2!ww`kXK*Fp~IpgtoiMU$CWjx);J|NdW_vx8#sf#&0X$% zk_Pf^+09@(pwV<|!LLl#Au-5@m-|fNM6^A-=VEY%7kB{#dx9+sVC_g|lw zAO4XT?fKw_^KKX8R)tBpK*^Sr|L_I%NriChk~nNTUO;V*hTu1AHtUZ4Ndw4svZ^&2 zPF`bmAg|n&s5u)4`^UvAaoi#JKYdofDFS8&8K}|Lro` zZ^xXO+67=$?+?$dJxIqfStK)yvBZB7)*DQuF-=pU#(@Vb1Ls4e#t|;$#cX`d*q!h( z8(iY`zN6#xTwr;4P)?o;$w&G-%>LS7=}JBx>6(R?@>@Ci+WDYt!Gm<)sW9B7NHw=E z!o9M^I44*Z8(w&lp|}9}F`)n^|2GB-b}gqxU&8Rn&uq+-_(El0)RDujvGBn@9X9Q| zMJ8<$(3{7TvFB1EiU%C0Ik`{Bg|8W`Z#5c>Gq;i6i$AH|PUeR%3ccm^N)Q@nZK7Md-MB z9O`e5B63|paCImTPW<^tdIKxz`N?c%*q(*rKi<-7=}lze?s&L;AQhs$FO#l;PO2G` zio)OG(ftj7;C+oA z*{Gt3nQKb0^1U-|NP8wwyD$^XwfUgHw}ti{?%aPn=Ac?HyV1=tLzfHv)*@3ZVIE_5 zZ*J+r8>QufTYv13cbA7Uccx>;<{|Ebi5qB-6hqlqd04mBnx>!k!P}mNsB>NdGtF~I zwF=wo-p++r69&ldu~qbgUNrjaW}@r$r}WXURx-aR5jGDegGE0jbuKq)$e47D7mdSH z4twdQQ$3_mkPYvh!Xaz?3X<$F1_$4^|>91E>sm_ZGo6QI@McrA&TOW!x}> zhX(Ra*b#hMFwTbE-7$8zrI3S(T4J2rJ0twbo`c`P7)M)f*a#cjz$xaK_?^tyo$Ud^ zh1ZVg9>lt#ZB_7!oh%9bxDZa+6@#M31Q_*mr<1`S1wI9+`(6agqe@Bq;xKr&F$dh` zd&!%FyO?7(7V{3I6+wrl4_sKc3eqtX6Z@NQ}KX$ug$>u5-}*(xPzK^^pie` zT-YcZ%I=_w$s0}9?c7s<8o_?3JZeYlizWisS_~5c++o$FvE;MQG<4g`!-sPnP}Op? 
zV0WW7d}r+LRjd_!Xv(xnnr@1O?K4V-4bacSfP2pY99qb7p~D(r9DGCYux$Yzzg~hG zdzJ8s0qcKV;|Y>kMWDELJp7yxMsu42vB{p@H2xi>SJc)L=hYG5HZL3ML_d&`dHdN; zFCLxc({QUx7d5$efz%DAfLTHURE=pS!}x-J8k2<=e@CL|zYR3v@DSNg@*t%%7}^IC z$>w26yz{gW7e4YvfvypqtFHjBBT7J~+696(f92-9n1S0Tu$gGyJT#shF4!)n533ow zGjlM9%XwV;7&i+PV*8AqCUy&TCXdUsv;$4X?q2X_fKtqN!JVb9NOel^&>wmHCu>Zs zRefO2xGkf-WMTumyL*-iOWr&uJFd0R$0CXNH7N!A z`p#1=z3YVTNQ2PQIN;~h6T?>@=x9tfN~1bm(-CxwctC!t!tKLSG|zG&7GxKrt%Dq1c4RRL^x zBMNV4Ev0+iLh;G_9DFyukG{&UCEJd~06!`NHZ|WTV;$RQ_w6JsYe~Y!6=&#~tb3$R zI|F{Kj)56ZYsibQy|nN|4!Z9N#e(uBG+bK@mZld#`mi79u5u!|E9FpkSusA_wGeLw zOVejbsxYIBb(;1$KwWY@mw#0YuQGP$X=IJ#{C0NisWJu)HpffeH3V{3MX))aU|%-# z{=d*f)maz04Z;iHQa^K!Emmd>Nri5F<$*sB6rp~pG`rdNBQi39zy^Id@J|T-`>=}2 zm4@TO?b$eX@<-}9v4I?Ih=Y=cX|Ocm2HDnfj=o%-g2rzWFjlse&TD!`bn7!=+L9>H zH`z=gJ`Pa$o{L8oFUAYGY4q_JNytuO9@P>bIM8NFB)Q4>&y3~NdR)=!;(#D}Pz@Re zc#tGw$2{*#x$io9xQns7tceyVVtD_IZkHLTGj=DnTpzUN#Rw+m&%@b_-DyY9#A1_= z+@TvTAnaEHj%^B1zH|=#oa>ETS0Um_N#y&)GnZ*F_$1}QO_d??qd zOHP6*{J^iXFyrbx$P+t1x}?dvZ@5Rn5fjKjd~!ZLEmLj>*gSr)j)9}OqE z(bCNmV2mc~VOqEd0{Z00-=!+JCyIy3i{@khzGgvM&unnn$$V);)-c)ot<9W^bCKoY zv1z{%`uIw5qhcJaUBQP+oCQLCX9Tdo2`$Wc*uG#YT8~pAPfa|a@+I4eyUPMm^`d3z z0l4B$K1%-+Mt{1TSp5tG$-Zo`)aW6XU+<+m+F~(+q+`H}TXb*@l9y9cVD;HVP_jBs z!Ztmj+Vfc#l0-Bf^4>;8UjHO(_vAuX|6*ACD~mW7NT8Q~A+DI|i+ko<(J5~7Q23U4 zglgU3cK;}MK~4=rpR#X^1bckQTP=uMqzfU8-L<{60Q&1?tc==A|HC$%Q@NOVD7yBF^g9CQEL4!9TGgcu_tMR&+@H4|;ErpE^h`cf6ry z$FlH2Zv@6QtfTj@50eeI@?dmV5Pb27B%&hY&|^vw23C3D%w#Qk^|vB4R+K>7Q)h_l zd&(W(td5O@bzf%NBB^#0Y-^YUpBTHlf5H@wof~SG(XhmQjNQEy)5W)&DmYzHdlwom2D`WM@xrdz`04af?!V8DP!hm{W5z0Q zDsK{%S-A*DpcvoSO~Cps?&Q%Oe<)QbfV_tykg}|lnn#CWiDM2L&iz8|uk9l37O^0* zFP(AIJ0vhxK({z2qnKF|K8`xc?h7B0x9>AxhENP_S-FFRKI*5HUb%=XLU7Q#nDULq z;q})7_|xeNb=`!VUpW!4A7@=i3*B+-dQs}8Go5vYu?&Nj1FUJ=#C^-u!A{2RdV;N( zBR;j`l#VIHF?P4n%m6+vG8B|AV7WtgKAPxhVDqi}+=UAZpypZ$EZnNZ`l2-H-&tNb zHoXYX@Wx}EjI%==Cj&<(1x1qwxLYkxo2pkV%6#epgA< zwN6?-l7jV-30U^-Fm*ZbihT0Vg0Q|w2zjxA*j*i>d&^jVqa|~voKB?Uq@+OTWg)18 
zH{ANGM>MSzP%F9w7i@6BT{GVcR=3QA0mkke&F4YqiZ-zsL)+!?EEsW3hK1?U#oNpS ze&~Vv3qJTsSb(P42`j@@=6I6Ly3PgqxZXCFTeWN+$TN1gX2T3vf9#_m<)I56_hx(D z=aW(I=v?xZ?+p@H3c=xkB&^FzrV|E((IPGn{}URbC1*C0RLv+j`!*A z?z2pwsbzV`mL@9o=}bhsQU@|g z%#S%+*iFVAF_vuB#6htpKm+Kf(yOWH8uw&^};Z6-!l5lQWO-D3gF)+e<-`_N>0>Gz(Vh0 zoY=e&#~+oWb(JcR9K(a*J&v$duYp@KFdMaZ@v-+C+fDnO>nJEUhDP>`pIvJNBR=B< z!UEPa)J$$+0Gzc846sKs21_SqtA-aCRqkIJNgmnf&jR9gYC?} z3B$~;6_lSBj!~bp@rpzbl@r=WzRih)|6ZnnYT_++2ZZ$fj1*kyortS#TB&o(6XNa4 zc&tz~jJj?mbLjxR*O1HH?29q&R2Ka{c`Rhb7Q&$^zJL-|WJaJo&Xs14i9fFRDQi@~ zO;dwcjNP@g*}+bcWt>B>F8*NbuIZiy8u*@T_c&t))r{R$dd-1?OWp#Lv$m+g*qy75 zI^JCRl3Os-1x}Tez?tg`U}mI4H$3vf$77gBXpa=07>Hxt7(vk3ng_$8!=!uJdg@Zc z?w8hOp`iCQ&0lwj{7_6__ra-9q;#3&N?xH$Z=~Yy+<0_3d4OgBA9c# zf_T=C(9L(*y-8INR!oefQ<;$UjC&E-RkAFyz7}Z|Rl=H9mQyovMlR#2K&Va~UIQOG z&jO_Gw&oVJ&O!5^jBhoWqHWkbo06B7AkNs`pFcWKJgHL9cg!9=Pw{Z#>FKC-PMFBO zaf9+Z#bBa33EZS?=!8gLd?C;7Xm5z4)s_NsN-qQsmga(y;!pBv&UX4=b2JX9W#YyQ zkEyua31Ziq2$R<(vwM3=Yy)r8pE2opZbdAn%&epFeLciaE(ex$gu(h(WklG9<^#~qf1hib*}iN(sD>DXI-hf0?T zNZZk5P`{Q4gN`SO_qIoL+UE=$S00Ufmu{nFKl{l?KbFzvhrk%qeA2Bff&SkMP*32C z*EbQG6E_LYykr?~2Y1-%A<7&|)6w@VkKMM}Gtq%w9VUS|oP4(r&AF*8LC z#_oD_3~+(#Jns20Q-Uyd_iefcyjl1_AfN1vu?;26-KB&hyEVyFLoW!nDT49U?VR3&v zWL!E-#4o*~FBfN_aAqXd-djg!z8@l|R_4K6qhJ``97}HWN}<7M#!DS~bew`B z_^d90r7K*ZZ0|enO~*`Jr^PsanJxM&gbETL>%%w3?pg}X!0U8x``kziyz!Xzrrp;? z-$!NKhM#s|$=IFvE;ZnMh6G}UZWuFKj7=})@mPsD`7ZE*rQU@gc2xp=O*5!W)na_v zor^Nl2WaV^t>ozOD9Bxs3GbggB}YHBQi=RT{1ujhZ=ar}_pEP{iqSN9D-j2S7WJg% z^+&4Qo{e3u;W%Dp1-%_E3{4IBFzHSJX=SKVP(JX`NKxP0+{ke1TID|r5~S!qUM$y+&QzC zF0a`|P6%Q^NF)Oq_uM7Ckap^6o6NpZlCV4VG_~eGAPuWB;H^{)z|IxZ#){laOGyN3r8pE*E(Rt>kY zP8&D2vmAA@6^{Dt=tzh*0hWh{#bXRX{?;5pUj@O+2vkU4M7&=O()BiZxan#znkgjF zC#sULr=gJDF?qu_S0mCeO978qm!PbTEAF`WRq*-Y4EV^{T}3a;{;W&q_KxbI2V-|n zoz2nFX3H7h&E~){cK4=5kNv;M69`u@=ao1gHI~m{{W9OVPsOef>sbP7w5n(k8+t6u^n$s!zCWia?JdN0TLKv0Oo4Z! 
z=ZUxKb^5-FIlF$uG3Rq5J$L5=sfx;m&yU05y8Rj=93g~T!}9S1yYGG8z)G&5e;i(O!mv;$P7dv1`73+i>QpJlEQsl77Lcm?c5UV;7#Lhd=N89~Se0u?o zxhRG*i+H4NK`1DH%mKYQePn29HRW5ypkZtVItuR7pQdeOZ9)>LDkj6pt!Icx%stvO zD}()BV{p@*opk-nUSfJI2V5FLA$R#QQsE?uM^XzIH)QN?zY~4m%$%8i#jt1JLU>dp zO^Rbw(KL?7z7HKy`Eb1;`hpfj9OlEs_tx;-4{csbquxsJ;mcHsBKQ$11FB_y^rB=S8dNaH#A{(3__Tt| z;fKS~>TJkV|40^3+edGh#-YcPH1y2BL3h=iBcErdfTDXMte@6OnyR1Csy&$~7>vRc z{jJpO$pBIPmJ2t+Sx&7WixjDk#Y+i=ShUOsd5=x0R;)bK$TF|iH&@6I{mo77SHp-G z%;$N;4hKt@3Vvzm!D7blwqLb?vwm&X%R0?iK97&X%n#q!>BF^MvxV;IY|c+)OycK% z0!2L+EGjC&o2Y<|5p&4mOm8?bS_tx6q+rw}o;t+`V;u9t&-(P2^6J);GkYW9fOQrq ziN9hut%qpkv;;J^PQ}+VJL&mhw0l6klM5F=nX0M9DEtU?$#@)%()R_ zrkD?Pl0l$AhHMX310lXcvy(^5Glc2`^isyfaP`LCP1ouPrs^VlqV67cPA zE5XTp1GvT5o%=3RNC5Zf%KplZTQT*$hV4d z+Js3gyJJh9{q}`Sxk6Ap%a(L^3#j3V5WHlW%kH{=&}8@RMB!vKs9s@?3Fe3IopXZL z{7l5bmC3kU&_P?AyGd43I$U283zJ0ah}qpQG>!S;&m0fK=5uAV-B|?UbPHgR`Qdw1 zJjnbrvRI>Dj8D84;o=DsnG1C)tZ?MPDxdif^S+t$c{dva)-lJ#VdjVTeb*69=fW8_ z>!!udg*k_QbgU8LP;w<7TbLjI?5S36!XDNEV$S|;i>3m6R-~Of7NMO?G1mFW;?{qj zWNvZ*sL8UPj#p!#y?HrZ)E9F|lMyYXrE5P_??aA#dqxXMS7t(Mr{JgcMqmnpklVqJdI4dAAW zogiJp7I&F5FY-cloP6^E*Pr7IHv3ATwOJ8tyR}*Ws~0+37GZtGI2>t>BD%MNpnH(b zx}X0Mm4r%~Hzg8-s?C!3F z4-PpT96un+4Sh1gQT7~kw=!nT;ik=E3mcffoev^$T5z9#Kyc}UBgO~vaH^FG%16o) zp|1-ebY3y6cb)(PIqr0^y+8JQ6=2K@5xkyRN*uzOPi;NR6N~qf4`+5!Bhy&yY)Hpd z%kNU#qIQx$KN-v{lVE`P;q7Z4(C?KQNJU~$Xxk1tgZbg7N#(-caiQRmRZMF2#Bs(K zmVxv0!-=o#n6qUfEIn4ta>njp+BlY|1y4hjeLTFV?|>7AHVM`o)&}B9xa?pldCw1qmh^E@r^!W%xy z*X!XTi+pYyv4fOec9W4m177a^E~qJY#f{n}2tVXe#lV>CpWy@RDwqTJ?;hH$e*xegzf{$h^3X*~ip@p$KYiSc$)O?_0ZkjbdKEdu>9ko&1X*ZXx z;|Pli*&N@?IN1#;I*TksovdP{oE%;jawO~a_(9&50=WHD6y`8Le5FSyHonclZ*qO~ z(~4R$`B)4*4`s{($-Ts z7`rnR_41a`>4svkAhQ582mL@N(TNOH%3)q0dFI${GwADKe>c89L}QGAFhBf~X?&Cpor$hlA31)v3%u|tfj6xRaB%k=8dl_u zjaLg%p+yp%vf@c;dN8;py-uPr z^%`w9Va~39armR)0JV7jj?~F!gL7B}sAaAp*Pe~g*;Dh;RyYXDnnUQ>S#nIB$-vAbOlreU_ZII*a7hrN4?K|*8_Xy)3|y+VF?vECmIxB;5L6W|KUm{nIArURxuPfF9O*+a>Qn( z3NDXkou)2qpApk6i0zvVZ9Dmp^vfC+`Mt4uaCI&=vwg;&Jw|x+t`z4zjsrQy?$))< 
zf}eBR1*Yy!=xoZvOuMOAqN7BzEj>WvMG-_g$%4L(7wyOnz=OB)5e9^D+l}Q!W-tr{ zU$a41p@)I!y;MTLy!O`V_#yum-Ms_JW9EmC5+s7V?s3vo_k^BYkclnK55L7>8-3UN zlQivPvu^KVc>ExX44F#c=$t~FJGQZT4aS-r5isn5G!q(%g3wP2my}P2445&py+Quxf{r8#> z=7*o(6puP}sTeW)GTk(EnN(d%1;zLAV7vby>A&`d?m3Z#(?7FWw`v{T^kkS++{uGC z)j^;%H9L)Ma-@5g+JZ2sOE?w(8>~c{=gY3jy~nC>`+HLVE0>Tw)k7cO%Qfy z4ovvT-pwthpm}1Xy;R#0{TRC&8P&m`-4&d(q&=80b{9pb!=yT4YR>%dONWY4?AIh* z(qm1whxo!ag+jLHkpRohIdq>^2;2APV!Y;0y3?$RoH37vt?HRz+WnZk7d}Csma)C( zk7VYjr?k-JHqnb>>}x0%QWW3t0qgyA@=CzA(A1TIQn+aHR+?^;r@P`)p z0yuhG1deSeWgb`7(PW>4x*A`o)8k#lz$O+v>(b%KmODfuM?iPECF6f{lW?-{NqT72 zBU11&1GWuELs`KNvfx8MJ>ip!S4%_C*R+^9hs2@kTLJ7k=L?&T5n{7$BFdgD#x7@f zY)KcT#fH=2Ts@DmNe3t?+{E3_*TE@Q_{jIOLdDkP4mU$nn9tbVE#`-}ylE)7G|W)A~lRSaqNa?BoD6N86_PH z*U~LpBJj(XEd0y-@YB-v6TLa{AQ+bhK?|;ufk&M*O*j=}!V<9Y{$a{H^@@Z9Wx>fW zkzjsn1Ce|tTVFw8Uel2wE3XZ>O!pDKNdes&LBTFE(X#2xzO_UHz^g`LPuMo zP^L5!mwb9kGk&*_mC}hY9GLWT(?Y z=HxGiO@?wXsOwC9%>41=h62>QDT>p4my)j!LLsFp2U>o8BYq9FRO3txo)OEyh@*Gu z#*8+i`XmVq8rxAUOQmM@ftxH^TQ7{^FeHeH5?19wmB5a zT+eLQJ%7#+O+2S@t5*`3#@OA5u`G}M?xLVz0%LdISSMV`6de4fMk0DWU}Jp|*r!Ru zmrp^ozmECgm*=C{Cn4;SUri(=BS3vV>l*C&kCZ|KecBR-aye;ikJLrGpPeI@Hl;w( zqXd{WriDD;^MYPEl!;n7QJ5~inHqHul3c4iDEDB`_$g^bTu~DL?JH#c$lh2RY)Ugd zCIjE91cq!~;jw%#_giWPb}@E$@5em+JRw7HnEBzOU-03Pumy1O^;WHw=2*zsT|t&U zUZ=6#$rbY;MuHEC%nu*d|50H3%mvqbF<0t?$>?@_E{Q1fX8YzssNN&Vbl1sr>Toa` z#^j;Ok3Y2T>P9k2I|^)HXTp~2FG)~WGacTXfJS#x@R7^~T4r&Lgj=P7=;l*Ge$ft)8r}yLWUwIK;bN572qE2rND#6w#C9o`S z0eDJY<2+YuV45G>4-OElsn-(xb~Xg3Kg@%7m*vzBmv@XDvBG~Wr{<`rgMDkOxZpeo zFx$q1L7{2zBv+hbl{?DR6{DoaM0DEYKoTVVVD8-l`0tt+v>EW|PKQu-lb4HXLj81O zW;JG@A<=a{MZ;o~kSHf(C-cLPly;M=H<(v=Of39(wUgK}Km3~b9MsVY z!zo*q(K=gEc%IB06YKn8^>J5XwSNNM_9@1A=7&GOL5{xJtO5&T*==;KBW(U!&*crz z#wy0{*7sQB>cF!dQ<)!rH+#nCRvAIi-Eo3N=Q;eB!^f1vtoyXNo#XtSSjPts9w#V6 zdx8=@XXk-VPuV?7j0_e>`w_>I0QlII5BVR3A?(Qt>dO4^3ZJs^%%4wGN_HRdnHvWh z&(okO_!eCSkEB8!D9FBswHe1O!=7;a!pNnlD7GwLiEV@T! 
zEGWeng3DxIX!>YLHipS#v8K!1Lk)I5Xa0=icCg~&QcgWW7s)+7&b?)U zXCegckI)R#8N0jaHU|U_K7zPTTP$IIcvCZV=7@U9&6w>1HN_?1e@Ou%Jank^b1!rg zF2YyUQp|%CPcFPDS_8HS%au-2A`sB z;VEP6Zc-i19O)suCgs3^(=4ZUr;J>)6G4~We0)A90Jqq9&@IPg!E9zR_QmmU7O^&xb{H5jly3WC}iyJ zSjB8ul-DX4sF{yy7Ch`!Q$eQ(lZez-wkMG-24CifxBJ2R?q2xg$b>Ri zS{UpjInX8Xg`9j|N4GIQ{IlwGT$X=_c5D=oj^oMj?@}Tdo1Y~04Ug!tuNiD_8;up2 z+i1#|A4E1V7dneVAV49Xtehc%7G;zR%Froc*~Q5yjhKW0JD zkGCWvy@^`wjz>}PH0;;8O0x$#iF`~d$bXB6O{WeM%X_b=cUTsBrbc3B=Q{eSe~7fN z&I48bU@*vyCF#r$f9`D|?ltnpowa&2Sw#_&*Ob7aWiHSf{f-+xKNF3$+3$tVdLp-l z2+UvV!?a%J2FWplO9%Vg564;HO2+QQZtG&A;&QI#4|BbI=E2KtYVddCkl>n$8yb!< z7s6wCwy!ZKe&>DQiDw~rF+aStPX?V(y%;au&c(TM1GIK*6>&cu1-&JiuzKiopK6ecW{8Kf9=R_?Rl)TP8DSAWGVM?A)+07&Z^7d;Y4RL zO#I=BPXTzp7XjJqrS$rXQ0&>5gS+KfKk30;WbU~b@Ds`a`^|TWadJD`!6jqg?j)2C zKTS7RJRlBhGay(Z28`F$klMjM`c@(r56Fh%{*A>n#Z?UATbaKi*bnZG+7W)yL_EE+ z829zLqy6Qv^pejsaN5fQv4;-OV^YJFG-~6wcJ{1@vqHU~?H!_tCIF1x?HV?KSa$<~ z`6hzGjNKg?)<8kv4eku{!?%59@4xgZpk^?GDz5QFxh&>~kC0{=zF-p46bL8m^I^%~ zQF7dPHEoy>fkE@K@s}ubInCQo8c)SRLRuPpb?G9{?_Qwm2UGq>(tY^l{CX!|!|_zdu2G++Fv1 zo%4L*uluxH^ZF>S#1{6nPZ~TWAO1nxIyUz6Prj99;o{5wnA#o7i!=vKb$QKm%$N)-wr{QWdoshBKeO6y=TWp}>h!v`fUKG(3x40=;` zCwEXAw5zj3owd`j7iD*S3N>gqwnL~QAAZ_Q>b1E(3~c3%xQ3b+PFZJp*_+dPA zlp;5AbQatl{*&3fTgy}R!_crf6(7BP#;21Hzhp}c*xyV5hmMP^e#$L=cykg?`x=FE zbN2D(*0-!9A{{gvLSTdOa@H8t3+IJqFXAhma*P~xqRv=5KJ$%mIaywvLRISa6&?5@IjG8Ff# z5Vy=Hdwm1-$p28oVTWayzquP^mFL5I$0E`m@->rF$Ty0>l^4tT^7hXx zp(+EycLsrO<^ty7CWTqaG%NYzgRhDmcx)A!nZD!}Z+C;@fI+M}P7Q}gOYr78J5)*B zBYIz-1Xn1#JJo6l3Uj#CyfPyk|Ce_EALwJ>2XcbUZ2`Wg7eaZYIcU9$LI}Q{#_R{>d1tv0RgaW zP8!qE8i28Jxu_)Z!nx;6_@?-wa7Up4u6}TaW0$)G>z`wAbgKk)4ot@vw)4e1TH3Jf z8Cl$QQ(;C}m8Hy0Q`DsFPG_+W8rymc)HeoGD7$kD9}E4A-icrJXJBw{0a*shczd2M zyO%i&Cj8BXq!JmpIxCvbj`zoqxGW57>f!s2t!9z?=7OPl8ocaoWofmCc)dmpx=l?) 
z=b@LmBl+;E4T;bh9}OnO``P(Buetsp%F&xcQ6_OEzjWs>n>->L3Z&>g*TC+6q6lHf`j{%mBwGgu;deBbUUF{|lQ1f%B z{qV&cw^DXzGg%8Y>q`WmOE&Q76ul>#M?=`~emv}}3)bDv$GroUFvQ22?pVFyf_yFn zpX~>29XWj5*+AS%KD>VWcRnv*6Weu`zRzDKW8&IlRxMOJ5K40{PIbGL_>y+;IR^5O5UDn!ppE9~@pRV!C#2&d?+TNh#oUn_sqwn_`Qs;Cf8 z-~>F^bwZHaXAjI&0?!>s!F(AN9=_cjpUdUrFgJNztm?_)lKsGCP!7y{(g*&Ci}|=8 z!8rVDI>vtd$YuZRU`x(MLJCWUBjm#;9}{`~xp>@mJ{FTLPSHN!Q`Y`11?=;}p`X`A zcKrDdUbr(8&!+{Vb7K}))$0$AI=Qf8tT(ioF!o=r5~jYQzn$A$(Ei6iaj}y+9J?ri zju;zoo3~7`cAr8%Yay;(ZI04anzb`}OrV3_x&2G|;WX=UotTW3)=^|OYcR!5TJ(vT{U0E=_VrJ3_9b%+PlKk&xo~0cDz@;}AHJA; z_@|ow*lHHZpY@RihY@*@PCoqlAZ_L~c{uJ^S%3{qPWY^&N%Xus77DZq;b6Ko=$5+( znR|6nLfPHt7*o_c@Z0k9ICI!b*_~G16p$-hEXqmQ;P3kqtg2JTPl`Q4B69)WlMmZ_ zl%VQ{CAYZgh4b8UvF=oVe2|{O){+ljbc;N-&Q7K^v5YGihodR^@cd{K&rmwaw#|=) zOJT|puIND{z%ij=UQe0Ujp>sA^Y z;kpBNt*Qkpc(kz)wcxQ19Tj&%IDyDH@5Pq6% zJ|Z>1HlVKD?`3CY_@`h(HgSoV6L40gBVV&dIdnA5J$|CuYp*zf{~ARqp+a=Q@! zK?A=j7Gk*BG^|ny7C})5dfpa-j=32ev~#Wb|i|$;LV0(TPl=|*rk$>Luld)v;-U-25lZtth zCwck2k4@~;3skPm-e=DM&Xay-tDrJW#C zhK2i;#qpu~aGA2ZZT*d5%)tY->(VVThqAjJFd5~RR0xH7c95DQfjez#korl6Kb-D{ zPgCf#$V?G$soSxZy*^O2E(Zobl7bw&`8>xf2-9C=;IF=4`S%Um*xJ((U>TGGPu4tQ zg>%pHpAB*7M?Sn+>^Yvl`XTLX(B0j_2%EAx0*`R`#A@i zzWcyrXS%y9Rm8eQ`B=2U4Gq->anEcuc(_;s(Qj>G%jey~oJW&z9c6dL6D%<@U~BE0 zokk!{@AxfO^`ZT)nmCSp_;+d4Sx`S7yLZ(IiL#X4eWmaGGe<&vvnsE8KNAP<%fsH8 zgK*^wAC{y%8#0Qrq4(Eb;48h1?_U;zyVs{<)6X_;roWfPk`Lc?KM7X2+-6U2U*MNk zC*YN)7`)VRoL{}t%#07DLQ8fSd?X)UQL3BAbdrng6@V`0N!*Zp_}au=n3UlKsd*;s zxzjLoH!ncd7tVOvzDqnJr2&dRC1iC^1B=V^gk7fEsL?_lAN{AI$@;2umJOy*`++i| z937|-BE`4`)38yEZg_pi;>Z*41dqovpk`J9NDe8(ebnU-3TL6>wOouRA6{W?G%HQ_ z2ivGDFz@}FjXSrNKRPlOjg8ZAOj9d$r5|C3*2F-XVIo}Zs$=^n-{6I2NtjGN{J^;V z-2dZi)^|uc6#Iw5NAlqdKm6q*)Ur|azu8#1Dv%eo$bpA<9>l)!0PSt#n8!C2T=t{@ zeMUKBVu6J zM3}g}5C(s>gyPvRtR#00k@n%yahm}S_LLR&DG0D(Ng>@VOn`R@HKG>z@Ng~!Z1j_te9)ORjA{!-g{7;gNB0j~`XCD~m(2lNrMYZ|f-Huq z=3zjICtlH+#OF(?z@t?K(7WCVPNp^q%@yPD85E*=tTi4hcNQm{(52lvnll|R0T-{{ zwLf*ualqX|RQ*lH#E=ppYOoEY+>?Om8FffF-<$t<;eyV;@^S5FC0sYqigky2!@Hrm 
zwAa!f-k-?e+qDDn%)(4uH2epzwku=qmh{%sNQJgbPuLNKD&B8tEUx%UW@a_#AN=mI z*AdBZvWwn~19q{YUq5gs#SAQ~4Mw%QOZfHRJ`nXO8w`#7z&cG5ZXKK;J6PNNfTAU{epkD}Un7@3ynu5DDew$zWS_pPgPP@&oSicug-3XWF0Uf6E$~Jo)enzrrCc zb~7{h@r^h8Wa5LuKs>3O&pWOAfon$&*o)q*EvEM8}FUsze#%N-^YlE=n zwj&f@D}ZgqDqvSJp7$mnUOpiY7pBOe!pdM4J#7v&{2^<#?jQRSxPp6b3dJQK((n=a z@G_E0HqRg$-VaEE`{vi#vbM|IPbv{71jOL=D~I{<%g@>UfHWBVel94UU&HRb{>=|A z%|d4bfBcpk%l(JTz=W6dJ7GEt3Po-9)m|B+BMY#+Xa@e?{aWO;W5G?S5Zbh-f&GXe z!SRU>ex~fMkFFVRm>y`UICm=Cc|yBf4O5`8Do32?Mjh*v-A&m(2CsSk7DkjhgY<*~ zxYs%qe&?I<2GI-mEzQNxI|g96aSH3&5C9*^hi`7}W;^9e`Ib{*xOiSFS~N9ryMD)6 zjY2H+4@m%v=JTw;@(%yJhpd{6DD*Jj!vkKtV}okaq47!x{5PI}?^UgB8XK^rMcN}OX zondbz4Sa@i3by1&;KuT;-0#CzwpTh6HmwbUkB;-$5?g7kIF^I^gMBbjj{5F)DnfWs zKBRWK(!I?9*1kv$mla7+zu6XJGPa6IM zrsC3+Fm&%;$9p~h$rf5sx0g!*sP!bWlWGHT)81VC-aL!;olN*MpJ8;rTL9Wt&LH2= zE~qMM;JjZFJotGUo?M$O&i|(kyD7Vy{l^S^HkVo$uQx+R+1>sG9qcbe2`^VngH!#e zt9G#l=oq{g6<^H2cn|9GyEhEuz8JEHOJ~8;x?JekG7uE2;`#Vq0XQiv3q9Wa;+GrO z(w$HkTxdy!Al}N1pB~|68)MM+P6Ar?y2J-M++g}b5*+>>1>Q0HSi=3cyg@k~zt0NA z3AW4mlaOApJUAQ9{+JDB9)axnM>)(~numgu2g(GEf zmoL3_I~Y1noFpE1*M~65?%eMfgZF`=+Q=#kET`-)P*DqeZk7r0`L-~(OakYBtAYN8 zemrBVD_-56k1N!Mpz8%&hJ$<{`+g2gsgnjjHwiCx48kY6G_(HJ!FSBx!eT8Vz?yvc z>Q6=ThFrYB}2}?2xz}w&IIz|Z^UNc49#G?zjy(UaFBvM zM*4yObobJZvQVS*pTkJIuu9CwVwB zcrZpR^kMn){Xq5x851vh!`AmjT!no2>u=NX;K%p8@95p^tVtAvK1~8s*SqY?735DQ zQXjW#EWRTjzU9_a7C$qU_I<W{V zznTO4k7PmC`yQsbV>RDRKD^TMGz_kP!7UCSVtJ!uz?OV?|DG$XWWqIW_#hGelA|$u z<9>dv^Cj!3O@kM$p-@_|lGUF7%ZndpVcM!W`1)Ze_x&LY;>hcEj6RGfHt94Jqt4v^EdzP`&sXb^RA5oLFY+f8tvr=6Am2Xly_?9TMF7HnHn zB97rUSX(8*>VxX|>|bxT?4t|F+|37D1tmy!wBk2oy)j=g7vX$AEUwRChl~RusxT9x zdcLzEuA6vo^5GwjO2w|dPk5Pb6)XM`3o^y=uxt}&>Y4ZW+_+?XF^{r4sU5sP?jw6Q zjPBY_k>zl45nD$-{MfJAxKqmycd5SC^UfW-`YE#|$423NI|iY1*&Z2x9=m@$}oIK1Rx>JT^n^`$RPk;mq zc4sk0KKz9rIhb?G8;^Q39-6KM`Y*|6vUH`6dnsmMqmHjByVFXt!DlsV#eK`PVCZ$) zE!kiW1?MJMX-_l37hQ!IrmlxA4$}pX{??#L+1(u_P0FYm#77xUXtTQjujH#>Nb7i} zVeSc@=6P^?r7Yy!4Ca@d=3vWDdh34rk6(-~Vb3c<;ozGzFnaTf`4t}EMdZWl_f5iB 
z)$6>!^cB__l?b=Vhp(zU%;H`?=T}10a7XN1v_HLyAM4e_a77jjnCuT>zL89ce0Z-n zxu~r(3o|9!{Px)4uy92IG!)E$*72`}Vb{lE17&yr$cKMCB2biiqXTY~-EB@Xg{j-S zY9=L2#pzGTY`HN7=lLua?*5$)S3XFfdhHlEd;hn%(%gmmuJiFu{ZM>eW5#N(c!A-} zT-aXIA1x=?0nr|ynKOnk=Ik7 z#io&b+k2=8mVdsSp)9CK5fMzur0!}?JR7@~10NRI#Bnu;Wx9?8v>}8f@ zT2kg3H3@?x`-S~)?chg-1W-y1=7h@g*Pq-la#}vVRaL|T#ZJt|#uxtc%z=`YzA$^? ze17z05FS~bfjj$s<}1%_XGiKH;Lv}`(6aaedzV|oBguzEQk`#E* zHv%Tk+san;>)?tq)NwBxg#FUB+qbq*hH-Pud z84bgCOQ8F{ExhR5B19jajLx-%SQ=`9vjaEQRwNk%?Zd;%E-7-Z%qd51%Eu!7|V^O41~u0S=USStDOy%>G>Gjv92-*$2GYYmvDNV~?b zr-ApnB*95m2QN@|_kfwwKR^vgU8aSB5~ zzLy=(*y)1{N-1AEEsZl)NLbd)AnF3lfaycNGL?!geCf0ZY!9Js=ZZ%>e%4ubAt?@e zE5?I;&N+5v{zE>MeE77V5xC@JIbYoKneD00fP(TMcpJHZ4fU2nAvFi#rw_i2b>LrW z6rjN`AKbRMfu%5reM?cJy8+s-D6zv~dV56syOTis2ssQ-Ex{&Gv>Ln32!~O2mvLJk zgFOZbzV!kmQFgbfk-DA3>O}KM2Ry7Op*hV+%&b*q1IUN}+mHwMJO)Gi1s^_s$ZU)# z&c>GydSmi{rR>U*5c(UD4tD+DF&C@dywoZRZ#+!GK>yqPCBML0bZD+?9}DfhPB6VQ z&3wncR9w*!h9>eGcw1)|bNrqO7ySa@y=59}(I0?=6LRrj4(%{*G2sK#hQdEZawXq7 z!{;SkLd4%O*xV{X**(+oy7YW;pT0Jm-64!h08sf+YLmfpHr>rLoq?^Ns8X^wW3-T9LbZ=$$LNW5kP8kF7DHH?O{ zrv12jrwcB=MXqFDC5+FoX2s;gD-6yhub>~SRLJGZVj!lPWa5n0?|gmSCZ;EbQ&$fe z6XzZ?iAxoK`8yUrEr>@}QOg%Z*0bly)B%zc2}9miFsm=0_}8!ulsz1b6L&4*F1~%K z4<`rGHu{28t2>)>RUUmOO$<@hmZj{lz7Me5i+Oi|1RloVFzMGv_WFjUs<2 z#$%=!OZ_sZxHtLm3p!JvDk~hm*luKNKmXv{cGE6$QXp2G&EoYY{h?Dg7p9R9->A%3 z>UT9pKWZMB|cpw@qcO*hJ`S6RzUg6gg5>e|-G;XXs z$jfiMWHpg#FePj*l$Nbx)1Lm}^T~&=8{>~dMnv*?17+d<$UNv;>k09o%}(hJ$1^1b z_`AUg`<`qP;~tHLF;nP%E6Ez#)LjIDeE36@-Gzml;*M>9EyE_8gCAvgZhQ)8G%OaU z479Udygk6>Wq0!x4AgLbD9TzzlJ%kO&OW0zc1IMN>%uFGI6Cj~;njZAQE z>twd3WqglWIPM*nidj3FxXsv;?AoGO*z!Fd%&%}Joq30Eks%-ccO-S^?BZ1~KCpzk zbjY*{f$Eke?4e&DTu45=)_FgakM!cw^@E}48|@-@xr6SUk?hg^kr<~bK?i9Ed|F;D z4sn|Ro)Wq*juoK$P+!5f(*TFjTUScY2m|)rvl?P!1!0uksrr+d`QLu=_E$UH>MOxg z^5G9v$TQb2H#p4lVS$wbygup5-#hzafO`%;xz`u{)+}UyqJm-aiVX1l(9S+LZs(({ z$Wz;qj2QiZ@84L%R=UK)FT*%U@IK8t&opqI^(lDxPdK*ZZ04PBzOnqlw5QfL2;@xj z*$|6bUt}pPkdW@~R=w|8zdIkq=)IJ%BBb8jZDkXs$cX7CZCSi6LhvgOo@+ 
zK|U7nM2xqZLO%SD-_$$vQ4eR_)ED--0<@Dc5%ob6UJt(`uDs@mo30n&sMX}@t<+$5 zH+sPO#5}OimVLmygrfJ_?R`ZO2U++D+~8-BCfwpn0%)*li#$Fk)LGPvw{ zF8;kS3w`hC^Kffrm>y9;eMmFl_v25(@~;|LK7{tv45wkn<{(jVxDFV$6+)$v8En<| zuE_|PiZdv?i{7M-r;T%jSB}%6hm48rcnyft_$C^zc1H7w1sMKzD9%YRV)L}TVCJ%1 zxLi2^^oFJI0)+s~Rms9WpS$_wUZw2)G3vW}m>rFkd z;L~Wn=gkP1*h>PxxC1=Cc}*yZr<=35LUc1=IAMyi*cz!1Pbj_5$=E4wxP3)iU4z>OP%aGl>gUQRyzql_F_{ly0+Y17@^7DZH8LT2U~H+=P<9A7C> zgTqV6Y$hLmSdh}pg`H(mjW0#Vf8oG?9mynsc zHXZ9ev~exi%dV0SZ+JHehOpZ#ulWM6T9<&U8e_2I<#9gnQ8PPxC>2;n7~CKq{=H&1 zmvm=h`OE-ZJu->6Di4I<3if&i-oV(Oy3dNM&ZB5gG=9);ctY{kQs}pNjw^9(5S zq@Lcr%FxxK%N-WY!rUvlcyZ@IY&;gt_GSBnRRndKe*eWB$cLYIVlIjXY54wDEAM^t z2z$3a23G1Ng2B5wraScpcekK!$?hoXliklHzh1M5q3LklHxy2j4{!YQFPBx%#_pf9 z@n&)$AM-{I>V5Ly^>YvSSul?6`>BGfo)%!LvJ-~)suy*ZX+pdY^#OG;sM$SLXj-L* z_P+{o;6r2FzHf<@cP|TAPuX3}a4pDLUM31zwz&8Nb(-E7jp~{G*sd~HP~JgyaMuv9 zJY~(@2NLG+AP41dO5+;0T&8q65EKkDA+6UpHkEw%z`Ag(6v@oo_Lx6#tY$9c!{5%2 zhlLAjS>?ide)e`U_Ujvor>|D<^|I}(AUXr4O$vteQtEb&=!?evz_)(u0v#Ni=p!q%>2LFD~jt}0=TQ5bTo+;gi2i)aftC8IyAAV?cEL0CV#ehHM z_gv}DhJ5&_2^;xdnNIemG868<4S>{5X)MB~Kl&NwV#F|S40~_Rq z3dPsitGImEAGYvO7F4dA1Ia%^*(YULY*)*}mSRubICv6omQjI^s|!H$wi7fOH3?t# zj>By0LVO=?jWJ`K#rA4l2&C*zV~+_)yY;P2FgC|?cc`!RhZYX=C=r$@*}&1e5*T+Z=EvWhF|r&m=4673mFTO1Ut3cY9UW`A~u)M_ugOu zoeuXAAN4YXPI~JG#v8)Dz4f(iu2z^&+1=xn6R|Plgz)vZ9o+Pmz=tuau=0@-|6J;h z-z@XdMNa{jeRE}HExs^NF$Xj+^#wS#kl#oO#^WplM-TYK-MY6k6`M%-v?UoDv+uKr zwIV<18ISw5<8ZmzX+CprBil_rd{JjOg!*r0Bl~>keP(Ckv%El5_?gF#I`o4+-*ce- zj5paUj2Ublf~9BjakjN9_A->>%~t9VyiWq&BWSMvXRWY(krpClca_c-c!o#UW?Gui zolYS*jn{*w8wR4iA0T(8{A;8peeZ1$vg;k8@M;0rE>wX`9M2b$55G1p58uVg;roNZ ztk`Z2JpY{qv8VsBu?Z{q;BBEe=3N>dAs@bbMJ0P^91X+zC4tkF>r9P&c+uOB>Tw!vxSdd!6n$JVg2Z@>A`Wm#xKKD-$vm0J`((S z$^m=4&x>`I6F@DV=7SR$ESNG(D4Rl_8hz)l=rY2TeTS_Y7g@qm%I;XgBv@~|Lp;aq zupv`|V`bIQYnnX!J<|MWc7JP!Kqh=cf$GYrUw|6-bgfjJSlV%1hYt>?9w4?jSfk(VyFr4}W%kHU>8J!Z}9E*eZokh@O@X)9$pf=)k>X^+%CA zl7u$SH@VX93vB zbTgj|4y6O3{y{v~mkPi!p|qd>_!ocQww49xhru-R;U^qxW%BQj@U$&4=zB8(%R4Ue z67L(Vmvs_&eT{;MIs4es*0YrL}j? 
zSs*)2b0}FY>?gZX$e(WuB^#(`pQY#cor-!k{y{SI z{u2RjFP5{ZfM z6%?asX1&f1RweBb!Ujyj)CzKuTP^WxFs}_RGlI4B8Lza$0F>^@i63qW*p*J%-Hr)p zu3alk40V8mL&&}l90{yag?Bj5#MH(-EcP2rM!gR^wb&1IuVus2r@f)^XAytBAO!PY zr{mUV?|F~*Zl-1)1?7*E;1avbHatT9Kq~=zoMLgf!U;aH=_&i=nF#sEqJ=t^Nf&L7L z%q@U5XO*G4M2q)%>q&P)xmZ>rg9@{vS!Ww%cZaA0q^XCw9$U?4k`KRoX&UO+z2Kv2 z4>8n;f#9i$aP!j@Hqzu8mu*PI0SVDqQoNsk>-&oNV;W3u4uy!sl`QqnU%u!`7J4tA zg9@iZdFO9gV4it!d7dXMxI2-Z9j<~`s|wJUeE37Bo{APH$HB2_w2ylnVDWehVe2Jb z+)vrvvrQ)G@8V!}pu&jah=o%O@5wfAZmfb!21Y1V23X*PS0Y zFAv`|^FjSJ853>8*sar}Fy3B5Z#jEZtT-)}O_~U|D7#Cow1S49S5}^KhWLx#x^F`a zQDN5)E0h+XbOZH`&z}f3ohQW4d+pK2RDx`%Dn1%IlsRp8hj()MVC5!H_t|b-ugw>I z2T`Z#lRmgZT+GU{f?r^%pg zUmA8^Y$iW2+`lK5wvG`WxBAOeE5ngaX;eVpXma8jug4kg0-e{*Gt61W&kamIpdTvf%L{n7?wJgKb?|xW45-{xz$F zZP^_Pwy)Aa?(r)IB?tI}z0v63D+yl=y3RK%U11HeiLiux_{W)JRsB@=!@~_@BRH}9Ztb~MmUwb+AsU5Jb|>x^4v`P9oIyPse^p`cJbAvn!wm;p z=VO?%B34#9v0?{bF!acQYmfWFuC?>|+P6WNT9Sdgf41{A*S9mRYZ1`9CmC|H9@@4lS+N7|@Yb?E?Z5IP##D7*V`%N80>ZxK9CPR4zd-Dw3_;Np<7T8j*0 z7);rnW``clEcC^?Z4BFj3$g60CN6z^TPP+UzWO5>OJO6RBte5~t*1GEavomv8ieB` z{n?Qdv*9UZgZlG-Y@FvZuA>r)B{y%!yVGx_j8T?^oC?J&9lFyg<*d*Ltg;cpfV#DPN+Sb$srl#R@S zNq@T8vVQCMIWxMudz^}XSDx`tZO7Q39WfAhnchtgFS0w1x42M|gsb02p|;gNKIG3^ zwkI|nO7Db_XS19w%;<%)BC;_mbT)35oz3t6lY_S_sDs|p15)JFnO^G%Jlax#HDQiu zCU;$&8#x{%v19|kVBo)hg3vcqAL}T)tpVmjGs_#{Sxyx0zheBCXYJ(>io zDZBgm&=N92YOOBqG{S!Lju)@$W6j+`!k)(hd`v5Z_4U;4yt7W!jdQ>%1qtTQ9Em5I zRM}?o;nzLN18^P;kDmK*ALZFNuP7Ub-0qEshA(AnmxaKI_0+5VtBu(@@8-WC3j5V3 z;Y*j>eBtd2?6H0V6x+m-BY2#pUTNmz52WJv_AngLYXhf4XcpK>F0xkun5d<(60-q# zA~6@IXL#Y1d=s9NGZgfd3c&n@GiW<@3B^*>h4fs4@3&9K6IbSm^Gvm2483*r&rXHF z{KJ+b8%&W?c2|<4gXgS$1dpfI(5yx-ir-k+bL^eC!-BrIWz+BUA!Qtny6jBhERgBR zh4J%cVD_45UXbpOGs%ZeD);jS};t+(cN|2lYIDF zO^M(Y84Yve_Ot#UU-M4Iblmwc6!*KY^2ZH3NxB6f|FxJcE z11|)kqfsVC-2cuib2rfrQ#g2@Cu4$qczgdUF8wza-xkt5V@)mpkY3L=q$NXMW+W&+ zuAt+yPh6jTc%%Kn)IYL_4-M)IkM(mvf4wj5yt=d3x8<>FVm@}*y5qbPiah_)D6n^= zJu^FdNSbs+Fz=#nXUgt&ZL`AI;1{(4cMXB|;UUqRIzV=I)fOoTxM>N^tjUKzoPJV} 
zI&Kdk#u9M18U@l$DtuR^JC2eex6)c3!{>OirffghH82N0-0TB|&5QZXo?w)GOviw` zA9??gJ6X{ANa&fG4A;qrzj<5a4d>(0{ah?QRXxSGAAQP(|4f1L>EY05vXQ0!`N3<* zhp&$d#2=fp_)@F>uv0%5o|6xMy<1>yMM~KACLe3px!}Xhf8uLzbr9+#P!ntellv?a z{?3_#gKiY!k;Ue?@xqwesD7rfjo!MQgY@9=WgAg#xHZ<9{LcU!heww^7S=3s0_hzE zuxZP1xTL4WFWP(JS>rssoFj`V^5HC}X%4i1qx<8F|FOLrO89!sxhTIn4P(x|;t_`q zFv;O)2rf^ANh7W@WrHhxe{v$ed=ZVOHXY>G8(%Wrm^8=@o(r){SFwL>e|SFm@JCer z(e8HyAFCh>b5!#ni+p&SG1_dH>2SQWimZWpC!Cz#Bra+i3zsOnW3kpyvBO!AI-!eA zG|zY(L_Ly@y{#VWn$sQ{y%*0+0n=eiM5n6Oi!HTp4Vb--wNPN=C0`1DUxn(#G(?~`AbxmB+;3WHA6bt2F z>2t7}v%E!j_?$t>c(p4MZw}bS&wu&A#4G8rcWMY^-Ce?V1oy#!PubX2uMy!iUY z!SMNe9%y}Yhc6Z*nMUJC3?5IuZZ8LX6Rq}1OkZ5> znuAI=`eJ#-Le>};4DH1k;MLsDI@-4LRS=0s$cLXZ=K&AeSHsHOB!IIQ~~S-iqDKTfTo|2NW`);a?!l%=6hT>waj{k%Ld2eDL>z>3ntR5OA)}2VFZ? zuy7i{&L@n<<$EbJ9Zg+SR_jD(oD3miAsBmDz~s7EtMeu%*hJY~(HlM7;$$GSc>?%S zb{Ftk6K?q25z}uw;;L%}c&S(gPaV==v$uJWA(#gTQ{-UwieNsxW;V?avoXHzA1@7D z!7Mk0!UHq9Yrpk|9WSWl@dnYT9PoxGI zgnSeKTkni=6AG}abtq=9Fk+u{z2NxLTu|OQ0NRXFc+t=Rj8V=)-RIrhQNENdITZ$D z>r-LL{%5RR`Z%vth((o<1k62sk&lVI#h&d+g4T>EIB33yZBc3CkxA+J`AP`-$rSTK z^5G3)vZ2#&HjKRO#}>aFgbh3oZ4Eup_V#Gr^>GAf_Le~Za}LnH;+n7_bv(^3DKFM$ zcKC5DtEpfrH+C;r*(%u@zzjbor&g=!{3K zv+yiedlrY<dAAv9&>H?=+7G*-FEftQ@fW z=mQf5(cRrnMGP#;M}w7aSlS}TuPve(OOXWJnr&fy@@~QM1@%!MrTgMhmUv%z4=hu8v#AVJ(@NtJ3jsa~oH4+snqCih{&jNl>PDn~nN-f$!ar zfWG9zt3N!>onJLG;b$i8<8z=u7Fe7V{{=#!cYzVTjQA(*gg zpJBMevH({;c1DBionp6w2IT#czzy=@WgX@TUemR47iD+3f6OrIXX!cT7pCA%+1;%K z9qKlV5-+WohD%io@vQS$Jfrts=zKl{(mcqR*scsC|LXE{OK0Khx?CKwbs$#Ujb>-^ z{lPda3wFKv#o`**a_!S|(M~T7Pw-aW{NxDJ+!zCu+KI6FNgX?Be}k(FNqC-ocoWzC zyrTabc2zkY26~0UeB0&ho%BEc-W^x=|*& zkq>|NxCHyxjmC`9er)zuSGco1AIiTB0fz_He9s`-Q@fvov&e@xOUPyYt_Q*!-Aq{g zt%Es`5C8sZILg9w zAXGCLw3jSkx8wTaJmVZ(z1$a_HaPQ*M-(7SD<4WM-N7hLk^Q$>6{kc|c4ujie=84* zGx|(|J{9yC|Hcyj3vRW#{nQXQ|8JhL#sIIn$q8s?-U1=+QU3DABr7C!zx$%=11<$*J4uKP0#XAa-Um#B2IJ_o7W`DFm` z+%z_7Mt_tt&c(ip-spP6oHs`dh0u32t6Am(pSu4F30h;&uZ8S-ZyQ{`pjd2)oB~TI zyL)PQw|Cm%kbs)P;Eo(p4_r@^WE7wpBMLwxe7Xk5QB 
z5l{47;ZrAEV;bbcv!-ZRpLdX@b-v`UYSXZreE2gltN5+H|1rtqELgs34*VZU_Z^n= z`~D9c84)FlrZf~0(Ny<+ol(**?Wxkyr{`-9#Mwjb#JW z+br_%^2Cw2P~L^zd*%Zsayc-+RtlEk5`G{p7!w2;sPwCcyAR&O*4ReE%ExX02N504tihg)mF)WZcZOTh*_B^!iGSLx#@no|?lTA=jh$Xc0M zCNzgkPOHW=m|{OeQWyj{k+Qolqjhl7wY$Rk_v|2}F&}#Jl;F^l34Ad5@QSe_bP6Ai z7n_6GQG0)Q`y&gg5BD*zyfwUePbgNuOrv|tbN;;KFw-+9qg5&i>eQ~XQv)0Mq+yA8 z&?^Q<94Y5xpFL&g$%n7)S^$^Iim4CeH?Lfih2tj$V6-rnIuD0|!^>Q-HSmCG;wh}w zgL)F8^YLrG1A6JbkbG{UZ$bqzEZ3R^SJZ-pT`vsKneMvZwahSkWP+u1f;recpq-88 z$-qwMN|JqNWAobre6(Ra+T8plJh96Wj%w4sNt*&pt})|fLtsVE3g(d5A4893;rQ z{>&)A7X#ICPY4+kUM{dcJ`aYg%fpuy_I#4H5AIo$gHNjm;=jd<*tTc$p@w|;8`nOv zl-s3zdqV`){!K>X!rQ#Qw3>N7kArumaZo<*Jlj@smv5e#f{!xjJ!9ccE*Sio4IGvU z&8z0aJpF}ip!*;)nR949+#6$CYQ=ZGwU%W7MPUuH>X4nqSut&fmr$J48$~d^(^}c3EPrVTb%(K6=I~ zW2v(y?Mb+SX_*KX28@6r+W?+$!jJY)v&nwyhsCp3F%N}Mkb!j2uY1k>;}7zW7oxB{ zJ_+l|hp&{bXFGN#KzmCJ>~61MMK_*sk2BP18XJbgA8h1%e*a)Qt+F81CJ^>Z5}C~8 zq1bwa9HBcNX#dZcSB5FVPhmc+o8br|-}l0BbxrK1?9TtiEVS8@B+(@wenO`h0zR98 z?B6&Gv)yLcNO#?#3kn!jXL|$l)CN!VumYjJ@ou;n&m~lxFj~W=Unqm() zelZtHi-y96FY(+$IuOSOXVDJTPd?)N2G-A*_U;~}!pE{s*8TkiKd~zYW1AAN|MPl2 zD!iGs&Q5|q@1h{X@eteh{uSS@nvSVXp;$9vHD8w459WtuL()4xC>R~YR zbr$&KtQgl0(nooRZNjyMHgI8c0l0lohmJdgdFKIVG%Kau>XD<+yN}GwG2Sq+h3w#} zL9l6c0S|GTkBjv)(dX`G&Ucrvv{@1GCnN;ok}j;yI>B)0hGVN8h&zA%#K zdk?9>SIX|57tMh+h6jXxvJO+S~J{y_G9Ue5IQk4!$SC@AG8Q8o&KOGy@H=C z48f_-(s6BTH~(t6pDhwZ(eFtTxEi)H;S1z93=;5~Z7j~|f10Oscd%t%sZjPg4A#kQ zVViotar=r)lyn5*9zhzfGL(jaA-B5HDr;Hr%pzdb}?S%=IFcpR81=R@E=lO5lvFuMSeqKHdAEZRH5$`CwJ4#u{jo&O2*Ym$e$(Sgl{;;Yp z{=4}&8!#aTa><9ceA37cS~v0M_Y>((5smwEk8t%-&spQeG)TE03dX@}nI8G@1rM{( zY`H&K-JyI#|KWfhBAAos4xT6Vn8G+EY_H14^zrt1E2Bf=QmqXRv&GPM48ZKrOyPp7 zQ?Zq@JI75XC~IpkFz=m7UF2d&d8Q8%xpfk^CTpBP*8@m{hM5tht^Cp5()C zEEMo%XU-on@`RV&UK7 zcreQ2?C*w~{AM!EdF4i8+^apjcw!IBR>=UZ@?coJZzm$EzZ;Z;JbBpavd!R+;7P}wJdkqe#)`i+`_zv-@P888EH z?d}zfmJx#17CHw-dLS^al)OD=i=HM0_wDZ6_#dK#wLSPL8Et>C-8 z7>*CofdGellFed!9Je>0G9)FeSJ!0|X1kM1C4#jJ$(R@#!Uwwh(xY7CXfzi7rG3x7CT^hB$j&9uU6*|LAFImQt$$DX@yIls6}A9B 
zkq<9W`^_$|&w@we1E5(slKGMkpY?|JOm#ePg4GmWG;J)LD#{1_bO)Gu__=UyyB4ZX z7UL%J;eFHtC8eJX;233hOT$e;z3^YnxNLJgcb~q$_+;vsTqYb&KD>K3c{YXP!Pxb; zMA6y_*O3qZ<+1`Q{xV}rTRox6DHqI73<2YlDSXT!nniBT#M0Nj{H@7WrgkC*TaM58=7MKg9zK59@+)r(JRP>BwmsJvdVkS5 z*fS0Mn+zo~`xyEMi*fZ^9ZVe4B1~?wgR^h*VR?`;G_29&XSU75(nJwXaT$SwmIbi8 zRetbJNdJDe^|64^Rs6GRC|X&hYs@7nqqLv zxeD&{_6c(tM*bi9@QD{Tvh`zs@{2ZE7+4*E6G~$FYV)BWTb>Kb*E~SJ)sQjr;a#or zv3;T=COvp3iTb1oJIIIcelQDOZb=k=7-N9bU(*>+KD_hLDHj5cnt>x_cdNHgfh*H< zBo$?|@V%56OXD^1o5mO6nDvfu+bJLNs}!Lu-iSx)d!qlUT-01N6t5a4u!xa?P^gpz zo*%xm6N(%8d`sH9yOWB_=O6RBZzq}Aff&fIPk^M$^~}lVIv=zz3C&(nx3j?^ZZPaM z>x)l^8|1_H4qnat^7^4LDjO|>{IKesFOL~K0_GQq;JxWQFnd3aEqtkrXFBuo=6pMJ zzR@I!jh_I6<7n3S5rZLbwzR_L5<`JC-k$_z9*!C}nr&E6w5c ziX)bLA5z9Z*&Re(;k<;5iPC#aHufyn>mP@mdGYvgaV>WzAKp1L8HS}r zf?U%cW+n5UCzB8Fe;^olFI~zXhfBf188olE&Ii=XTv_5BIUF@94_&NW(R8CcufM7a zhW3=*S=mBrce&8;j~*Hp)17IX0QW6;T05|P29(hmU++2tjCX&peW)nJag^PiAs_x= zWu>t1tSuxPQO}d9D$I>m;r=peRJd*Hxm*P?HTrA4Tp5bpL9qibz6nbwChl{$~n8&bQ zexH2!hfzUjpO?j%^$>8GL4PO6hp)abWPwGaar|q_4U3#m#k^0V5U2rCmkVHzzcq}x zv{LvuWHJU*b|+Xk6T7aAtL>FBh1CDesYy?R@76XFZ#65NU_$e}rrNmn>OEn~N_+Ua zJ0C1JkA)8|`rO9R9gU1csFNmxv!{l$4W0h*?Nb&goc+u094O*;6Bpnf^5JJ4f6g6i zjAUzwJOJDx=2%~i+vf3KdgJ#lGpHDCd-oOMjn`-f+>XW=%b z0L*?E!5hZNzzcN|{2(7*rT-L`AsCA<*VE6#4SRGkc_=CE(t^k-isu=(6n z9Qu*|-26<@dbX6n$#^Ezwva=9dNMeA7D}QM-X9#xP%wUTRgJ`EN6N<0(vP{pdyoH71Qu5)ump1l{_}%>(9OX+TXWK_G&uZH4Xvuxq(R){@P*q`ZPgH0wz5u;{+v2eF5Kg0>NU z+|wqw;w%6$Wp_rdl--GrO4@$S!JA$*3#O@tHl1>8N1qG)vdn|DX(Qp*XJ?-2>4TRY zbI|#c6uzif!YY%3;pFlR@M?L_dcW=FQr409rz9DB9dGkdRW+>4JsxVN&^b>&{Gs-{ zJZgIi?(7Z6nt&a=ssC4Ye^e&C`W;03J|eb!?qK}!B?p(!@y5?#v$@R9QLy%W9%NWK z!|O@XtT$~O1{^BDG-Vrn@OOj6gCtd222e)X-L8pdQ1C0fCO*m>Z+3~XcI^~Yo1QB)@R&{STLoYoq6y!3 zeUV(*?ueZ_`DDl|;MQYC%-6`1I<9gdsze%gyQlE=N`dIBkcIwj-+AP?%}k4Y_`R*E zU|aH-T~s*5AC8Vi?VtpFbf}(}reA0K4<|uEY80F$AO5WFYmO=DsCy{{)qk(xZc+U~ zB`%xX5kD|J;mee}$w#OYVU+HOOvoPdjhJ&Q^upqFm%0G)Pq2zQX-PuyIvJAvji zwB_+Z-W=9c=?w`*InZ1;5d6{>@tGm>v7;jc$98<=ax@qwO+LJ*cM4pLY-cBns`<31 
z)PqzOha=bpzN_XA>oiRvlapqV$%jXykG!ZZ1Fsg%$DcD7a&^Z+5So(%zg~I6gb%i? z_b|=It;oZ|6)tFt!@1OIb(p_`-gU``FP^?%IQjKNtfTBMV2mYxh}d4+RA~h5^lQtg zGK4P<8j>Fy=&noIT}a&otPQy=G*_{M%kT3cUZ?`&E!26`cQ>ruD?+wl1X?p6_D{Q1G?--Iw&{$?ZFX7HUK`kjfzcJ!9@IFXy_421(}x$r*96UL1*VOl|o*euA$ z%WZVmUDYd59jgh3zY3tNdlq~hvQTJXHwB+lcK7?68H)7_FSNZf1x?EC{zMso$N4D9 z$KqMosVc^ubF{E9v0K>q(g9rE^1);K7y$KY++eK-ofEm3L_U1!OL0x7%jE|^g*?Zc!;_YNq{1btdNA~b-dhgla_zYM)J{Vq* z55GB83SCWe&}q33x@0+W^|K@4&g4AkF?EG*4|ygmQ^VoWl--%yV!zmOiIwz3NT%%W zzfMaq4SOO;e>wwc9v)5BOh@e-!-adt3qj|9*HPZ{wb&@JBJ*33$ zI=VsbeGyDEkfXUOcV4p07iTtQ|*3iZ3YmW2ubJ19}IT5wqH}XryP3%{D zB7D0S4Hfan*e|8;z&JQ)EGXpeNYKU}E_WrjL3*f(B;oeSMja8r+O zBp-h4#(apqWDnX^9l|k{+E_kIj3R$4?9g_Qy^#Q_b-A2)#Oe{#Y@y?HqE;b^=XDPVF*UeGR=3o09jz&F`UUSuAG z?)jP6PCopAw5?3Wmgdxy={(r>fDIuZzOXnJi(kj1>1xi+m)>MU$%oH=7YX8rdzq0+ z57$!4z>%kdF}SRd8>SC{kDs$atm_NWeXcC(oh(lLAwr86WK6tP^aJu3y*T9)vIS;08bA_L{$^>8o6 zJ*>|@5|(XAhJJy!*u@G7zv3H@vAS{CU!{tFt-sG&$%oJQ5Dr4&4)#v{D|ZRX#E)q~ zc=dvaYkLib_-{FMAMye}Gsbj}j6#)~JbZ2HjBa10xWHZm{vIxXDRMSoTd+YGyOTvG{pBy z0&`jF7AGHGegyU1xyRtN9p&8N(^EE?eE88%7QoI8#Y{)`H?Jzn!eJ8vaNoFC?lyTC zJbIN2trI=Srkuhw{l`$3I&~rCIN;(p&n3gihj&vHgO57(-R%k#o_K43BFgSk)y**a zd%ES~RCD-uUku6Q!zUYxB%6a~@W9Qn7z*t^0o4z@VWw6cbM@(*F5pc zl3aYYmO9(lr?5YJ17XkgOqgHwo&6PT=E)d_@3K=d`s71iKjRehREdQNJ_+D*vW`8A zY~j`?lJH1e6ebTpz&Fcuvwq~m7u1B%Y|IL#meHT?|Jk^{!57aY`SPf5BVh6+5u~km zgRWc^HoQ+6)dy0(b!;xG-bYE(TwR!wAclcz3|git2>VR`zYo8l#|Y)iDg-;$Swhu8 zvUtdc-%W+!H|*x1DrI+v{;AbX;o~8Do=iUcqg6RLu5uvSZCJ!a zuja$Tq71kz`N(pgm2$dH(Tg7Ws-?d@Wd3sr5 zsAGqco_y>du8eo$HQBy?^PqmW2;O>*fW(LZzWKBtt>tCo*&F@v>bzBKtWqc(7o@|e z)32Gc=pc9GQP>regjQ3pagVX}Y}M`rD7_W~R~jo=)59nHZdEECj10rKS2yycp+DJ? 
z*;x>06$r0)C9-qVhhpq8>OyMuz$u-^d|a#|XhJ?*G@uR;<@drB+L~DUy8x$m&cfkJ zNs^x<4IrXZ43B!ufQ33)RPQ&#Rg~SC2O41YxoBa;&RMW`pctN~YQnbn-IBtOK(YQ=8fDIZ3d;I|`Nw4l(}w75}B4j;HNHaoL#FJRqYV*hOT6{Tn|h z`!A3k?Hi7d*NAWn`S1z-w7JeGWjOLAAMS_QLHP*s;rHrbrLP#TcQJH4t}E#ZF@$P* z&nT1_L;IdpwWXIWu;8p15A;n!wUyh1rK@e=<)#8qdP8RBe}nnPqt5uTgnBZDjlzI& za~N0g2K(zd@L$Csn0~Z?J9^GXpNZ6I+Vq)UKT*QI+eCo>{1iwCyT=Tdp5s?j<8YvC zJia0yzEsl27Tr$b!2UdRGFLYIyh)!i!QZdTA_U0T&NjY0DIk3pzf6t*YtA3l6DbBI?AH@Q*WkKYG5yOae&Kru zllM&p=?`I$+HVVsm!j{QQ<=!`24atT8jmuQhQo=u&>-an9`Q4oM~(tc8coIo`S5lY z|0Ht4@v!+R%@^*O4U?J{(l^Rv3~LwTylQjo+E`xmtKAeDD7z~)nF`m0-jW#|R`^t% z?u+W$*jMvZxYUAXF0=EY`S=(ZenX#s|KN_Jf9ImpqG4FJKAP!%qU`Q)7EG)A&33n} z=SRqgZ&;Fsw+?mDoZ4|_J23_}lMjEawUJGCZ{jr%6UjD?#tY=b&mI4q88)QB`8%N? z<+heR>i);GAJhBX5`T=y4&{c@!vVa=+R1W<3B`IWNk<8voy*7dw%aNuq98qkm-qPTQ`5JwXE) z{&WK8YkBa9eE9tb1pHZ+7e)`K-sVpJiYE#=;MAJgi9IEWYd}FHB3u^_h_vdv6afG3{Y>Y8l{gBpBGHrR-gh6dvx) z#t-Vgc(u-zOK!-~UT_|~?4XS8up;xhq>5H{1$bnREoy01N!A(b!FkH=E|m(vA)-rA zK6VEFp}X#SFS_&Y{3eJRDWq?GnlH@I1Jkrh$*YsL7-38v=nyqr;Gw{lA9e+uVbm)+ zOAb_$TzL6MA3QNM2S;5WfI8&EJBx#%*n~PwTYK0E<-Od2e0bU7WW2?0@$?%K)=(D@ zp%-Fd+@G_or}I7^@*@Q;(!+6v={7#n;ww8Cnh9^?g5X$577HOCzVl}e#vSs)#Xp4n z;_}hZ`7#fD44mO*r4+04(m?Zu0(=l`ji>(=OFVb#lOanzy31yQ-;MEt?s+CC`bIx* zveS^)TMPe-a;Ggcr*=FPGM}V@!$bv?-0QmbWg8i10LFu>D3!~wIf5%VZA5F$W)4F`{ zOmcvTnCHTfhgyh~-35^kKW2A;B<7m|bbO#)n)#;CB+ep?-S}H8t_>ff_R`RFIW2xIE0n0bk@d-;?n8S!<>g0)p$shK!5fi)l$;Nb) zn1^5+`S7n7_lFTD>CL*<7aq>_WC{0Wab%+i4VJs%5la>R`<@E;O(>ww>A7&p7lofZ zbg_l9yVM9F>Q5UjSvh+;XwW&haFP+s*>$T{w8IiR577Ie7iD*rM}?=!hxbpT?C*;j zFqGr>f4N|QFb{)<%H!Tw_H2wB%}zMyfMqjvGM!q)eLl>`Un?{4(4+T!+w0v-vo!*a z^(Mnf|J!Wvp&Gu-H6Ax^ibLfI=lRdZyX?Z26tEy4zQ4y#w!rW+FCCeQ@B4yKLn)6( zM-C#Rfd2ik_lDC;X0urG;a{#LN9eIL_S#GHl7w+kc#!%@>TRIWyhNynlW13v_QvPY zj7j9?+T?}CK=bf$)r{o;w0x?h9aj7yRI_Wj64JKDHrw6+fd9iqFi_G5h*U?w5af-F3A;Kx4Q7%L?|f_{3!dWxyA=^(reAcXd9IWGr`**yP6n@hL+tIXxduJAuDfpq$1#IBi=UE6ECdD#mrpl zyK_xoNeY3GF(wNZKK;&a>1^cvAq=OtrDDgC$9(f3+SxiB1M4sWZdKK@s}a}vb@Jh# 
zJc~l7afkR^rPs_iiQX2kgaCeD#ilLkhq^J@DC_5kZ8v=RfDt38Z$SjXGv>kH8{^o` z9%VfLG#_93*^vd&BuP!30P85bn{bc8>EjcGS@DMWj&lD0W%An z52}wdAmq(QHv2{iAMF%@gZ)x4J)@nMimTbk9J=cci3fG(3+&DDJ6xZ9cVAn**F^9qW5S}@5;`I;2nL@p!dHLA=t@3(>UCKmJfr_o#nf3n0sO~bmW<7q ziw&{`_|#ej^;OhaE&1>@Z6Z)*vaoNQ53d^Mhi8{$V=DH?38pL9qzxf3czrtbd;FRi z$L!~IwozDcJqh1WyTKdYUSwBI6JU}s7P9Z0V&9%U;!`V9vF=qEKJD7f6E(iG!k?K? z;S>nhI#byv8)^KRl8Y(vp7>R0!u<*rV1r^l9J%KRhnjnZw?}H?=w}7EfAefyY`0J{ zefAV6qwEe&m_zdB1D5(-rsz%C-3RjFt%N?psuxzUMokQl9kpQg)NYBpjRStl$j8UK z$6)B+scaAV@bA9oLPy##=x&MTd-4L%Dl7|q_W$M%@7J?2RSUp#S{l&5JgoQoao#yK z2CJte;^zkqJkYk8iMkS@GB_I6Iv-(9LtgUgG3jV{BNXROU(2@+>SJHVXT#h7{6NYg zlxglv)q1Q96rd2r~Rv1 zexbOHN#!KN_M}LVt=_||mEZG|(EHFln(X``#gkN&0zs z)Y28-r^)jnH&sE|fwDVGTgW(CF4QF-{vc&{UpENwaYSeBv5pzAgUy#^c56Q#lMshfNn>%w@(~l$H+~6#zrNkfs*e67!|vY z`b;_qVe1)Ja`X~8;Idn zuocK3a1a)g4{t`<9eSAJZ_7b~i!*0J0%dpA$0tL`#zM(5C2RcDQh@!BYhc&#Kf>I3 zPVn+u9^CF44gYKfy!nwQKAxM4z1xT2`i~hb&o~G)8Zx0$(#zI`ZRHDQhvQe(RNR~S zke9nuGW()fD0@q0<_^xH%3ApSQOWr2V0$G)rNiLqA>>RHGK+)(IGlX= zHI=^TIMkD$dMOJRzKP(=8&^pBqs*#asGyA=&Cq?b#on$e$=DFuwGfM;!e0mvrwmu(0nse&~0Uchbg<0 z>6k_{@-u`{K>(8}yPI=g2R7Ylk<7YhhoZ)Ol+RZ}%^pn_cW@qzi=`gK@Zm75nYQ;Y z`JsY&HV!+|$A5{|uuAgb#~7u<%Zit5RmovK+B_QPNhRUxug$!5asyKumI&2eF%WsA zocSwu@)r3tZ0lNp>U)d%aPr|lI%I)Tc>rjzShh%K7|wc`i^nA%_$k?tZ}u1id!qAU zWPt;0Q+-EO*;-hpAjX4Qv#?4dNU~z00oGfqCd=8aSsvi33CqJ4_Xj*UeF^5NO`daknMI*U7&1mlvT096mLR^!)PDV>Z7 z48i9;E4UW<@B7;^Wc(x&orhH5Lvk`>sI#H{<$`X4k$&%BXgxZVCga@qWz`u+F+Bs2& zvoSJ!wU-NSi_b$Nb$RLypTiW-d&AK+IUs#uAnaSbh--(>$AkAXaQyX;T@33VvQ{Zz(1ay!Oe}2|Sez`sayH?G|#oCKUo>!yxj(MwVpxo!9)$#8ozd7{C+xbN!)Umyrug z!#rU?pD|k-rifpJ`S|ySBOZ6`l~}23!b{5Ta>$3TDi#X|xlBRrPBG?vHp6Mv@fZBx znZmnnF-%@y0P9SnC934Zn^JZs1}*d~?-p)*>j0Bo^Wo!$F%T}8#`hF^;Mt40w0kxb zr;UhV9ZLg1J2(r5H~(an-!||ik_G6gpN2MjJ9+W<6Rc!c3_Q|Jg!tM9RvXsLadr}p zd>@4&(~j_$+ArB^RqDHQqMu3K)yz$^k3ZGP#^vw)&_p4Kzx_ddcL5@ByFCvA#%r@9 zGRnC7G3Am&?6KDRmc(l(?IZY-Rq&d@%}HiL)v{^0^QRcCE*Yc7u9bpaBQ0RfMS6!C zqz^mnwn>r-ZE!qgcV*|s;rKg)S^WWLfYLlDe>MsNw9=Bf-!h59ZErh26&TEdP)izW-l# 
zH{BMcCYMW0WhcU8%I>ywSVB_dV}a_c8MvCx_@#x@QS3NE*gHW8hnG@zSE5TlPqmVN z5p!{;VF3=-R>jMWN-WXM4dm{LV6Uzm+`sG2+gAEwU}H8K-RX~yl?&NanGg^>ONXbe z-E4>Dey-U<^BD%oc)+lgzj}_W#vlQ9mdAoeS0z*H?%4485w)hUZ}NZCa+CZfIDw#pK+lRWas`9UNjw#w<)`u>S&D-16N8) zQYV8_yBOY*5C5}8Nl>R|il07-ai!%{+*V6I{1hv&rtD5pM;j_dPbKTfhd;fOJnThd zabmeXTkzH$j*$;N?H?cPI;mhwt!?mDetV#YkKX;M#qwa@dh~qkLtMQkmKFk8Q<^J$xYba~e zm%-&)A`HoM#}TLW_y=_*FxZq2Yq>ql&FT>9S8Jo~Z2ETevce4q>?KxLr$P&5ck$%I zPqQ3a+iWuvZ``0+XzW-fU~y?) zpd*_Lk!yy)0fS7k6N1oCl!>1@zjD>ZTiGuAaHt!T3MW=QU@eYk`N>VOxZy=SuFm89 z(1x3=j(m8z?nqd2eJ>ME?BTI08E9S}j2asX`JdbY(D;>Rk+pmwsw2`6{iL1+WHDalG8t**<5&2~qXC=n|94967d5Y{u#AO4UJ@3pp%UD&aP&pI56 z@1CaN%&X6NP4!{c!lGg8-$bbBYhrgb8~IPUM4aFngY#CG^Spmg*{;Ymi0xPa8B2=U zTh-sZdVLo5jt{`ceKGv|^kIn_g48OxRN6Cr z$hXWo#g?nb!ZgnWI8s)}{4!ej&{Iih7afJUfA{l4%H3=p`S54Yg}~?=E7*Hcf7CjW zje&L4zv<@7fBzW)+Z#miWVIXY4Nzf$gH%vy5V?{E=c4|0lr%f*LS+KwS7R8YpByc$ z6ii12ItMSkG{UE)Ck4MYS%MQ~cMHge-`{_)!7lJ2CJ%;; zl!y0?_WYcq4=S(B!NB7KaYNZ67W8&L*sRHb&Wew0$>&mDe2vV^AITUObDNvhRI{w^ zIFM|O14HBU%#nQfPL_fli4hpJU?(4^^_eY@&4gcz=fgRfg=}%~Al!N`2SZ%EF><{v z-%%?M8`k7eH;OYz#Y(fgrRw;Wvb$ebY;bN)iA1G(5`3WSZunqJ*ch`#a3b}&4r|`#V|kD=4qKzij0VjE=UpQ3a3ObeX#nR{ zemFQU8@n3&VZYE-tV1;vTrASz*S^<`tvbjLUW`J;s3hd%!+)Jn&qR9@AfhP-KAx*! 
zQsl$?Rj1;>urSKPH*z-iCp$|%e7GPGCM`^4Vdg_|V0kVYUGqS>I%B>fSrP25^5M%w zM|zWgCp1Q4e&pc7-4bQ zEch%%vtIF<5HaPg#OFT;>~PA*eN~G1I?jmg+2H~CJQwU24TVvL3EX*PAPx%1qQ5&o zxu@brW@kxzcjUv1mvypTiWOYvKnxbuQ%^AY@T!ZNS)yGMIKHB8XM;oR!O+({V0=1R zI-%$`WHq0j*AJ#gWrOw$Ke*H$$Yu{7fnG%-ls21(=3QDmM@bpVJM-cFe7e`QGzm8z z)ca zd_W3}@w!KIpXd1Vj5zEa7LQW{FYtla+t{>+$*|>f1hj45#dOJszmk@L*A#-W!fr87 zTQCsnGIAg)*9Q(NJF;6%BQYz0%*+ig=+--uYn@SpGqDBGzH|;0p58Ba!uPu|E{)uHbWv$mD;VjtQ0B{JGzLW^Ww@9^}JEjc#SD z``2+aO2BxlSls*k6qg$Okd^wULekqX@cz7+*^m#vxH1#5EfCH6Qu$aW4Nl3qu%OQq zgw8Wry_mWL6v&t$AAarczml2OEZU)^!(&s;o|I}Bqh zq8a;-vb%#>5PzC7kH_n|7y0m>i>NLbDXW590MoGhgZ4S$ZA8H_{JxRs1!!C z$mGMPOnuI_H>N?~%}{8wTFZ*R|Kq8hS@@^GAJ_YZ@*A?lq0XDS9@5<5TZ$fQ)>pz$ z=kxJ6`S2<~9!SzJYeT-Z7<|hBN{*TfKirv$%DrOLU1@^pGo1wr{Vc%X2JLU%)rZGx z*GUT6tg+}cy=QFHz&ZH?nCPDq$TsIeIQj5Z4FV2%URX6O7v~=sj5^M_Y@J zMh2uD2!>xvm$EbAQYikOji;1-vAxWd&%GlD!zSf{-Cb9hwNjDoysC=&_LSX`4?p?& zS;=Y(J(yZdo!i?4ur&Iqz+Zg^{-ZPgh3gFT*#2EmqbLLcWp`~!dNA#DrKIbuEoK_g zyl;OsJd&!wbjw}gqI4c?wUC36MJ~MOs}DXHl7n+E55S0?WlV2LFgO`yz~PHM?D)jJ zyy{XU`WGc*gyAi&`AEVpUy28dbFtw2{w%ZnaG&q|m4Zi-!%`f~4}{#bXf!Clrn~M$XIQgIihT^wz<-q8aer&vhs6@}z4{PD z*O3$(pnvoBG)sEh*e`~|orh5|4mG<~^cRq%ODWP_t zE}P`&4gw<)IHk$JecKRjWf(RkS&KwE<--Np-TcvFRkMx8(P@V zk;#xoKD!p}yXwxl(58&S&wje-ET$c&5Fy??IZ7h1n+`M&4-d7CK(?f<_Tg?z%%JSf z$Ca|X+@r!XW8v%bRYBd!w)BX z$*>&(PQ@a)WiSseofyY7zbMnr9DUb%+F_1slO!j50*s9p!_!-I@0&V7I6c)6`zX76 z{>=zYO4ka$wp)N7Wp}>}Cc(j3Ws;s9WUp^1K>K!eJZdk)>dA+f-9~#6-{fI@-W-0S z(i^jja&TkGp#LN3KKyF_-~WLZDijT*QlzCMEls_iSE!_D(%wsZ?@k?iuOy?4WMziT z>O3zaN%kHgSs~+1Nbh+2UZ3CZFK|0Ir`LJDo{#IgKM-ZbOeZ1?I-lgwd&U>$^|X?2 z@{Gl3zYJ8Jd!GlaIm?VhNzgPP8Dd#Ib8WcCZ&_!M=@N@cdE0ri-0x%I-Fi4}Zf=QCQh2KqzH*i_YnTOZX*;eBpGwI-~?k1r3a|(_%Aw zr~~A75iGYE3M&PF{zfMdZ_UrgU1!Mga9+l~mq)dmh&iP(7c?s@wr{veiHBWmKT8|&hL2Lu#=I+$sH(G?yAS)zUg_k+rGEm!WK<+G)ug?P(x zb*bx14U`&+;j+RE7_sQKptq6c)F``?Yy=#7&_-fcX$mwC4=;+Xpwia6QAyq&JI~Wu zmwfo_A?t+Jr_vV7>kS-A8v^<=zP!7X1M*$ZVqaFwHZt4*>nZe<~V zaV-=tPsqjN`?~q7eOp-g)fiZGCIgzthu^yNBu|n{!aKRim>1i~b;yT5Q^ 
zDZ8_FfxM){g62R&9JQWYlhqD5GU0in&kGBnd3Z>2vVfDDdm9&M3pnn7+1*Y9+-!bA z_=zsG%TWS3Mx&s0uNoJh^2BZ#5&D@ZqWNhbwqS7p=nW`QSldiH?OxNUFK@X} zK|XxHtMvPnV~1JyG#Zsh(k}d0>gW1Bk@gOqB`pSy*l8(7FY@7Cr#})*%Vxl}ZS>o| zY$Q0X9?z%G_Q7#xMffUK9$zNKu%^x+ko}YgRa^cr)2222sA&|QUq;^c+E-ld@c|Zb zIv!fesbB0Vj4kbzq!hcGmk<=TeAfD2?p*8X5MnVa|z=NODs&&1&PHeM83$FfQjVOM7|tX|34 zv)UVc*M>NNj#JsYe{BH--KB}^{6AHM#YkN&j*81})JJANDr+P{im z&TCIFeWuQ&yEJGYuLNVhlBZUEN}?8L0JG+aA#9ESqB?0oQNaQkopsaHEOCCtJqJ;M z15|7zM`)TM-03|ac{qUXgDJaPPCk5_mJ)kA&=Xwj=p3)3M47S&?-%Ni6I=@rPxi<3 z8w*)NUO3n+r0nkaC)PK-ig(S3!;Kr$X&&h=FTC5pP6Q-_$RG&RR10@fwR!sm^~ zUzFXsDLSDYaNwI(9&g{oq=wM<(I)|xRUBp;blbV>@GLZX9)&&YOZlB4 zeeB`rmP1e|YDB;*nnfd`O)c zNPQUr%<<q9YbWCd%I^LhYlMd9cM7$x&M=m;ySf%F+P{|Pc0nFEJxPQ|Rh98Y zy)%n$^n=b71>jOQ06vw@=R@MjhkuZRMx46qUT$MjEwM1vI|F3g?lb*EXSwg&BKtHAs=45DGdw;TxY*kFYtmLDR`88`2G8iaiyWp z*yU51&=405POW9kWkwI>dAYdIF$A~oNabtI2SHcv|8k7k@TPqd>q!`a8c>W@SKRTP z!VgJ|?pWB;M}AZL6uL(e3(tHfV060}|8&`41Pxeqe6@x%l-&h~m_X}^c!>-7@T)bb z7t~G%bw0iq627>BtVc1dT%`)jf=&6bjlMYgTp^BHHVAu+6Ij=>V9*H71INZ*_CThL zPdp!mPse4U!iIL9BwNcKkPrV;Hx=$3Zeos^t$dAh8lEN}K1X{$m$P_7`!m_l;u;Ar z$cG3gfAo?{&q+aT)E*5uXHSOL~ zcK5M^!Tf{PLfc+bJVV)CjfCz%D^@tjtJ=e)^R&bJ*9i73-7FEWbi&!Sl#d-FGqYQk zJ(VJu^i&1rnoZ$*H2iS(wF1;UDuYH>3fa?pp%7!33k@w_S=NzCzVc2CI@V?2 zh3JR;(ZbX0H2Lty(rE{IdLx@Ixx+IarK8@DSUkFtW|60TVnrD_a6v5`d;=CR{dxWI zqa9g0CH@#_=gtkthhH_3&blU^FlPky-5u7%8wn+tJi!HJJ|2=x7;XqNc8bCFJ{c1U zPaQ6FSs=~BW*;Vpa(0FIy#E|j zNU`G+3Wmdwk0N+hG!rC~{}VnqXk+P%5>#=ShTfN!Nec6gA&RoQ%(J#I^}ecuhmJK~ z`iI`XtjM*t_Y(rl9buOR-5+Y{!t(kTlDYP7xTCTdD~m>=@3ryl*=HZn>??%A0C})G z5W|Q5L)l#o&Gnq?$LFXj-M#0SvODMZL{&YP0 z%~~t`9yblDuhHG-mN8%++K=z*or!kcBJ{a23_tF6U@dubAXA|bjxCl0-;uCDm-Y zbr(ObnS&Ag!?AtA5`JZVKiKvoALK>{!2I)`Y+>(E{Pu4VO1F8UV#iI zUBJ|bb~@xVp~h!8PpHQJpcMKBj_6?7I1u$Xt|8 z4#fqvP zRM7w0!V=7yd520WzIIQ*OwnO}M4^KzC1k<52T|aWUCNFc_3_5_dDyNQj3YY|_;%aD z@b`Tow2$$HQ)4Hvt~gcfpInR?X>Qn-_)6l?rUQFb#n7WN1!Sv(1(%;D_=vK*I29Y* z9kpO`ez7fNQg(Odj4|AwAd)CL0g;VN^v-3tJ(4!cW2|` zdDLw-Zy-*(k-?-6hCnj;@FyyIm|bu=-`59LLaeQ168$9<4a$5L_`C 
zUher{XMrK?cdVAQ`#58GRtXw^)5Jwwk<~_fz}9#XEE=Q?AKuL1e?0wh&e8&`*fRj1 z9-Yq|KZk+biX3>i^9x%kwVf;6jKzi@)LR;Smw&r*mM#C71ba6mLAw4KwyXI8UpOTL z1CwGg*mo;eH2ccdDdmE)I1KLo5;Mm*8Ju&Lj3PHbjLC4}>KB#i9-SPaXS1NzZ6H(H zt%Vm>mf*(=PRQ&lB?HKZ_dX_uy;73_lgk}E=1szo7~BjK!|vC5 z@G7)fB0td;>)#h+puaj!IXISG9OMm;wvvlHeF&KB3g&O?15vv$9~I6?p-uVU0_LjQeeWR1Q@x$mI=R}^QR4&7#$vs`wo_I z1^r%j&Ls~*Z0Y|#GLLnS(g%rQ zuN$6pFGl;LBT#mwB^$QG7v3}$!uG;JaN8q=9H0>V9YFnHRlPhWfw|;MnXRP zX%B?Ar$X2hg&}xtZ4nk)c;kQzI{fBnbvXW_82Sad!jrWvLe&vHycbA7_SkqrjCZ?@&|GedASri($z>(5uZ_%13pxI~ZWe}Z5uy4Y zWsDASX7RdyaId8R3aVuwxwC|8hlJr@-CSIG=qrDJt&;t9j|J7h3^?cVkR9K7n*Yg9 z!nbnCm@})MCq22to<2(ltZ^0FFfVMnQ-o95Q2eOn&(>}X zfHUW41`ws8)oK}6Ss#H;Z8Rge=RI$W-^~uW%q15q4Y~*1Wadie_-X4DbPy8p7Wwcq zhqp1q&`fynHX7_+mNU&UJ$(DgT*Mn8xZp)5Z*dw3Uor|os&6(JOtfPbONQgh5yjX{ zKK$LrKa!{!+MxKd1TL0Og@0$v69N_)*4{s4#CRF4SI5lp??*1AIKZ?k3By4qo)w1vj?Najd$nv8@UKgnY6INr>rr?v3Jj#RuH9*=Lyhp%vW#rj{# z0+;KNP;I`NIg<~c+>wXR3WKo0K9VPmPyojO+Ph0758P)Q(>GVcKWC}alze!0_OWEi zRb5bZB3E)Nz|0A@Lfw;zNaX>zVWAa{v-EUWAY%{Du2DbNt?}^b+FD8Z!)bWoSP9-; zH3oNV?Z>S9lMmlQ^SWPD;PNX6UO8_L4j5F3d$!ABYf>Rw}I)}3E;jQ zgMC9YF(v#lFU~*7Yz8DjS3xqEd2u#_-{Rl$)A2}J9L_yk#gF)OG0(9%uw`dBoXT0m zMC8N!_2y&thyZ-Kz?0v4q6l-xi{RjGPv}b+!8&e^!r^XouS-6>@IcXZ0?))Pi;j`$plH(&6mV zF;57S6T#p~iXd~?gB$(w#{sehDB}Ha@4t(g^wMyelg@#ibzN+~O*PjcAAax3bo@2; zHoy5n!Y(%_gUX3SSpWPaTlM=9H~W);YAG=|Ky5RBF{hjT6PpWt(bSO=mdCEnki+SJ z3h>l+>UORd_`was!1tX9UhB>R%bfmfbkrDZx>SPI@M$zE_|1LB(YpG1K(~hM!8TmB2*370Z$(|WlkL+ zp``8k`Y@IY*ugix$bpg(Q3*aMRsJhdq-~p0c~B8Pun7_JDBu zuQP1RD1qf~G|A*qH+&+nE{61V(1-WD|4FpmG=(M z#i74K@$r)){w7NX9yb<%#}q%x@20YF^5F~Di7@ZpESx1jkjoT~2K|~6ICjPfPSjTj zzgvxP7$+}{e0b}Wvc^3tCjrgFgUJ(9m^sZt^7J^vqoHCTqNpQW~1HFH{6_j_^Vgv z!nUw9=;&@``NmCLmVEdcPCop~S}rr9ouP6Tydxie;Oa8g=h(|%x#gkx;b7{TP2{U* z4uYr03gNQE7xrhEu{84GFHI}PcnxlB!*nIgm*n&3mq?(B4J zupz19?DKjX*w8`G_+=9y;!vT)?AR21BrQhkps_gp={F&VeE8@Y#V~c(2>5-)lJB>f zjfIQJ%uE@Ca;sC=e9aK>Rmp=#oajqG^XI6GK)%;{y5cZ;vFv@v|vVzU2aI zT5ydAlMiqFbS@@I?c+t0I@#dtY{(`b-uE%hPOX!|p5%Ny;T4F3)&+12jUjMheGyDG 
z@`mMWN3)Dy>WD9kai|yd{wTFb=9lQhcFOLyUSlxusGjg4&kSiE9-YXCA6&7W)a!%A z_DhogN~Yr-g%X@*qk+`{TI^Y$7reSz1U+U$Av46Ej~Ew-AH>wJa!eYx&R)iFO9TY1 zr27o=;dPhp=1XVI#UIUS*rRft%a9LW?~np5)`@WU)G=20_bIm`AAWUvH154v&b21@ zu-ZSlQ0)`~k=2>Z+h-uMoI>0lJsTVTPU5y@!{OV=Vo<-~4r6Ei5Jr&?zv@*9hOeB8 zdp3wAFK0~v9XjjI+GPt1j%=H3_t6?Zzo!mH^5L880tEFhj$lvO-EPo<=Lg?Qq&?j5 zbXGBrC{v{!F;ljr)EBOQFN8I5gCSobfnQh@jHcmvcyn1FuaqxkDUv9N9-jr9tJ~R$ z5r=uKRRVr8NW}|{O*}5Tl^J!Wf^uLyoH58v{|----J-^LOpdt7s#&bl(Rvre0ns*-Ie866lUS{W)a?dse-$e zrZ8(&KiDW!2nwaLaD9IvzjKTB%Ess7wJqKJ=cz5M>qZQGIGF*dnGad?#*=*Y;3RC# zO2$imjeNzCJ1n&%9Zto^fqZ2ZbFuowCz20;YfCuZ@ms_v74!!c+XB#8=nqqzJlUXk zifCve!YB(*475__rB6q}$Nyz_<}Oe;;jl1_eE4;g-A!HQfL_VZ8hhVZ{68NanFZvQ z|J!(9U%-QlD7&jPz|!;+!aq$eu+xlo3Ux<;e6t$YYVgE$14MYmKoPYc_^`9f1HiLi z0qu13g9YkK_(=H(obfUng-u=j&zx#DkB@UMl_8S^nT=wYk_G9Ewibc|6o#4t#71A&7kV6-5GT*f|WZe-L5)Jo*hS z`cJYmbqvhDOmo)mG#?|gT*%Kf#@UqJy-Bmfh9_!`J2b7~E1h*;|C|W-xwAxSf+J3+ z?9NV47xkAs5{7P`0cW=s!lbag(o$0Ls2T8yBCjoVF!7(@+!Q22nk@N4G}c7FaQzI;Xu z-crrPVL@%&HMx#$+mJ}_+{y4##F-Dj!Dp+bqwf1Se0r^#e;C)r{@l!l7^4U%T)%|f zE9{32b{OE#mF{dQz$p&zhvO5JY z0ko{7g+^ryoc@>gB~>hO#-{rY4?-Pa+(vSQ1VgxOdr5nI$Eo4qb;m|uT2mE(`V*lw@@vvEO_>z41Chfa? 
z;>!lcgOedtHwnt*PqA95hg{g1fl^&DxOid(UtrwLBGhw1^`B69y0wU%43@=_zY1`* ztsg#+oyrSpRbWn|2xgEE|KX|}D_b%eFYhlwtHDlK>$qO>?ST=rQg*j^vOOd`i*isQ zA3mGzGuGTN#b;A3g|~A7{v%_;p9VV*v2Hm+u;NY(S z^>PvX)YU+=o|KRKR{!PU6|30znn>`Um<@Z%-ms{%HN2k1W6hsbT=JlmOV4Ox?-WzP z)-!?ja1XQV=I#8VY8K9W5{0)Gmh#P``k0Jo9`x-Fh7D4Q?56o(jCogxttWi(fQ1>4 zh){*+iNz2@KD^ZSE4%6*WA|vqjYodPfmW zz!L|HgvzL?5cIwTuFV#= z`+IltEBV=Y?Q{gDUSGzWilpIgdOkS12EtPB0CxBH5G-vf!ok|!80q;yZ{y*8UQxr!}lbGp>Qt;EslQS zMqjrv5&7_m9vQH2@_qKA{w$AupMr3G8l>9_faKF-r>7Nm?^;rxeU5Rcb@ zyPLGQnv6I4ZKt~+^5J_b{MjngKu{6o!=TGjus31_Puv-SJC!4cKKemo_zS= z^J#G4$2GQ9{{l~_PQiTg;U8>1#(gxOG552Xpb;Gnx(#Ja)4zwml+HtuLkPZ@m&zyG z4uaMBbk`p=8?Kz2#G=wi;8gPAlbhU8|HgO8Qlqgj>o?_+Po}_}@)F@>Aobm~i?QUb z4X&GSQXk)A4GSr|Tj6H{GL{LFO!DDhtJ8PZQU||`{~*|ZccV^%Vi>nX6%G}e@+B3% zxP%vC`usu2-4a;es$ftK&V%eDz3kuNW&Ckd6h`Z3;l0J}+<8PTQzjo?acn9GJDOP2 zyjC9Snug(T=i&Xq8G`mec{Qi_=7-ZMTj$FON$F!y6 zLGs~`h^qKj?@z2gD+gW<4~M}s7O;;?`eXg%0-Rsyk6{|_`~dmz?@UGTpP?rV{;9y=typJNcd`jMf8lW?-jvPbwxNaoaRB+2jI>N`B>I0jbXM+*j<$f zsCt$S=j-0H6@j~X&%HQYXqb+YVK=#@^f`8ze0cYLiO_JdjvbL{<7-j^z7R!!XXDRVc6_UNI5>Y2fo9%J=>O-BFnpRedXNwQ z({UQICCelMCB|@ovb)|oTUgOH+~MgsYuxgW7=M^f#L;K{gmu=A(4Vrqwz`kTx2&#l-}gA*}>+6$3y!|*dE)`Sni-4(CQW9G&3!r&; zkoB^FhK&y!_s2URE2sVBv~hUmC>ayOT)@@41U5@+!sm?PT>h{pPP7u?o}o$@u-b#& z>Gp@$as@DTXMd>ty^t#{497sr9Q=L0i-%6FVu7>cAaPAPG??9Ht!)zC5t)pGwUY4Y zr<45cr$=n{zZvl8RSfM=Rj@tNyE%`hOfxaAs^AMw zXI;Zt=u+OFw+4@a_=6?z>F+ccb-q*>Q9T}mDZ5)`W{=L#!y1GAtss!HyWD}MkTP|W zWKkyIbyqPS?$gDc%@2j2A6&ux0=bgWYLF0Yz=yi~;OLkld^SS?wOu0EgrFc8@-+{> zX8dKN&#vO9Ya>zXX%>DUAHM8i4a;(lCl@6Zw!Um(eXN;Z8Ig*+ToZ6Z%3=OOgEHo% zEKt201><5%S&mH~#|?Sdq85za7ZSKL42E@GWMz-?h1LBgFs~F)?WC~o;4Hi^>o1h$>bzdmhpr+rF$=?>(!h}aO^LpGEWDklY{Zgi4M`sBN zh1zI$wNJQz%pJasCj0*Ea9Cwy%Pk(wraNi6!z&nwT^}=;bZrRukPqL!s)zX}mh-8t z)O(qciDir1xDbAfb?GL;RF4#hSagm#lMlb~6wM+>&c!wDyZKeC_v~9sHs~IWfNe*X zv9-&k@#V36>^>cU1BM0gv4e*~?&Tt=Ti^w{S{kfRS%bP4OOS1yj;^CnawE_H&Qo@$ ztH9t~or<90YK}(q99;Co62+U2J2dQ=4EdDZ6_O8sSX?dX^moSjnUs%pY2wXiiYz$Z 
z1Kj6|K)Sy&bPjOi)5wP}U0i^VwhusqbMu+bw=lR$KKwsrU)To4?R@E-SXAmx$Eulk z`JqQ=S;dzmSW`+f8med5?|Topp;HDvoEwWCPFuMY`^vaVE*KVtf$bA9b)CqdN<#rQ zIQwCwj|<;?MHvj&iop5tEC@Cp$lMNS;g(gDZ%LeRQeTDSbCVIApzLl+pFMP?ZE$$M zcoJTuvu?u{GhBaZtdMt@!4%5wHj@v3V{5bImxU{yc~^`w-s+gwI+pnk^@ixmBG8{Q z1Qad=^Pw1scKP{edsYh5#4A{&ZX{W8*>GiPC)26f%R^e`Vo_uo){_r!G3^2iu1SIK z=M#Wc)UrI;b{@_%@qBPJF5X(k+bnw7HCNgrofHB`rlzub=RtHgSBO63!#|imiH8=C zfK5}$QyW7&o1q_tZmY3)=1&Qpyg3D~We0?vkuq3Q z^5M1gvoTeO#Mkdu@TbeApnp<6JZuYufty2E)-bZoON;QMi8tP;*5P{k>Tt577)H)< zg@3$Sgu^HFFgi$#Qg;~6GS`#X$=FCf{GE4q*zyg2;a<>%m_ zA>lZB`~v=O+5nhYPyo*}{GqnZjXk-qgrmbm$jOIqIHkmQp3{V!WZD-gbO!OH-GZ5c zA(m2h*Vi-|Kc$^*TueTEB0b|P(#+xcbVW&Wr+^Xv%kCB$;LkXT@Tp)r+#XT_rv(jI z=%mg&qr5QSb`c)68H#ru{8@8F06gLOaP^Ed$T~0M&&p{|?GZUbo8I#~dApgS+gxb9 zLU#Yp8|>lebNr}X3M$wq;(z4Bm+Q2#6XBWQ@G2U#?v}Ie2|c|2bS`3R2wp#*$@AR@ z!ul-gK>Rryloah)^{V06pjwRokqu67sdG2hF`lLVrtGeo?lU&_F}2_8_#yJ)vqaPZQry8svWJ~%4@i+PKO>VDP^3^O@eI*jobyl-pmA^dpRUTq)5WX1|$p>oE-d!N=-6i_K zX3KF*Y@>#Q8;bEY`S4MNk0o1f>q0GMch5G``|n{Jq2R?tjHc{Py2J_}Pw;m5XRtl2 zrtGe>bv*o$Une>AbQ=CVT7r9)jKSW9e(amvELeM41X=G@;88z;_bi%&K5~Wlag!{j zuP9`TXNJP2%v_ME>1NCKZQ*WLW60FZL@n>f+-d1a7ATiQ_Iolcn!?$cYqxk!Q93## z$6?%oDt6yc&V^?qLWgi~%Kn8AZl z_{WX1J8Ku59D7o-z-1h$trtVXY6s{^eeQ6=@c(CbtDP+H{`y{rC~W~|Q+D?(dK`>3 zJ0W>QO$VFIOR($vD6~2@oW-1?Oi)Gy#U_gI?^O>z_qRWq4k*B5r~6^&pe3wrML3+F zNZI0^E;1;q`D*gvzb#A0mBVgxQLluxTuFuxu_?GBy zHX=S3`bUJqKFDM9$cOLyuK_b;wtQ%^?%Jj?>;b*~sc zI>2X=4{!D@6)i5c@Z6AQW>b&~T94wP*z+*kY}3JW)3dO6b`&}WujM|zeeCAuJXoj{ z4CUKnS% zh^Fjrwud!*Gm>kRTVsn`DZBf-*BJlYS|ThWA3p9aJ!?|5!O-ouWUSvzJoQ6_Hiw2| zcCIZO-a-4bE`^XlKD?rFCLeV=1eGu3qT$gW+%t0%TXQ=aOp-I!-t)N-lhlH9)s{^j=Ipyv7M-Nwu#z&^$bps#!wS z2J+z#Ovb9+WCu?(#Nb~CgtG%&0Mkprtz8qewUqePfu87LM|ajglyLJjH})>XA8cF; z;2rt!BiiTl|9ZpFX<-f?IR1%O53geDAIHM}Pw6nv^e)r6+rX;>lF@x-5;EDcLHYw;UHyD|VqrZoubz>3VT_^(^c>(k}_`zm_sqAr$3O1FBu<7P3 z?Cp}{_VY%=cgpUn>zv?VQHAjKmJy~>cBeeW9;4FNH$v$oSVO;GY4=QF+j$F#&KZU} zp=6=n(ZhZ!H-*9E!ykTI3^q>cux*eIe|*3j55yPYP0Jy;Kq`dgUJe9v>wGvsKD^EL 
z75w-3NW5g0jq25J_-OLs4Q|ebTfu1%{-%{F+c)tq3aOadkbsj!wH$QYnf>rA0P^AA z7MHOr9=-hYOxma4AB@V{iQLY25aiYs!kn|du*AoV-49g7k50w7d4xModEO=I7^nlT zBg8QE(iG^oJz21sXo5rEQAZW|@K*8GXT#3ffFEUd77Hf8`te1Q;8Ro3Kw6B$W{*XM zQQZRUatFbcJnXF_K;yk7AMP+4pDZLZGj0$zA538@v_oKuQXcd_-NWpC%4lyP8jrMQ zqNey6S0*1``*;F0ol1e86&Ki)(ra9YeE2($$e8%LmzPiNWKp@&DIsHc9Rsg zr{?35nSrR65x`ruhk)INA~>Pv4W8+v+4=v}@xrTO)NyykJ-07QmMzi;gA}swFEgk! z*AsRZn<34^;~w(i+c&OtIQPOHHd1ysTE_^E;ZDimJx+LPT?syF(L(oS@=Pnp14QK_ zD0r_7e4Ae^J>N4h$n8GAaquh) zS&#&K{!4;x?Rqxw(LJs~K78e?SPTi=&ci&vu+L37ptLj$%;gubn&JWYQbh9+&;9Vp zQ5Qb&l@eT!5P`ec1ES_DFrQ6YXrEew73t0>eR7xN-8Vy6Oxc}Q{bWc_Kke|2TjB}& zY~v1^;iBnEg5Ea)d^6}dc|spXKf5IPws1N=mZ#oOGYx#6r^QmphtIuH1hM0XLO_u} zcQOvd0VUJ{a!?u{MK5E^w?)8!<=IeAKD@@p-Tb}RTr_J+L(zcie2?k{b{tY*lVu`E z*BoOFL!R+7r!uj-EgF}fDCga-J?z5YT&Q#mfrAS(nRMVl?8q%dtMJ*l{lz4{Z_9Ad zQ7;DXEA9|B^oIcC!_RnKf(}cj(tV3qQtvwfqA0t&yUiBl8@5b7_thHDQg&AyVuG?~ z0tEN(j!>#WedKmJaOlH($zvZktjsLNW2;nYR=||?miq$hE`)dCgTc`_f!|pcj0WVx z%NOarK0hXCjKF#l}UX_g>!S_;fD5pR%iZ( zKiA5}{ud&#uK#L&eBxi$Y>*Fy?*d`|vj~=8q<}3UMfm!LH+D$t@*pQQFp?C*fBj~F zr|fNkRqNqv%I>DFqQ~12YsuNYrf{0FyZTHkkeV>3v1DCg>sif82nTPkC;wjD>eM!RsTZpT_FpZErq=4UMOZ5=Hi=`-F$HK z7M6R5`s?d5KrQAW8^7lyuN{(v7t@n*m}?{d+IWZQ&QAyD*f0w>@2`~eP5%Yv49i*m)(^c;PToNLe~`+IA&S`FEvNO z%r|Ph5j}B2e{w6e6>;bfANJ4c04R_ufUo=e!9~L*{KL=){N0|7R?F$$C8nC?-;M(> z<8C{dz+jvb5zA+p%Y&R=5%lHx zK<*-A_G8XSbSN*z>t|=+OS>nMzr8x}nX;?6I+ez-&1sV` z;a2xtmgpD?Hzm2CvEc{JS8U>%vtp2u4}aCYjgKg*W0yD4er`uHSf_G!`PvPxGAbSG z-^AeyNj3j&)x~Ds$%Zoh2(VwcgjLP&hwFdju7{XAIQDj1VPeq2dXp*3?@)LJ1C}c<|M+{#fQzfEG3V@!yXN*%)y+ zG?q|yxA7B`wyffJyy9>o`S4*XcX`J@4eWAQG6>p9aD#k!ImL(k@vaQa>x{vyF%>+~ zuA8kMl?w(R>E64fh^0lz;?>>)w4daM*E^^1?vpC8Ng{%EE$I z39F>nOP)S80(}&Ny0tx=c@^QX*V_sM=|1CDiz%9%w-oeK0OBdT8{4c0Q%i43zQ1wB zkc;GDCsMbws}7Sn;SGCYi=fI&0ZI)b__LdV=wgwNKg54|>Hbyh?ZHUsG0uiXi{CJh zmKr`}YCPT{AHIJ}E8pVZ#BQmiLXUd_TqPe~*0G%n>RI^UK@`?xm+~;9KBnZI2VGUc zaI+(Ub}t9x&-aBGaMTx<4m0DI;#A>xQZcL{AKo&&OPDf32dh=Zs5)#4{@5KX`D14S zBFgTTs@QyOWX;YUi`S3#x zCovi4*|2tAAR->be%xc( 
zQ)qT0DHcwU4_}@1h0nc`gNEWTT;40@YUIP`%`1Sb_x(UC(}jJzpo|a5hkuykfrF+h z@UC52AX!-ggD*P40;^p@*MElSc}$F#rRcn#S>EW@N*#Xm*~)j&XLK4XQK@0*lrBcs z8huO-X%;@4xWc*j#bBnV0g9)!c+DXCUEE6h$K=C5JL}IHECa!~FdwYWNx`$U72Il1 z1iCJwXZ*%a{&4eN_Ni$uoyF6@wDTI9Jm~@t+LMBCDFOShIL6DyKV#P$GNB+m8l(@E zvF?~2{HX@34W4^5r2flqYw)1#uHMT8;8=oW8~N~)DZ4W?(LqVV2SL8q z4I0UZzbICPTU$)|ukF70xv>zN3kTuigajsE8Vmyi^Pr}xmuZm?zpy0=Kj>uP*1UE; zMz5B+lMin+Iu-V;YhtczTloa{G?aZc7kBjA&)q!Uup>Iz;4&o=vVW~$Np636-S~V| zeG!P;Plxb2g&}Y=ya?(qd4uGF4%kpGFxrfY53pw_pZWhFC5y8}( zDj-Uo!aa2T@OeuC?j#>ROSXvN(@;>*%Z2BMzOuE~D*4XGG1UD?vuM*F@}QlkSyz4% z>`Y7szwwQ1#FIPx+Ou@DARqovN)_K0PTp`%4oEA9!v(tq?8NH+xJ)QOhg^S*?sMa& zyk<^Nyh~Tr5=)u)TTMLYPyqdoeUc3PaG6`ERg2mF*U~= z*EbIpK3fU28%FPz%MD<)fkg5&dpfq7l;C)!QMgP`olOe!f@^nZFI`0usx5ta?#2LY zI+u@4D2+R&FJXIBBfzIE8;P!?)%gwal{txbf`Ean9w}UKVlp|ej-NN_zNwQ z9UA{rSNumy7KV@y z|3qso7rFjn5gmE(voHuUCr7f=Odhqz(cWFE56<@;$N!qCLCU6L>MEK6C+i;zQCD?w zn3EW{J3Hdp@vf56Clf*VL46Agt-#+2T+H9IV+<&4PTo_>{3ZcywntM#L=Pk!$;b zdv89h84&;#3q9EmSw(F8Q-sa8J@HiH2;O*;jEM;)5b?|f4(vH8L_3bds!}obfPh-5 ze;eQESioO8>jt^enYZjwWB*hKyt;wzeG|sv@h^2kpPCCqc+pN_-zcyu9nKFN^TfNB zBGeqHgtHELFr{DqpeS1aMHT(wh5RD^VM#a+H_yTEZW_ceZ z{6kDKzE@Ae56@3>o8OPvSn}aLUXUXsRl!!zA#)=(7gb_H(ay1m%cskN@P|Ck?Q`J& zNILJhocr&OHzhMk8ZwHKNED^|yiZ$7nzZ-cdtY5ycd`p1TV>16s?Yn7BzsHZHnOw2 zjjZ^c@8kFP{kZSj<@#Lj&v~Ec>pYKky}_OVCsG;wEnb* ze)G$PAG^k3EwQ_iMwYndc~Ha0Alk)E?CuEp;l(p%Vq-4gRAP6VzUrY=@xJizqchl? 
zErJQb8nB^2pO?8)PA#GkZSDJEcT^ag8A7%0$6R1hf0;+iGM;fX9P8RLv61}n(wDng zvvVA%e@y|^J1uOvTQfhTk%AG9@mLp9%T*1ZvaiXRkaRr)WagAJE5~2_?AlyBq83Oy z>f?E*BkeGMUjQG}y&@ zZh(?Gu9>mKDyh^0UbIq9jr{NyhDD-&h8^k?yHm>=iQkle3tNu6!a1EHxN<}ds^?hn zwGVyJzNmoqR`|X#O9O?oSECsRZ7hir%WdL-v z6oQVFazzCcend+^`dbA$^j!F2KVQ7EARmXU zSHQIE^O)kd5LiKe`0xLGWSfR>;d1w)G3rwqTCv+a=G7^-@4rOIS&<0;DV=0Kyf!6Hl9C#Z$^>I84CmLkWWFVOw}8OJ_;ID7gCi zt$0z+6%%GqzP_5WM;A?5_bzWZc8boyWM%MJmdxXa1>tL-Tx?kQgNt^n*<`0kVC08y z4S39q><;qV2ja2mR5HFJKm5-9m)HZ3RDg%E&?K{q8IT{o$RG=Ar-fr}+Y)}bOa|U1 z<-ws=e;Ah^#QJLW$J46{F@*f^l?%w3X`~6Ko)v+Shcon2Z4pkN9*v&^h^bNu{r$*j zu_f0845;_1yUPp=*Q{um)^3SEDW@j;+7O-osT3Y=b%3&RDNMbt0}IoYc$DadSJz5# z_@_abxWSPP8b1eu&*#IRHHxrvbTR)D8G^e;w$UOz{La zDNe)-vPr05a++Io-(pic(qP=HXb|JJFjexy&o7`_S2+~#YR%_jjsmC_=EH_$UpUs_ z!a^RZU`D6}RTs{}v~?={N%Jt6olHKpY)2^kS}o|0AB#Q2?lz$nIS@}a=$|qrCuK3b zj-Le6ng@y}-wD{BUW`da`uKW>C_I%;hx`4=)nl#&=OQ(^G}Z$MPB@dUn;3#8E&!A>q06hzrMz#6VCA6*2&mDH35xI z9OnO;v@?jvfU3ulAk$RAE?IW-d#7msRdWz(RA+EQuih{vy8tZz^MM~9E!d#dYS=ln z2&Z0i#USVrkIfzlr5#czT51Pt))fnCWk&c9vAg7I3sh>}T(9wNDh#H6M#gZ;B2V=b ze}1q*M(j?asE0l$o(j+1Xuc?uG7PKLp>Bo|e=OsTtzQc8cuXHW+c%z-$OOW!ZMiUN z@h@iEx19Tm5hz!Xi7Kg2dC-tr<~b!E{*oU)uA-R*FKgi%ugHt+ABTfX_VD+99qjJa zOt^iXSj*t$tajvI9`YiW?iT?#uqTY~(Cr5hNOO15UJ$Qlz#a%1sNPV7GQDSDiB+4p zuT>A69gAUWIl#e*<^p?ZjEjifrRGh+W%}OM!77#@5W7=2KMwl2mWho|sn#NPmpgX^ z?IDq4wf)?{y;%Z3pQ*y@Q3Cf~JR7$u7NA*qFI3-Cz@#3*keQYPBQ|_yn~zuWyVs)d zu5t!mvumUM+;yy=Pa-@dKm7g4oawjU;33j9yc`#UQ5(1MGbx?y(U@#_RT&Bsd>68Q zgOIIoGCO^Erew`?tX#mTK-9?mI zgGt6C>u}@AC?hi$pR&nVvGS+&@i78CSzS!C#|E^|?wEMH$q9FvNO9)}Z7jT@#^mbV zVZDL`KIspHk8fx39Ws6xAeWCX4$0wI%@S6(A`}Y8XG8hAPF6F!ihsWvgF;Cf>L}ji zhP^p!xsn8p2z6!{x{i-*Y+~TJL0n&ioFxl zus8hU^(-tj3?trK!VR~`!m<;2V71Q=#$EAdyT}jUe7O+sWzoJnuF3Vgv_Q{L3eL-? 
z!=E57d`r~F7GihfJp}T93=(JiPXfxrL#L`K#FyV~_;t_ZxmE7>vcgQ_@SwFC!UcEcT4lz2+XaA@2kg*68pV7v7O;rcy8 zY$0~H!-{5RGRqs<)|vt3;o;D=iSQmw#aNNy>LA(+dSx`e^0+Q^J#nV*tq3{N`Ck$C*vstaggUtqS+yJa=elszA0rjTNm_$|Cp7FgLeetOxXnf++P_6 z))m0>W8M&HYQikT)N!y=5nfbr#q6z};@<(f@OcRBojGF*7e^!s%4QSL;dL<@YnbEB zj?j~j&zpk=vAaOYcxc*PD3;XQVwMcein@)$oBp2#<=w6@bb1k_t{nmvz0G*BjSo&E zKm6rT>StU^W~)aB!NdW%V7#ZB1&6QZE8HS+LQ4idOnby%*c@g7b@6cbXfpUo8X4bu zg}*+Oig|Y_i~MXSKj`s_)#s7t;6xa_JiUZ1+#-V)((=&X*&o5nkB1xdr@ft&wHoOO zLp+DGL5f;<{yDwBoXDrJ>Vmj($rz|jE(X;`1|diFgy01xXhD3d*Gp48y1LBT;3av4 zh}}&bW(ddsY!@~6I^dI)Qrz08gBy4CWtT!`!Q?d(`1NcM7_~X_wP)s_@#1_8Sfq#* z5yk9rVhFh0$tH&Ok>x9I;v)m1DPNwBzpd`_($l9{_CJYG@hcH5RcWH(gb2A+1}0t7-IZJ}?IQ z3~Lr&mQKgVeJFb}P75{H>98IJPv~>C5N_!W0NCKmGbj3^4*B6H*T|w;W-0U89tO1~ zSx{B;it^$$e5Mc0LY_@U*>6|4Bl+R?+9gAbNdg#G9%en-kGS=z4D`PriAg&u_@z1B z?4E2ctg#M)Z&4ZSaAug2$6>5n0O&Yjd^zH zuwE*z@*ht%3)QmgEnv-s3ajbeQ*j}&yIS(YJ5Ti&BtLB6wPrCSnCe1-;al;LuM0}k zi_on^9Y5z!WQiNR;Xqdb*!lMXC6{>Ku`CdE0&{VC@-N#M)e)vyE>-Y_oM66Fv z!mw!#-0$)&c7H({+zyWcrJ`+YhUa^}KQkMLuL{K(77KY(i98&%qFT4W7yNp;GwrVf z@ySF9Y8krY`EM#*>y0*uZZsQh-~`eG`-MN6V=e&b0Gd6?9B{T`9bxc zJkYO}0|ko`-k=(W2OeeNgu+hVoms^??!>^Fv1u?^?K*ofhS=S$Bn;V=fP6%SotmvZ)GaC%rjQ^0E3v!3QPXhdE47Bx22)`J)w%(l#t>KUB&JWbK_y~$ zeKqv(U->rS%JvyhzM%*rvWG&`+j0D2xEHQ6DnuEtzUbK!#rl6C@6@YYNJ;BqC%RVf zizz?(`v z!osMTp!}Z%6j}y@!#Qj2_rV9J+7{rLVkLA*$YD_q!Em%b2dqoJvj4VHjpq@C`v+y< z0->FEEI!J*HYC8#f0JNoC}%?ZHGW+u4LzR4;PV4jyvM$iZMs7<52M0puXqVdDwV@{ z^1~mm^27DZz4_a}1Hk=vA!Q_J-(9gLd(xwY{fwv%f98Z5FY83@Jbh>;b{FR;K!}Bm zFjQwU22ejks$_}}%37@#$614I1At&;;&UI)XP&_ZSy-wTU><>SkVbMRb? 
zEl+Ns+#i>~J@UibE>~i|R}II=eNyZzg(7$nAYv(sk{|witS(!AlKgs6g`nWl58@rexcnV| z^fk#tyNtiQ>EtrD{a`q(Gt7d>+!xHUbvF-njKd!C!xx{r#3#ozu@gg5;J9-Z_pCH1%FTi}K##keGGJPzG05jv9W z;KeH`l-Q1foA*D9Cl9;gkKyFLCqMl7>1Hf-h7Wv@7QnCk-eC4Fo!5^K!avI78(7}W zd*UnD1oFdcouhqsK9AV*ki-1GUIKd0O2%^`jl92jh5d7!vdH1F@QVEKQ<7it{G$JP z4#M#Jfl?l`SQe_X^58l6L&IP{_EUKPCSRZ|vbra}7&M&U8>9ujm81~3X*#3~z95`k zI|eziJNtg*TR5&NcC9dh5aL@qADF_H@*@o!w_0Heu{&cGL!2tD5-$4EevJ&;BY#{6 zzP;?rZQ^F3SF8lb$`8V~L!Fq*#W}F?pL~$nssNX99>16wf_JWGqy3tX+;ZR+mPvki zy&387cKBW9bN>`C{g_C7nXj2%1A08C* z)+9x+o}UlWn{zL$WX|M=zfqq`K7%W) z-L8?l>`q3d#(3Iub(r6_c*Hz7^{4_P;qfNQsbzF?ZOU2s9 zv3TtBZk`nMf(;mx1^)=)z}_rj!~Op9n(=w~vE3iftPdhrcYl}_UI?aVJ)yK(m;D*7 zi3gt+;p8vQIMMlrxcJy;2n#5N5w{quHkl%5o}7r3hsQ_z%`mHcxwV(RB^VOB8}Pvp znj9*{gBu+1-F_+F-ZC7sJeAm*lWw51UIG_eRH0NN0RQ>LmQ&Fi^0u65#m!Vkldn4|AhM@kT!7 z)W*|pXJu`?Xs^jGM|eQk%|g1D4}>x=Z(h5>4`1*+{6c>C$8$`ePY}Di z6a)(!Gnjvd682^Vc;cH6UK}`$OUMs@{G$XalV`&6;vON^eI#BWb|+4?NA=#N;@~Al zV0Et;l=fJF_G=aEZLlG6%Z|m@lFGVbN2jM}GL%7veB{ zQ3`t0G;>K&3o{`<{GtnSV6MB5rMYzQkjt4^O@86FiO+DOC>@L>E2GjOW7w0@Nh6rMJdeSK{ zPQBY@S*@w-ZHDflJoOOK8c4Bw;ld#>E^N1@q z*!sdWh-;65Rb^EyGOUxA>Sbf==1?3lyM#|zD+jv2^1ySDAMA{BXA}Al#8H0>(eSD} zrnwH`viG#%E3vzn`%Z8qrcMZTGC=iJw3pgaz>xGm4XeifKf60Xet4I%wuYcgYxE^{ zCmUsedyS6?JvvU1=Pm`6ui8*}NR2y_A3krg1lP!_pxxzJY}RjIuvEy0W2@z1j5gKY zOG5FQaW=jqKYT>kHU@z)Ah$RTo({gr#*iQWzt|+asGf+UZ`AQcecD(Z`Qgt!jDlxf zl`K2*Gw&UjgIZz1STU-QH|6w#u0OP2WW#Lew@F|n7gaIhtOOlLxM9T{c|JC71Sr%> zq4BFd^w^aPk7~!^Ut)Jh$5>*=+kl1z5mR6=vAg%bjiDb*5oL=2w@jyZ^G7`lbhs~^ z{o)K}XNutV91XZssm}-bc%g21A=X&;!)pt|SYku~3@1PQvN?a*{MKcB{E2XMYt6*U zvJRf`bvM(P83(rirNH9zEo>zD;n!)W;7B`~t#+^F4(3l;Y+5EXUXGxhHswsloAQ?H zb8(|eAf8_s&sWXt14lj-K#r<6STz|jtparnPA$UsVJ;YY;F(zRUKeUKiXmFw7VHfI zguP0}7(un}ZCP{dC#k| zC4i4}G7O15!zPg*epEv$o(+h_?PqKF2TyYHk{^EM?l3SYD`h6@WN~|49_1SSaN1p8 zK7Hr_@VrFx!TBCg{jVn5FkB1el%?pmYC6Vj=Hkt0eYlod49jF0^f@s|nCm?W1v&>E zubZMy`7vwBUMr}sp$vmP%_;X(iQmH=u_;Z8sgH+Y%rF&pEPWO|>(tNqssc~!T=?}M zU+g_UAHB;J@btg)*vlUw;7NXXnY@pzX2KR;)E14W-lySR^23k#c8WE8O@!d3i7@%c 
z30B*6kEhK@$J=4iXr{l3KlSfon&gM~%?ttA&Bd&+KoOVnd{h(WV33*I!lv z8H@=ohIzL}Lygu2@usyi9(`Ga!48_((OZ{g4E2Qk^@Z@wv_D+_8_0t%_+vs=9G;#S}KkTN#s%(!BjCZ`^2a#wRTq0xukjpl*mOoOs+R zSlW#um#GxP&)Z_xwj@!F{P5d}-G$yThkCzh)7Tkv^d)vT8YbW)<3z#wxGjt$c4y&3 zPH%%wu~Nwu9h_-~YSR!5du__%_j|*zljO{dpf&v(c=6f6R@M+=i=>V@TY8L!?q`uxSrUZ#xq0oXsQ(6)i^+X8RezO4}ZT*iEA{v z;erYYZhbQdQ%^avXcKasoy!NC6^dYIU(ElF4Z#AP9OU!6xa!Z1taMH^Odvn}Y3)|_ z+jPqfYZJinm$Ei!?aY5e;?zTUcNBkNk`zn`Z4pacSTAd~T5f+?V9T z?08=|x!Hw1eWrrv!z36>et7*n6+ZInFqo1eg_CKHkawJRJ5L^q@zupBoVG&GoD&Vr z+!Xw&pD{IR67|(pM8*FEoI~s`F;^cSJQsz#3#P;4zEaSfqy-8KG`U)m2Nqu|#AV|K z;I;x^Ho4jlhS%qT(LPy_Mwar-&0*+wCkw|HzTsypYS>+mSg>tMg*9#0*l6oBd>bU= zUeg5h-+h?tLp!@f7rfpNBcbD11>5f2&AaMz5F3LqVqpev^XpCT5=FBjOJQ3S2zhyORui2Ese zd?gPjKfIk%21?D^_^p~cwyA$2+$BG}(P+-ryt%;#&r3u9=opM#wvD$)I+@c|B>LyCeGOpnfn(yei%xiaqLkeF+IaS( z8rz2MU?wktNbH?%J$245UQZ&Qb^^eWI#y)R#yb_r9UKvbBKhIh7Jp_TNjaeE7YuH4xvVo-2_xhR zu%>)Ang$D8dG}!W(8OPQgnB+$GCH4;)PS=pqjpy)d|y}`JKA; z<>V=NnZ7sME@PZ%=_F{9A3o8E=7>7;3JC_!Od2G_wL zY!`DddCgZow`?6dcRvy|V>4jn%yuRg9OcKz5ASW4jF~ge@boL!n5=dhTze4%0o64u zD&-A_TUoegOc+)yDB(YAWFhfX9w_ee1EJQNP5ChZO|KTBO&aaH+p5Xay0u`)ILeuo zPKSQOxzL@ik52Q7@qvqg(I-^I-l3Dglg>fM{-!W##l42WIxD(&xK`1`@$B7dMxpHDqiug8+Je{ns)P1a2zsZ)B)I?9Q=yG+thLUAXtc8FoLX47#}{ z^mf+eZ;yK7f#^cqrr#fJSP*Nv?hjs*^T28DUsltwgwLBCj*8>6@cut9_}2@&Y1TUy zrp!qN|CUS4&bNtk^27f=7LW5o4)VkBm>Fwk!aMTAZx3C~q7r^^dCy!_*dB-_&*J&Y zFlE?%yZ|x}d4r9b3ELW@j?<}gwAxjj*MFl_=B61&^2 zYL1ofmRW>eHirjPcPz{q58w19;-NFPc$VsttJ6o}+V!7>Tl-z1%drR)%Z9*g12g{C z!3WoqAAY32GTOgMW*f&+t=lgb{5N*9xoNApiDxA4JD-7@A|CNm?uV&oK(p0{lHpYH z|LUhJ{2=+^hu@0D%G*16O9*A83bR1-SQs4Kw}kockiml)d1&O|j{&BB{Ktg;FlJpL ztkw2}2a|@g@V;8u@}dY`Y@P9j_XTln*%&ZODTb>JRKg zia2RrF}s)=0_wN2$+_~8Ez;P;tAnHQo@+X8nQ)ieksp3}X(BBAo(Oa0PqPbO?(itv zblmwc8fRK<;qHkaS-{0?uqh6KE$`+rPG3ygmc@ok*Ou zwt{bq>Sk}`>F#C`1U60?EIg(+jwvX>@HsvhU1i4q*{KHew2R;jxp?`|F_~2tNoY)!`$QGyWAf3&Fux>Mt=B}r)jtIi)CEXxiYdhILf`Yw>}+&c5sC_i3;ELJ@=zq?!<-ynh7C#WXv&gf?pV9iq~0*8L_*&NtFLc!0B~I 
zdDY-{HmXks7zRdx*_CyyJoqdBr;a>A9>FLy<#0)&68KmbfY~n}@EIUgT=X27`)I3ov6>WEOZ*Kd9T{wJYg!N5WA~*VGM_-O&9lA z* zY;%l&DZ^K?an3#5=V>kukO{;Z-DqCM`od6yLinBH1-ec~>{!H5j9FKNgAUE0OlzBH z+)IyK1H}0qZD98VXCd_g?RKXApsdAItQa=XT4DAycuG07xRpk5{@Nl@W3oMtAaJgmcXTr@-G#_?$FwA~t&Fla3!DUc@>3K@{y)=gfI|sx1Q#lZv_m%ZMxQ^TT zM4{%u478fq&JV6X%Cxo+yK7IPeXpDyeshg4ADMT^%TOho94OhuAk!v&-u72GoKGJZ)7!N5H{L)6HjnpUB zc87LT3E1~mfr@>z_|Ozz{9%)iH5=sdpS}y(?UGQiDIj)N@SgSZBqxS%3^p%KquIb) z+<8C)yC0PVzcmxV?r|MEZq&+E_M~IvlPH`eyMbGhAO6*d9B_Od44-TYSx0IwH0{YJ zraK2u@3G~t8dX8-4EfYXyAdB#qCFhLaT4{@zx}kw1q;f>gRc$YKGOTi$P(s!3bMWy zGzImjpW!7=#H*s2P@M}fjo4l9)1ztr;D(4FozdxR5qbw};DklGY>4Oydm;)U&Yt!M zM1}Ej_x&-}I1kIB{_<}Zma#zc!%%4%X;HBunk zF&_Gd*0Q{y$9&T8Oq_o`0t4oh^EVE^*y!20aAkcU*ww_dCdWRg@xB1FcX^}D-HCi< zraD-p5W|UffelaJ3Pq!I@f)$by56>!xjR7I<~RXf6T2%^G6$8(OX_EqT3{e?l%omb zafW`8pp;=pd0Ht%SdD^;pbL;LO=AF-3khj|}^1PmfS{AiCxzWDMLaxbQWc}OgDksp3m z;VXWlIE(ttVc4*>l$( ze!-n)p=Hw;bRd4!-HYLD3srIN8WTt(zSZllDI~8r+VH!^3i}efqb22NB~}Tyf*fIY zI@P)db-<}tKmIOh7RtnsPmTQW@25Jkbyw#=>w7OM;MZ?A^o7k#FANei%cYW!< z;nh&gE65KovoIef)Bj_rnG^eTo$}9R5-g@aL%e)H9(qUz5|@)t?W6PiQn^9H!PvgltQ0vG=0YJHoD^GBMdF*vs=;Mf&GJK;exp{G`uQ;RYSGlpRYRn z0{xwJR~BNOWq;h$?aMT*{UMnCZkHXA0oN5vcnkfVSJL0TLcuFue|#saCO>?i|9v-? 
zuCM~nMsB$;8L#np3{E=C)uumUx9K~b;ui@G%U83lbG!Ml-nq1AJqUM?rlDT)!)MSl zkx9=+Mx+_rzF-KRv@b$cdUo0deG&hoXDRNF6!wxIzEP`4uu2?{;q=V?y=#uPL7Pwg zrf0E(o=FXQHjmXNh@TJH;wyS)KaA8x-&b#i{c^65K7+EC8S0>Me9^9YlZi%wkNIuegY3+KcnJL8Zz{8idF;Q$FM6b+ z_rqAc_#U#;%n?F>6Uvvhm&{=G>UWL^+4?~Y+DaO%R-S5*r zQEvQL_*q3>F0`WG<$dcQIa8!OJnb5vgfq?!6jsyOZ$a;XvLb!Rs1d~`>2!Qe?})YX z+Nd0<$s%Gs;LG(wc&s=OiqgG#2fbUA#5^2*L>5QHl&~4Y!=R4dLFJ3zu=J7|KJ{@7 zy3so-^z}8aKj92pM}GJln-jB<`(U$E)(cvWxWYYi$mKYtz;}&ZZ729D#8);j{oIs#F}Kh z;mMZ*_$u^;>@m?ig=&H$+j22;@h`r9#7fppHNzmP9img8GI{dDPr4k3^QpGr6*Sww ztc5APB8My09$tg@u}Z%Ve&K2+z9B#SyuT}G2BL>~QO$BHBLJFv!q`u$WhPQh^CX&B z%@6~AOwfR}RP$I+?ep8ZO<2^bhmMZLIFV{21LNu9u$RVALhP=L{O}jF2RHN!n}&>P zDueUmuwq7;kZNrYyCtiq0gNXD<>5gdCWGqo2Mu*p19wm@+$+QYtCNojf2c+- zqgwgO2W>ceU5)l{xzqj;3HGI$+WhS-woJwkqUG}8{W5t-7`Kpbp&Gn&d^SEOKYUc` zHg=e5bT_KiGZk;LTS}b&q#AxV)$+k->-c?*Hs&ms0n@0~zy7R}1*L!H6B2SzH;8sS z%N6pXqFzu#cLc4qv*EWyU@|vUv6}7>OX)6QZ7$D!Q%1neS}E-RWDg(Y%Z0N?$Ds%1 z)ST!pGO5e2K|O8?e4_7dY_~B4iBm+I`GCFX4wLsr4=onl7k1IzW-Q%t@;x-*=Sh9O z%Fhd{=?>JJ?m|1N!r0Q70MPHug*NBE?AXg?^x0|Oz#ZC!G{1w-R@lQ5=x&ujcdWn1 zTiD@*W-g_>*JBos-8Qv6+wm!D%g7{Wa|9eRFK10*zxeqLxwx=jApVJt=N8_5z>e;A zJ^j5wb%zm?&Q-?(y8Efq9dK#tGjR*~;b&?7?6raR;UlT}t> zt1ZBn*q!5cBiJxTDke+p@F3kin_@>|>Z0F*$r)FOrn~9K9cplEjRg-TKm6bP0z4nx z8wbqDU<&m?Ab*Bx-NuuPa{TujtMk zN_XeIM{2lC*c%!k}|*DU;nIi*rxpq0c@)^giy(+vqON=}z92Mf>hhlf9w4 z`fs|kN7LOsJA{k367|7{?({Zvw?AYtNHF!Egp`NJ9T!dU-SRr?cIpLK5WCwfjD^$I z)uK$SBRT)2IH_$I9=1|pVL7wFhk6RRA64LCxC>V!KfFEl9Lkm`p!|nF|(Yfh|Xv7@vOxh^#AI})gBE3rPUIcaM=w)uPU)_>aplhujR-A2h?C2 z#Qo%lZ>L_&z)nj@%`dk;vDOT;>HSq!YJ#h6^aN*-!P+2Vd*p{->3=~KsmF7edOf|F zCTfn=WwOIOL7sX+q0|%FGdzedyzGw)s8@8VMg|AhE@8Wk!y%LW@Y3K{?Ax84Jc)Wt zgZ)!+a_c2NFtw45JDd#8)QdWne2|SMKm1GTQMG$S;_%ef+%f0}dp|1|1nOmFy-a2e z)Z0?5E5N_xhnJ2v;~S~>)z7I2UZ}W|hoVymb{>Vds7Gc_y|R3GkSDX#ZX^ zhc`PCE%fJ4% z9^U6rWw>@RnHQ2D{*DK^wc@GQ7rvV1xkZ9C`QfA4WA<<8K|Z}M9$y|!#)IUC-+%EE z%c0)kGU_2dd$yAWdc5KS^%f1O$7ox>ga>Vrfrhj^h@>85id7J^H0X~zD+T+ERO+e~h`Zzhmq%{|WTh!AWM7_;P)Z?6WTu-!GU;;tZ`~0}k3~ntiYfyS=iRQ%a 
zCQ^^oUw(t|i+ZLnR!X6%Q3oa*QsS4Xr)o*P)lnUTFyp=>J4HR$7iaTf{vt(q9A3;@ z5<}39db8KEy7)-tO{{M~G%O)Md_@0NcI@E^9zi|bUB42sT=g_pRJ+YqQqOlP^?nby zY+>@`hyO!8;TYLawEI1e+b>jr&C+}rNWJ3*sSAsKqk zaH1n*Wt84E%t!N*1w(X(5?8)?OuK)vlNdR%Cs zUiXVWlsy@z1rAl3d>{3~RjDWbLvH{ss`O>C)GG(-nFmtu{8UCM?NbONe?}H+C%@q* zYigKE``7u{$1w z`=c}X;?UkukXHa%)bqc5*n)i`2H>hyggcsC@x|HSVy}piAVYIL=JV{pbfZ+bMhro% zwHOPDDQG;dsMqhF3a5!RJXAFXe^Y-k_=gR?(=5iB#3cThJrh=g^e;@Ri5zp#~anum&s7(IF$_>kTE&1U`5(_!#^^|Ly*0Ot6@i3YE@Q!)S zEMrd#ZzhH^-zyHSRrhen?qIu!v4jz8dH7`+QzJjTJ299DVlfdI#{Zqz4;cC3n}WQc z&pmy%!bJloa+=Bb<4pHnVmicj@`>^2Edm&1I#p;P_H&&WkTbEMPny2g7TT7ukrr=$+{x^45YC0K*EdFgBM1J^gn#+6dX#joS9}`=yIbpLg z^=gO#_8K^Zu_kx8DJy}ynggM2tULXN{O|y=!&%jGxW~MN#Sv5dV2}-S3p?4C%qo73 z{P1_?rlH;I8+`s4Vt2QaAbnQ?Ki3hjOz;zpnaI3ur4*sss4iL-T zWK;-az4}6*r(yhAN&p%X^F5Q+!-Kn4uykUAMKfa3UO((uu{W=4yj0_V|g$n#Qz0u^0{tQ$c%Z|AwqB7T8Rz zJg&kBEmtfO9*(yMQ(|{+#MFa&^@wp%Gg0L~3BFyghNo^@F!sR*Y-|g_tyl?677@EU z7ldbs)vqY|%GYmQ$ATY5!U|&f_l0)0Wbsk{?}h{nB-Ve`@(h3Z;2J}nG>9fYeB_!M zmMwY1S5QXa{-`jVomIjwQFdSkWeBcU`GLm@Z+7l4WeX@{uqToB-ObhHr+c(O$w&&} zlts{bEDGN#o6tn;uGCRL3hD{ zb{ms@9t|~=!I-6Rn+bpGxfq^=vXs%-c;y6F)@fzYJJUh#c@!M^zL6;`?BcaLIq3E} z7+t~&`M;F;xJ20x8Onf2F4{7u236cn*^s?w-SGEvCEmJfIL*OGA)7KJ%Xy_xMVS)n zcHmVLOMH>PvY}s<834V%OwUb(<<@3m7iChiDVs8lGAakJT^Ab255I%5E7K^$!eVuK z#z{{!iK1GUGA_e|gIFhJULs8LU`@tf)_!>jx1So0J1HA;BKHM3Gjup>Xb)Pv^d^M3FF-nsZ?Qy`u>7tbwYmBE%W zJF=AB(fK-&DUcsNlrlYrlY%a14)d&+x%+DfRHY zlu@pC3ah59QlCELkzL-+s)|?hTa;xoq)bzg&m+DpTq^w zkQQF1ELMoMGrmx}AZ}he2I`63EjmHIh2y%yb;@uB6W_Y|7d=%B z0VP$U`7sAHr7YNZ%7j_H>`U{RvnV$&fj@T#!K9&1+~ndM-1$#F{+q9eSGN|kBa|_F zdOaJ~t|33X!6rT?JQ_`Bq$7;D%YV0?Vmm3LmPJ{$3tvt#efhgwnf&nM+oI87{1!ev z|065AOg&-p!}~m%$GRx{Hiu=7%+ulxm^*0qbPZ_!Cl$FcW>&ImF>frq}DbA-X z-Iwkv@q_~HEg^RIXO9&q796u~q3qpU`fPtIO|YVIkkDI>!P!*GA6AWlrM$Mh!%w7)U#ru>wf6Rhl zi9rS%l-wIHPv+qmnV}4Mcx)FC|fzvKo`_@y%j^kUC^GgmkaaN(dy(x z=21<%5Iz*ZH`hL}W>GwsuL#6GzO)0x^B1>tCTHfA2q+$z2`9-9KiRXECsF3po3fu# z^P70i=1Xh=WkN5x#zFrtlo1Vj!DYr|QI0JfN8DS+ulW9D>&NF&9?Bnj=7up4`k`ZZ zA#GPMSj 
ztqmVt$k#s!#`mLg&?MqBxBFbl)E`BGbYD7oD_dF7%{s0+G!gevX7`tN1J@@%ye(yU zKLo^peCRgjlKh?={NhqGtT*$rG$b&27!=MabNNJhHCdjGaI?4*)7~zg4CslYg zWr$a}OJNaZiszf`7v78+ixHGHew<;Arul6RYkp4#%ELqdv6CUI^iRVmD*;c=C;#pO zeauLz6CO~8`7vdgd-c(Vf%clze{n~dzlFHE-$1CM(`@PqE3J>>6vK_j#_;3Bbnz)=%bQJzC->FEb@SSUQp%pk zQ3kyxZYXFN8u3e%O+5V=EN{HPesgx>*RL>cyOpB^?yWhFPZi@-8TCXSxb!Pn^R zV|V_Kr1OsJdH>(P5!nhMTO}hRC8=J|hp40wqMeF{_SCMo5oOmQGla~HLo$0kAL%4} z?{&ot@_XQkq1Mq1A7pF&%45A@vp6?aL&7q@;0qoHlRWmL%NA_)!M32b%Ct z!ceZ1*FAzbr$sz}=y6wOY%3M^8^dYatw=H-0mj7F3)0yzmNSrTsLk6XbpmR|Fk~LO zA^VUvd7fyrq5*dRAKn?+2-Ve1Hg4IPI{rx~d+_1k4A&P4&Cto*KZ~<(IdY%rWop-K zbbw%QTV<_~Nd9db+voo>8H16{2w!m9);7VGyrJFA>eYwNS?p3DAj^^EicYytoq57h z9nrIgE6qga<5+ziT5x8Z^o{pm6J$c}Eo;bUI%SL93;d}8G9&JhuSL^fR$L6yMPX87 ze*Qa^6=YieUSKB!PPaj~R|ZY(K9*LE(-uJ+y73?E(;t2s$%*#)>MCSp%qqYR?qy9| zK6~3v1s}c%_8H3#4dkx2LzKrEM4Luw=x3=By%~E+Ek*{%x&po&_l`VutASXDjE+fQ zI-Rp^K^^7<$O-qnI0;#v`|f{a@0*K6d+_0J8YNOspBkb2X}i=KGo2@Z51(3iMoyhm zCFUU`WHBO)29Dn?{tka2cOpY{>+Dn>JSrcZ+&_e=M>3T!_o0lrVIq7?Gv59tjT<71 z)aBqn`4QP9_n1s-7U)dpH@#BUAiGr55nio^c09PBw;Ja%fVv{%^s)X>%IUS_SoLBn z?gAa)W+RO$|EnkcL|D7e*A~P3;?3`Q5 zTj@Mty_kMGnC^^>qy7!9i&LMD$Q_>|I2L^PpEr(3>mJudAToI`0)lCYaiLgH@K$PH zn!(WtQ~1z2jdV+EL?@8xt2yOP?erW)%ne;G%g^8|q2pNpX>+;iU{@NkB#Rz^51*K_ zNjZY7U^2A3f_K(zl<`-2-@E4Ii=QoG$siib2I`YTg7-&j*aF!@Q~i_50b3_J@(lgQ zx}B(^T{r2`VFJ%u1&)wKOJ3UEQ#cOyqNKzWI#g1RX0KZy|3cPr1Tv4c5zpnW6I;da z3!!9>Oyp5One+qT8{ZSf1;@g8f!{tkdGdWR8Cl9@lcv#{tYso`$rowbESdd~xqQ_q zTE3jv6j{DBYJyDWBzp_de1RVSYoEzZ$ZYCetyA?z^rX&zvS=pw@Ogz9O53>pyaCUg z<`T5K(Y8lsBLlh{+MOdZp>Iqg)NbIzE70yPbuwTh<60$C+XeRXOe%7-v(ATJO-v@cW8AvSO<8#UkQfR+d@eG`%I{-hbfc^CY7II;!H3^6 z9NOLd*P;~JouCg$vgowY}h85i5>C^ZfW9h;@0rOC$N=&9-< z@ZmrA%i=M=Ih=YB&Za^vr6}5l?09Z&skkie%A-N?-0j{p&R@S$&dm8FYK}p> ztMn!Px$&Y>17;>N=^tKC;4h(f7P%eh)Uyl;In?@j+9t4x=Q9mwhzB`1(Y;3M(GaJNiHmVYxc z{R48UW&WB|;@_9iR97*bqO?jxWyljb=u!d?0v~?TyCq;G{Sq!<1+0norg#4ah_zq{ z95hMi05ApWKB4E#z9a1fYrq@KfgZnaDQoT<@_lG`Uxp0lfA>17k7`ZPTdAR6;KOV5 z^vX5C!}!fv=*irVi*xgoMPMA91M46vtvkK{)A8>QLuP2eQB6R 
zCSWEUs!5XmU?})TC!wFSPVT<9QndCDqKRNFT(!6@(u&LE_HEGaz+#wVB&B$HNi>`l z3r1fEt&ZP>eMYTp-)9E*TH((dEa%A+U^`p@gD479uE zp1rwn7qmODDYlQrto))gX}@hJ=dvqT41_NEu{HN^>MG7?dD7B)sniRsi#@}#Wy)XJ zH}{*t_rQk_$tn_tQ}Oo(Bct(`OCqbilntW60@aD&mHW$NpWe5{J*{}!b3KU4udYU~ zdYzmQnZ*4({n+JcnjD(lm^{JekolA8KR-pJ-e|*5!S1-EHx^ zNP19}rxYLQ#~bmv-0oz}nlJ9s8)T7J@0# zRc2CmFh=^`GL~)qCZd-ao!A3h3fl=*$~#r)_W@?;^h)lGbO z=%_IHuU9j=0;WoKQ#X1!*F-!9Yh@LfD+Kn+p+S$;gWnA33$(k2V6$*PFU78{DXaKC zRe!eR?}jUE?yRyRotyBft;P3lbC#M9)=M3}k81;auu1h#sW$z>LuYJEkf$UZCloUZ_blZ}Jy$erti;eQO%`3619&WA4iNYxaqKgTg6zL=>GF zT_HMy55EhnnqO|AT)Mef>PEj5X5hpBC<>sz<8sAiFmC*hq;R)wo*cQ-Lq;04B9rs! z)|xU84)4}+2Wisfc)ugbsL9u@a~L{R^^5w!pAA>rHVraTPB z&;_qx)^AcMkI#575;`YQcEl7q?XMAE!6wqGOhqQ%o!xIZ$iJTG(s(e8Qme;O^F8pX z{nM4}pxtfWVUK;%YIViEzND0DXh#k9?iqQu6ACRDet5n)dl0{nhRR~FleD4Tg@F&B zw(6w1!_kTNJk8_-L-pBpl!3?tduee&I-N9bNx6!zEV<~#YhzRR>AHICTn+~H;6Q5E zKaql6pNr>Dw#sU-ogCa_c<-q*a!+QZ&_56bK2;dqow8T>(E}L@Hq@}O)A)z~GTA2l zoA@$5nJkQa>EOvI5gO2xHyljk#o)sSG_sJ5R_f8xrz3hFc!9 zX6AMd3s!EY$3*!xzVcwY2*Zk)}j|tat`?Lr@_{mR_;;*{rd9hhFBV0$&&wLqF}sYk`}DP#-w z*U~P&;tE(`T~?;^zRnZ)TQ38dEcB_eCX+(#oXBzVN#$R##EPKZ{Z|fV=1@cRW9}eI z#y-Ox?6GYN^2?)Yt@%3UMTZ-GdD@6IO0&K8)M_buHp;uwtc%)Gb9OAh`zwPt-_zx! 
zFAk!`Fn90(QmJZYBTApCk^5q%a1Z??{x{^k9MG{)Eb$AbXD)Hn{Lc-MSbIpOgQd3e zYXsi{A3nn1s<5d=C+NLk+G?|2{0x69ZD!5jxnQz=`)`(Pyikj#&P=5<0UlK0?<`)v z)!~R~8QeN)9On(wk?U@Ep^q`(@`pH3tkq_vk?6xz3}(w-8$OUxR<3=t8JP^=|-I>`?$Wen=u|e8|fU{6`YR49%3CBbpc@2 zEm&8dKF!LN+FJtn2v~N_{9huYP%PGXgwi32Zs+m~BF3vi=7E9dG$5Q0EZ8Rx`rH){ zz{tCHZ5nM~Q6R>F58t~2dBmf>{KYX|4hv~QUDDD>7p%Ple_09tt?f9ib0*(o7w$Xf zr#f(Y4=RUt*DhlOHRzh9lz`pmdlP=$WmdegHvib}pO&;3T7ynYQ(CjnOI`YVIFEpK zX8{)A)r7}NxG#7qv6<9*W_vQ*W+Gi$xpD7zXMP1x` zpUxtxJTOFA4CdkoXh3D5LwHkLFWW6WtjQS~kvABP)=l!&M(;=RKfBQh7uB7o3~MOv zcN$BJD>G>6#WsXUq6`M}F{fS{H_2?wjT@zlP+vdN4o{-+*gA3J$!eK&JBT|si06Mh z+?M{8W#Zqq5mXI6{FTO1#5BGvGr^pkF(rikql@J2Rkh;p{xisDi9h-G%@)(Zho9RZ zl}FU`bT|Eu zZ7}_xc6V<8`tIibvIP#G=0Lmq;L;mR?=rRWsv}&n4T$7wPNk;O>!9c@ZG@tymtGN9B2XU?s_CS z6^GNlM>#CB(B=F%+HcI2yxs$m~cSU|U z(3WD}WRL)pbjGa)LVH$s-f%jL+uM#rPjH^P9PE10PEcDL2VTSxPp)tT&|bmS2odW(S}6RBN4+#mbD`{2WmG4WKtbwjr^_JamimULC8b@}VfR-6UxZoyI$9(O)R`DQkf zlEJVw0?W38!!Na8*ciV3E`!tmYR8v8Sc#2b->x9=)WC;-wguYVNnb8LlEiX=;p{n9#U2x4e|jZ9>s^7oab-v^vvQXm?Rrqo_|2w7X1W zw!_}ma=5~Kj&FOf~t@2J4He9z!!)r|Xu*b&|rJ|!F*+rp4q`C`@ zS)(HZJG=4|^9(+5SBG=iS^SFfpoVs-bPjxY>n^k9oTmOfE)CjU+H1Lce33|h9!y^z z$I|mpSH!!P<+3^`l3!$@?@mRM(&VP7zax%Pz=z-Ra;>P#e=qNJPh#Jve!SH#U9K%? 
zMDtFh(k>Hsir;A`Dk|G>?Fz6v%E$7yXl>bibvGJ-eR{|SdzzzJtt4f&5-Rf%i{M z=UDxg{4?EG=-%?8c+(Vm1U|g(-379dbs#%{Nxd?xMxL+PE*9SlrB&l&Xky73k&sv= z7lRLCvN_UQ=(NZw8 zzg-_F!V=o^T(Gn~!PM4y@=EpWX+R%3XlV7{cGP)GxRU8GfS*CTTdO^kLw9Z*=6_=- z`9epz5#FD?4Kr1(vv%zB8~u|uJ-K`Pk4o@C7b+f^NoI5OC@^W(3BnEq` zP(E0;Ren!y5P#Nq{_VQD?i0>k{GjTbNZ6FrD0tMA|74Ry+-@$dFs4`$o0>m}v96({iD zM&r+Deqoj$^Yw+O@#UU@^>%i|0Z&J77 z@7xA|_w~g#WSLoNI~_k0{P66EpYh%BHp(PzK{oi={qZ|EcdJ@uF!SGQXYrv9&^JGJ z6}#{|t2~uXopf5!`_CTo4}Pc9@w=^9TA!ai9&bQ%r?}^Xg)8%3r4`zR7d^U^E zoRfy&!{>qNf5kYQ$|Cm(#%EcD&vgFzY1}rYK#rODMYL{`OquxXuQrJn73c~G!!xlM z&xT2mg`8H}jvjQ)BvU*)u5CXm=kY8JeGV>v+z8&+HB;4(=uiH5=KjmEBGcFNZSLY( ze2Qn%8qa2necnpzrgl_|XSNNV-A6UA)FC0x+zXut-SKSu-yA3^@vQ&+FO9}KG$Ws_ zVY2ONAFhY*#MhBO zPki@=h1Y=1n#i5d+3>o0k<NbAm8*qkP!CB&jGv#7VzB2B| zNHWA(Q?;rawT=a^*JvzjRb+4y&ZL_qcH%zHD7{~)@HaN5#v{|^e4Js&ah5qvs*{r& ztPw>xjE(k9vQ?A{2q}O;ylsD{5u2c2p;47|N z_$!?o<6Uwh)=iwjyQRms6sla(fU5A$8H{&NCEh`8Ce+H;MVo}dgAnq=JE{zP_y-f9 z-J#p$$+~dfy|PqVfDiA8?h`F^plIP;=bHLSs(ANxbn@fJ^OEEx0R5;N0S@g?GAXyk8ncnE@mSCG*0I(m;t0wu$YXQ zVDO`4vKjSDDBYLJN0=G9V|Msq{Y0h?E)m_%PN!vI`nB0i=AFU8C=8M9Bk zxUKrNF{C&%4UNNW)WK|&@(r_69JIUV{VlnXPFver%u>TKQ|+2#La)cLlTcSuUU~iHdwb37hjPWhiF7 zkKn`aXmeMNK3FQEFbi(OOgPV6inWa|N^Rp&KJQzNLJGKRwfuCU(Uevu0ok`fGKE#Z?0<-e_H=VigwvMPaawQ|o(t|Nm zYhI3%MM)k!MM-5n%-pT}Ws4ll;8inb(BvtvMe~>BlXCF_GyGA^ z@@3$|2RYr8qtF304zqq=%=}HkhbN;XntIQVwl+)`lW{j#gFC`Y19(6p?Bs3S9SU)W z*oM2r$bs5oCAz21;f`_U-AE2;l&{YE*q0VUy9>o#r0%Du?I+w#UVqc@wS5CQ^5_ty z$84fzxWl~NZA6zBft!cB4QQIY0e791Ck%v9^#t05JJ3Mfh2CrokjW3dcwWC0?uWaQ z*Rw_967Eh(=-8?^wnqHKoyr||t90D4J}o{YzvowpDBQh1h%j=+UF>Db18HQKz^1sP zHOE~oKH!HK>Xl5p@_eW)BuwbIHREBp+vTAH?CXkw(i3;SMBM$%aR;my^h!B2z<`}` zH*~`t@qLMxI%2{Aa)Wjkg*)V*j(;5+zRZg6KfoRe9b_d2S&A#}oTa#Xc0os3*Mg5~ z+X@$s#NAX6chot>7Ge)N&6?uw`d@Gp@^Fd=Ptli8p!;m^v@gIbpNzttI2w23n!%NF){ArE2JXxWxH~t*9a^*UxzwO@?Ot&JSP{AM7Vg@fX(|8n zqk>K4A@<`gUWPllXgYx{4s?}OxT}A`oqZ1O?z`QqmFwGk@dVuI18}!5ve8vL7Y(9i z(6_2jn^W`Lvhrit1vo&v3;5lKN7`&w9%EPV3OfVu&0WD-Y9Sr4OYp@`VLf&W9}*qK 
zz#Hx~4m*d#3$$qa>sc~eGlk7h&tMidmcAN?Ua0&BacnvqmFz!H1uplS-?r+)4MdgLsV{${*}f zjM2Gwx~jRnf*ngo>{>4Ev8UK!o0RSC`|@qB3gk4WvvrhEgyqkQ69Z+-Zg63f-lsLpwB-(q?66}f|7uTaB z*d6&C2;f&~iG0NmJEdz|g(r4QS*0-~cApc6v3t_r7sWoqd4+Dpf3ZXRb7m+vn4WyNN!y{64DD_Pc5QujMyO8Mz5Uf*gRUh5eiu}$ zjKyxQ7j|@mLfVsm`$4h-ySqi$;dRC?Z~fCS;k?m@9%9Fr81_x<2v{cj-<-+~^%MB0 zcp&Hc?-dr<4K6~5q`iBUa6f%UhG2JCgdO7h>h1Evyc#hOyG6o|aqY1MV#usNGI4MU zr(*}%%hp%61t0!Ya5}9moj@-f48$+&ERSG!IjhEr=PbOa?!iv86?U5#qiJ)Qx#E6$ zAj1#Oy4Zn!$jP%^Wob=apxsTxj`VT;HR^rrOy}&(LRUmLj^C>-_F|`Mhux|#Iyg7K zvXdLIbA5L_m3LwX+h|t0n2pX(Z|r7!VnFLu2e?0lWE``u&)?QZ33@eVuT%=-Sc?&mCV30PP|~FzR1Hayfb#<|9nNa^IkVO4!d$^?9At2cfKKe zp7^g<0Qeb+6chDQ_!k$;X3s+SN+;}N&R&r5&J`jDyZFqN;dCsuROpYrD;=<_&&JL^ zb?HhurQnmOC!yU*Uz!#YFOueIv-Zd|UV@!}_I@jQ8XACKT?V=Nk0I}WekzZl5&VQ! z5Q0wGJI*<32{eSZH#P7eTG5B^&1?%tTJkYy4Zqu%a@Zg*Wh**rADC(=7Met^`C~N+ z8iip&CI_I?)-}gO9D;^X0WD+ypyt$OTCgmH#_>0_j@qan^1ZTDc#oY*rqDtfLKCTk zHd2KS-Kwl;E=;MGop+oPWzbM|sM9Iu(@s$fZDkcSmLzB`y&o-+5t?7Z9vVzKw3tp6 z0YdR=&dPvve&9Qi|G8l-r$f7uYcnama5RnNTgonIJ2Rm1$U)$5nK`Okq5WKd1{4l0 zXlUyWe_gBO_XixUg zppyKwDM%|>dV2b?Rb&zuXVuB7+LdA>x{W0?tZd`k!VY}+%jiBXKnL;-Jt<3|c^!uK z)dwBPh0`{P%mua57uuK$G_onZ=gGXC4UkR4IS38yl(wt5j?U$=4bjm7jjiZ)J9!bB z+sT1hlziHePVYFNjQ8x#iO}YD^;5We`j7He(Cq%F-R*>iS2XWldFULlC!p==Ozh3e zYY!-Y&BPv6 z5opQ_!N_p-IQ~36t3Y&1fpL z>SoZaImASaTHKz?;^AR{rv3Q+BlQ9_?v0%^bh&OgW$p4*TIiedD}0{_o$2u%*4Vf~ zBR>VLd|`nJ{phJt51?axD87&J(A1x4{ZfvfcA>`lnWO{Fz1~$TX#_s}v7|H(@@&Fs z^Pt`R<4fnD)&D)}i+EL5AV=Ms#_OQv4}hlcvtyrlY#t7!KZ=Z?`RCoaAmh*xzh-?*K|c_{Anqo|obwfA?!etKc!Hg4f{5Ons4K)`{OW z2V(+Wg#3r9dJWzL|2SxK?-lxHqob^bcflL8Zjb~=DCdyv8h9H5aZZ-P>ri=gv#Jy4 zz{b&8TnaD5s5BjMSTm0H1fXjPUWscPoaKfv4?dES%FQyhctwlZVj;X0tKqTeFz&55 z=DlA22k*rJcrZ3=U6n2dN5y}?Bd9Gr8XwLb5(nVf@R<cJH$YA z>otKoEKjFF@VJEd`pPTtyp%?#aO;)zc;?v!VgNib2jGoyv3o9Veb_2{+z#brw;1f` z&d4!~Dn*k+QFI;Nng;OLtQh-1zC07pD@IS_TjQ6>hw$Pw03W`84`2GSJxa6*ZOTL8 z+35oB&WLB`GJ2gJdBW4<0B_Gk!&+tYB5GfFYnOl(Rn z+R<_$yh?IRGS3_PO*(0o2^C%@D|nhd^n4)lGWW`($HG_}o~JnfO1b~p8PUc!27Rod 
zR1e;$#gWhD&q0Y?+%J%?7Gl@9s~#MwY$4B)x?r?G)NYp+iy z;KfRZC(Eb(N#)fQBOd6d;cf72Z9Zb4?uLgeAN!2*JPS(C`K$aYJYMmb7kw`E<)~R} z6#WDCWCbtS1bD)3ywR3}FOB7`@QNA1GnU%kQ3NRN6eUxsF+64MH)!Nqc+BeBC-EE4 z_tLs|q0k8orV*p#sPx?pVgCJ)Tn3NYJb2Z*!?Sj2&{gsLbu10P9Zc){uNNQTZA+Om zgExPh!h7$7H4pDwZ+PIceLbjZxU-0aH%^5|ZW_FD_MLR(y$4g!X3U=0A-OOkU>#8r{%{vi;EKRU6@7*YsJ@D?mhlj5}ynIy; z_2tdf@oWW;-?jFw*zKl=xU$!iW|gJT(G~S+P=2nw3~ylgd2}I-e<`M9kWz0f5@{ZP; z8a*ms?euvhe}*TvJhVF>@@yzp!7DrFcm}nHced|(MIK)>nXmp%WmkA>OAOORK!6`T z2~DEeQ|rV_cyQ~351;!dj+ZpOEqh%q6Mx{!$}BpB^V0H_)M_@Ctt|b0zOR?L;O##JR5MGJ>Z# zu-^eSET}j6!)yE}-j>>GZrkcQ4CX&L;|KN`%*$v0wykfYP%CJ6pB;MBkMJ^8g@^em zyv)n5cjo-DdLk7b=O%y9iw!>f@eo(JR@ajo{7B(L1r4|_`0$<9`%|Fd42tou6)W>M z$<22|I4nJuL(X23cj2u*4Uctqc&%T}EfbLwZ_8qMuy?_Wousam&O7SRQ;+iC>7=P|1?xz@5H$659k zd*MxA+$SA(=H_&;TcGTo?9D^rVIK`I`$2fxpPmjRi?a!oI{JyQF)Wc4@V@_f63tl| zr{uUT)nXaE@q6Hr@6l+tSe*4p?t*t-3m*EZt(VH)tA21K#|#PR>*r|41=tVTkM&_Ji>7H{0!@RvDR+J@$hI@c1umrduAm z!-_3$Xm~d=08?A!D(%6CKl(U}evIouSLggv|3hYA<6CeT7PaH&y2FIlmq}EspxZ1? zo7PrAyF2U4^N~Hc9R5k3MiwFO3GTf9@iYn<1*2`H(hQk}h+$DYt80b4ge=1_!&u5g zwjnX8Sgcs}QuasYVFGqTmlM$8!RCLSOwk$O3u)2EK-?RNTd9I_NI zk*P>c9YvObs`3q43yHn$6KjRtY+I{6V$JA(nG82%Gv;UCE>AsS!#U9Iw)E=5lg)Q2 z$C2d-jLf3e6}AciGZ&ZBZVPIP_g5J*H3NnXQPZyoc87M?1DTuTF_)AaWN_9$%A`Ca zecHaoK+Z-+$09JDZ?$R3ch>j{t9xEF8(E%#$n=bQwLn%YU`!Y#BD+^34OO2fFHGho zf+KV+jk+R>bp7B!aUR*E66Z|L{e`Sj@hde6*`>mc8j3ElqesT!$~a`3y1dk|9-n5mjxvg7T)vN#A?si%`ZDyHDWXN<_ClNoxnJKI9`n>dLN zq~nZtXv&!m(W2DCmuir~x&=P`vvtd4d*5knB*7yXdS9A@4^Ne0)M0xR-LtC{dy(m? 
z2R?kOQ=we7b*pSU|GDTiJCVwf0c)PMK-@Y~kAsm7D?&zW`wb7tb}eb{+H`u+asu_e z)>XK6>%PMF!M)t$(korHe%a~C{YM|*9>C#;W-x>HLaZGwpg4~39@Sm z$grLIzDb=V?RoB^EIzWME8jJ0Au^G9Yqm6l>Ydi5p_3iuX=LKM&Q9gP$VS}ozD8st zGna(y+&1vx_c#>FzSD#G)yO!W)!@2}{d7d!$d90jZzISJ8N8>+;!Qv%&+-cR@U7O% zp#^WncVzbFCQPB@a+XNi0OmY0eSc25^Laf-xw@e)g@tC&o6vD|?P+t7fGl7^Jnk4C z4*Xy0Cba-r!9-|xVuKBZ&fIU?vS`DvO4H1KGnv9ZstI3P*wI z1rCvJCvM!Xo3KSzaneb67@D@Cn|hvdz;G{~m56?o-1=Pq?_80NtmAlO9tqjU{~i|0 zYk{F)ZN{)JvXPa@Nal=-q9Vg^dhEAP)SG-?IwDK?=JYiFJiR~~Ecqe~khPqF%w^YC zaUwRY2{+TEu>mrfEgdZ6N{jS*W;Os zpKV14zs_a?k?nc*=Yhgv zmm5XYrqK?8Uh@-S(r}#*f1a4k=9WL?{E&PRiHzzZWK~BYv-&Q2w|wgq#&eNnHAkj3 znE9T^~!om;S75cayr z(%uX&vZ^fhP!mBo2a+hlSY3@q0c$!yPGjjY8eD@0e{cK6@Z`ImphB`*T~dFB?q)nfe-^4ip)R z++D&%PV9yEBeM64Mh1&0Eg!OakW5*_eu;3?rE(dv`g@SszuX@kGj=5+6j}ZQ$nuql8Iml>^4cK zbT9>qU-T1~XLJDbF_V)wj^-7AZmBam8d96V;7Wo?aACir!nLNn4%(e}Z%bC2>e)^S z9)_P?Ll-hlsQ#i{H42P_%U~V+1?GWl@LO4!Jcjy$gmbZb` z@F9K^_8GO}#{EP}YlFT!%Xy*@Y=?h-BO8|Q$&Ym1WY12m=~4X*Y6=!aWCMMn-JvsI zHqGLtU`1HHI-qvTGp3`^?z#<9h%rB=2+g%o!5@BlM})0xCeH}e|lr-O>1p&wxk=c-jl@#!1!>l z&Qrs__oXKl8v5SJn(P{S*vXgutOeiGorGym(*x5MGPf; zYG90zN;bfaXa^a^%VgZ;XdOZ^9Ur{5Tx7i`L zPn^zMz&44~oRNPORSCDA(eyXiC$WQfi=H_TZ`-ur9Z_*(KnSq|1pKA0=MV6T|>dZk=5H{g%Z?hL?Y**m~XZR0V3a-rQl z|7=M~Emj;m0+!3V2RKW-`t!Y_EF~VSm#<( zQvUjUP81%GAzd(QT7X@%a?5jhV;(#L8w0o**fu-BxDgpC)Nh~{8LjdV4o0om;Cwpo z(SjFwc~{xiq!YaZD`#o$C_3JuTDiNY7vF$(*Y1nL4yulNb;}?shQ4JC=FaWxgXNve zZ20lt8jk(i2RpRQ$}2E>n#W{Ok9A$iBB+IYpEi!0PDQUMm_8quIg0cmqw)+GLNqsmbB-L5Uu>_7?O+T& z@d~DdCWRt(-&+}XZ3eH6n8KO>jl2jp(U|$EnP&;MrB18a6U_s1FEs+^x1jk)1R5sQ2cD)W`mh|%utO6JE=HI!^R7G zal@4-l|_zDbl_6(xs*5AQxZjRp^&MEZ>_!~-y=Oh#pL*)|tmIJr*s0h4NGJq?*0 zv!nh+>B{kp{@mq}hBaVV&FnYJYDveTWC!hTGZ7dWCzFTjIS0-R;#TODd{ zl|kKGx{|ZCj!3!Ph2KKEo7U8kb<9iDM_|Ay#qg=^wV}Z?uiK7mYtEx^#((g{?po+7 z^T*@tpQ)k6V9JdOl#sM=|O54q!MCXbmx^dK(c8`jOj-t)`?9c+=ivXnpoi zrP=fz@EN0f#b6{?H_uUX!S3^hcGq*665 zk{X{tSL`2&O?JCw%j?s*COsOw(Q3K%&?&L-Z8S|eI-NE?+bP=4c_QmyPvG*Sf&BgQ z5^22nm$0o(Cf^Wm8o4n*-1l$JRi^1Y1&qYyhm2(kGpTUpXbPEmOOaqIwj2d7 
zm&tH;DRodIz+8L}4d@2=@Nb&vmj}h7I~W>KCK!#Cjq{Z%mytB$WESZ}cL!gpp*-Gc zEdTuqd)Cd=`@e%uahNELked*4?I^N=7i0Xx#X<87f0 zKK!@c;k^7@B$zT%>NmbDqQIO~&tqnsv`NfcRVz33pTY4<{JC4ldD3@Z14?UDr5NVruW!{I^yH?*rS?`$9X}?rmo}G$@NQs=#>CG>o-Ffv=*gxxgxjk5! z|I_Y%4;W0LbMBXOt}V}ncBcon=Hh;3O1;63bi*Z!e&6j(e+|%+6<~2T8IZwo&s($J zC|6+*R_BwSDYPP`Ar1A+mi^ZFa~YVPXTgULFDw$;VIgFi1;zx}pSQpO)lH0K`(_c` z6l~CwV1zbk77v{#h*p%X7P@QeNvpn4@RADzt?nn{w$gT7M>CzE2^bg?`ZBgB#b@V?UqeqABh2A zymnBr&uEq}bQk=Pr;C!A>iO{YreU&QXfyf%Mr=M9~&k$u?qJKNCbO&XHm!&iUXtaeOtV8^H|POR?2f34OLLD}QT z#~<_NT^+h4o#oRg53Wl~pTE?%uJc_p_JSW^c2A zXA-((GMJgijFb-N1-IU{S=REjT0qG;`& zFzREsS6l$&I{bV**Evk%GP`ARHyGG0CMT0&7hkek5GB;8rhEyk?44j{8=W+lw>Ilh z7FgOdz|@}i^p$eLyC*Wdc;>*|ZoWNS^>!FQtD)Wb9UV$WgNv{j8p>0k-QDj!fdAYU7n z>6;8+vP}4UPNRbr@pPo^12KKkURfuRVFhda%J@n-ib_GY#6 z`IZr@e&|2mFL2%TbnKz6U{< zbBrVJ$2UY>lf!Zs*zhyJh!1{tL?YoLmc5ImYB1zibX||m)3>s0?hHQuX$ps2m?am2 zIbS;`mB`bBPWE&bZ^5M30h|61`0(Gpx0aosb)o%nS+vU6fkKaNQr?X2!+KlbJt?-~ z_H#tCJB#jjK;K=xPBJQMJfAt0 z&UQMjc-UtTp?$!UHkPK)%cba>D$12>!H4g3Hj!V9cqt1n7mF!i_M04uA>MF798Ic_ zSz!9t7>9F9F#a74ht$4rIf@4Ruhmslg z8L9P6sp}qZ^&Pql-geM%!{-K^{{FGjF~phrM?;TJXiwQSCepS8*nzLpIN6~Y7jFp@ z-`e@m`W4CKZ2vGUa@7on3O zt!BGCyy1y>a65s_(Ah8w-3Go;lxU>wXPaeWP1~W}{k5$hdCbdK z2mKhyeW2Yf4(txiprM#(gzRrc27I$^Xqc+V&Ko9kk6)==fKCh75$VDs$d6=j60Mz7 zCw`#&V&Ox0k$=Xq^5d4QMK?x|PVnl251;Z<6{8I=%V+4&$U~Qg_xK|D@<6SyHN#GL zoP>Y!%G5*m9l6c~opKjD z^UX*-(cy|K?w;v1pmA#|o9ilX>3OmVI!InEX~1E@^F-Z7f70(cgMug2iseO{WUB`u zT$&QgU&}AaoC(nG?nct*b>U=~jD1GHZ8;R(C;I3>@!PXfnjNYW|0O2TKqqvho1%YHiy-dkEWn!kr%-E)&h2lyB&CX#(x;>ls7(DQ~Nz{~pcW;KSz)>c~Ztk-LkT zNR4`f!Gw;OMM|I)>E5h7O6EnQe#s}@mWp-f1L^Fk1ad{kOpn1Ovf0_`+~-j=*F~L@ z|DuZ~Zb38!fe%0Y^Ddz-dL+AQ5_p1`%9iz(%GxbI#LZ%GvA%edtO*h&;KO(Nn#Oa! 
zOyt}qCUQ(n2b#Y+ll~d$O#hibQiioL8{O?Cubj7I$Evfva?nYP7^fwt?IJX`D8Z=CV`~Wi(ojwE6 z?KA!fw7ZMA)0QRi2j5Tf5jueSJ)cIeedFnOo4evYI)c2xhmW_6;ybM>q(kG2;&ZQ9 zGDfFRW@xe4y7i?@L)XwWbPg3b&Xd*^_33Fv3JqA{NjYY2BC>63uDO!VPyNPosDZv* zrPG=248UGDaTM7bsLFSA9NEm#a6b6(U({CWl^JFPKRoUJJeVfTx>J7Zj16~&cDJ{C zA9jo1g*sYvC`G{Ud!!3Jx}_s`8o6?RvkX=*=&)aKo&QobJnw^gRVbc zMpx60DX-;zbT>u638r=TW2qB5o!m{zqh<=_*rG|#?G$qnblwNGZ8LKmRN$FVrHBei|Z|O#-c4yJ3 z0(*+7zgqD_H&rgQyPO`@96D!F`K|pH^yVA1@O=Zp4jZEKO~KV(8g9GCh+^rHNJDU>n#kI4MCK<3y5a<)MtPeRw$ z=7u}O@F$`4!#M{1AZNtp{3_`KK78BlVcf-VuUww;K z$n)XIP%tyon^H41jY`(L(Y}=f#bEH^FS%s$p|8%I6#PnEHo$-!^))nds~v6I9;S4k zfE+)xyCpv@`DzOt>nV?h(!UQh)Z4#5m3PllJ+9l~9LwV2=qTH^?4$C9U1+jhCOS;? z=*(sdxg4ElZPU^?K{REXplI>`NV*Tdn*aBI;3&H^Q5rNQN{aS*UX@CVN@-~Cz4tmt zBH2R7E-ND>D>~1MBw5*eCL~$uZ6- zcv1I=^5>`*RkA-aiF^=39P9@ZSuQoyoIM=eRB-n*|?Wa-q^{~sB`Tr z`S5p>m$5_V2B2GhE~cCKW78>L?&Bm4zqb~E|8P(EbX<*X*B*zrx{7d<0o`HxwTSC# zbYOO>2u|)}P-mqf__f36|98T@yKI6FODe2&23mnr9eLJ&$y2k~B@Vw}kB65FQM5}9 z_3fqDB=X^LC3TuM$$>>C?ZVynLB9ogSRFMOxA%!yq9_<_sk81d`S54GxAKDtQRrx! 
zhMLrEr%yior}YV7`yl};TN~I+!+ZRjYZ{(yi^3`YwsFHDzOdTLmF}S)u1=rVJ<6(4R zC-h8$zZyoUKtPzC_)MPhZkAywDujkcC#A)V-KX z+1*A916)-bAV^iPg$-jw@IX-$@*}#%uTosFE1?K|qQ~H#(MD`9`S6ds@?ntq2UCgSOrr$PXobsaosfZmX>#vD^iR7VkJJ)MLf2Co(5 z4Yr3R2ZRu?LLE*Y9?D-C%*J9dSv%Ch`M%4REhZnn_E#SK4jV$d83jDyLlDNOWMi&* zFVCN_nH}$pgoD)WDNP-pH~LTTj0p+2HaHR6KcD6P3U^p=c`8(RL_^;+%I-FPg7HdokDMN8b_xZ9sWtb{B%%y#kQI1fWaZqsC zOb;6?M0h*g8lNn@-&iXBe?I)svBuy}H(z621!zOr-7)gv{U@Ciw01he2+HpCevE^g zJC%9j6L-A*y8z$+B!BrZb*PdLpZ<;R%!{e-Zc!<}rxS|WTYdzd%_YF)8V;WB-~lOkx5m2zUXhdN;U z?I_Rf@`lmX)~u^d0eyN4F(t$eZ#MrCzpfk)$FB)t5p~LXx-AiuZJCUVuhTi+*#eIb z8QEAs9kh*f))loG!X0C0@e=B&?J%T!-Cvrh&mIdj&N#!QiXyUf#zI9Y?cL?h#SdBq z_{eA^&JT}bi-rXNdzJ%Dll$2Q!D?irZH)Wl=vjCm;Us=lK}BFOrY;9|_Y93gCAjopmqk zGo!+>czO#x2dV3~;oL*<3Uw_wM%mqSBO7vT9R-Ve4DmAEXI#@T#Vm=j*0^5E78OkUEr@hZ=Po&%73!YlfDzS$CwIZlep!HW z`l1`nFuYPhBPBT|V@Sq?VIfTTBm=P%-T5SuFP^u^!%~q1ZgN`0EH{LJV_Ft?*?nX( zD|hnZsA&9eK`MSfdz&jbH?p79S)D7L0D9D6Eg$uO7uBbs`Rz!QxLC;_uIObO^|C>9 zHwXe-@>%P$Avi!XA7PvimK)get54-Y?IP{`QFnIbmm%zLojOJz5#si@GjZ0EHR8Dg z^uhW(?F>@)_UhsDtX0W}KS9~uyuDL##3@t3wiN)69Yk=iN*gwfxFznp2yp zgaakDn8^)KI2>32-}R(m%M0pue&>(7b#t-V;2&?4T*>Uohd-{C31Xu+?6$^!{(D|5 zwsj@r$J8r)(vfCnshteT=5bK2Rm=P;UXVFPX6CtYJgZ#J`wIG)OAvLMmIc7XNpUQY zI?Vlh^YLJX7asMS$~9Jwfy3!E!!XwcqTY51dMq??+c*(^>9xV6BYxtJKm!m{b{9&W z>7V{po;BHSj=?Wzw%=kPE;`CrxCF7WI9}sdd4L7*YO)B@klBJ?j7C4 za|d5%Vbl?S*dqo;SL|j->O1+ArI|R3y5w(!m+?IE;dO+$(5LSYEYFuc&>uxvF}aTe zJW-sb#>1zLgZCq;r-^*{dk-%P+K=mCV44V9dj$BN$%>aBG=dY9ZXHwC{Xhes=X#I1dZt0B zPZUJ`+QL%tGv9MR3+rNnvBOrxSCS7eyF3s6p7nu|rybb)E;(FVQHWRl=isE$k=*{W z8Z@sF!ZGsUqt8|e7HdwzZIs=GzqG`Ci&r-e{a^yx^x15Rj9`wXwz&QhL$xFkj$foh z?<$uBxvox7*HHvsf0ZF6U7btlc;fi=1?WpYJY@K>8*culOi9!r#1!b*IZ-YmN)SqCzJ49T^!0xtK*{=JY$cqq{Bhy2r%|s$NoF@ zgGb5cp#6kEJiI@NcSjGWyfYszS$RX>TNCEAZ8Um1(f6U+6s$1}3v@1Nc zrog*10K>TAZcxcvce)s1?dX8&TJgzLEIc{q+z%)nWiFSuFJ z5w`Da95hNLgSu%*wKSg51_B0&UbDux`b&3^}51-_h z2%5LgvI3(!{K3amocAaSw<=I}SO1YElw`q~U%@cqc`*w+1&f_rTuwgx1U(@pzaNJqUMjQ3JP)wBQUH6qM}hH2FWzv(4_BPX 
z#aHCRtM4jhs|-RR=Xxg0_UvM_$%lXTAsT%~r{c21ZCo64j%7O~!KCtd&$CSdzE&x2=w^I)XMP~f&X+$l5&KgVR_J@VoI8@`D($I&eA zKblh*{)k<_d4g-y#pAlmiCFkk%u5w-F^#3EF!xF{R5?_$ReQVnJHss8v@8Ul4J+jn zPY(ij$vjZ|;|nh?&tabpWKdy9Au617N5vzG-0zzT?3*TpVswNDlaC7=A}3-X854si zP~Tnguf{L-#z6D%;HhT}h6|oIDy_0cS<3FlIZnjIag^Pekul*Sgob-6@W5>}4<;Y} z(Bwkwd?|y&qup7toF7Q{P@!D zj&r4)MARHgyVa#9_}ICRScOzNNL`JDlD(DeXH73}OwUFew;-Hfme0dC4T035^v=I% z9-J8&Sr-^8Z+#)=J)XISlQtX5e+_4nDwUKRX&23$m}1;a*`Y`?0%)kJV4c z`_tkuZ*VO)7q+wQg&C085Dv0^YuLSwef&yw4o>_tAKTx@@|#H`p!j<}jQukg67?oC znayMHQ)UsKb|OnQvt8UjQVYIPc9+~`1HX>=3aYgYaVDL0jo#At>Ti{m<9>56r|j+~ z`S6A^3&g+H*de|W;so~z7__io;CaUt_Kqn6;}wcvC}+XnfA_|$jC_=I9Y*dbWp`Ht zp@Mw)K~sJ(?YkAcpd$j8grwtEjVFA-r8+j#G9C&olHj4*IhOyUjrUzn!Mn~ecs+YJ zx7*ak)X9h6QV|M&W|lF#hJo0}bFpxm^!gUH@Cddf>rc6Vchexe?6Vxegu=c-5eV~-xwUQ5i#eBDTe1QW- zCkkl>LlvK$mSr+a=fKclddAz$X@UC%AVY#`;4_h%-U>^Uu-MIp5()a zA1Ax&o+YT2uC!LGosJ{u_v>I9nVA+kf`Ho${O8l``h}5gdHSf!uSXqN$U>v-O}Uu=wT4vbR@guH|#7MnR7 zW6$QJ`7X*cYD~E8fzhzrrHJm#U7%sld%?R<4ct74=BN+a;>}6%;>qO0KcMXH>2Wh? 
zj(b6+#AZ02vb&L*2KZ-Bl0fFBEgb3>LK*q+6=5I5HRD~e#I6WG7LLaMN~W_NP1JXD zG#?xthC_>eGT$*fZD#j49hzCnHiZlUMmcvgv*#K@c@m2LZ5EhvdswzY2+zrf7a9!X8z0R^xz&X@@vqLO+NgfyZ70#v8Q=+c>+d$rXG+74SdV&JB;vBP9=|I?bq>M+A4}YaK z6Yo#&;_F`SX6M3UAewx5=khidviKa&nv;a}TJdNZT*p_GK4FDv=^$|>0zBd>*v*wc zcuq?;o;@6hciyLQr~F|sK$H(}A9zFZ40Cp{K@o?JCx@ZV6?QrCKt6oGpoLX-v~snd$!Osai>vR~@Rzk6Y}V5Z$UZ`|qo-G}m+AkwQ4d*h{^SVR zhjI@WDKL*L0CbrPF8;dgTZ9q@H5Fmidnfd7dLSPAM++WMcBhvPaK~c0;PJ4j7(m(G zZ=V@>azKc6w-NQjQ+9W_N*|&xmWv$**yH?zLJSTVk8v9%nAD`%aD^9wR--&PKNaxo zUGp%pk9smm7G!I_J0U*$$`J=qb~oa(3aYLc&5qx92l!J!du^j&|894#IL;5fe~`;u zMtyggrR?gS5V)H3ow+Dw%)&V29EhgXWhGoxTVfXFpYfpl$j!U z+pGl-`yYuXo^VEo4MjLZUkPty(%xOhTqx2hfNJvL!ve#2plCkYzsSLx7X4h7e0ZN* zVG!1m0lEeqY_RP??oB?t$Ma;24Qb^mO)boQWipIA9}A(KhuFu>?R;f%2JWy7$KgGz z_~H6K_Hb_wJnx+kpIai?Z1Umv|H{YVy>l_ROP?Ri8VlPt7eP03fwA9S3OtpxaFnqK zXPDaH;-fy|`9_AYm9o2niKfukuh{5!+#L7Z6Jbx$WL&zeOyEvFye(yS`|T!x)4;#t zlpHrSA|HN=ND&o1ELiJ5Z>X@$hy2K)FnCY~SG^O6p7q(N;Qo#Oda{vScpm{_^V8vk zia4o_7myw!a7JN+|R8F)DD?PzdEIv%s^XQSf`UE}o|Bju{DXD~u8^ zDwqa^^c)=0V+=~dCyiz|t!VF9glClX(6jE4-~##ZJLBm&TCWOIe#&z9N$z-SY9U&* z%3{8%E4!NS3!AO-Ae(&n(Jl+Q&gc+amyv}v9v}JfqMfX>CknniPKEj_x7l#ZM*ci0 z5w~O|;HL6ZWYaugw~wa5L-OGZ8n>`>XM1?CZZ?)Z48qfI^7-BUgW=SbJUFcG1KzW3 z*%b2OFK#Zx$A@QQjqFgq_nHP0}Z%ob2$4h3z@#n&1)?^(Bd|(bt z_W8l|8`p9FhzK0tkdFQ;&-la2I(D=*4os_)V4HRm+xPt%Urs)}-+Ag*E8ELw?&)N$ zrJ0~t6$)EJma*bX1JI-}7iUiO$ETZo`NuiZkg~l1E=$rZa)lcEs6UQ=*NX6h>7nZpyc2kq>`x!3OJPL#*J&rH-Q}>xg-pztz31m#{ zEM~tCNuu4-Jd8T$gO?sS@Ex*pARS)_>x1XO^t~h5+a@)fn<>Q5*$()suS%SuIteW5 zMNqWf3R+8#TF-iGg68zuR7#BS-&qC0jIj*%C5gagnGUS1xFp`NcNPw$?5)}tw? 
zw1ajd$%oJMro2@x9;|GrpF8atcP1bHR8s_=ajD?f_y1tCM&&@Hb|4%%l*Vo>8HR2P z^KrI~H{KpHogZ#dgm{f2xV_I62Kx32%*cn2=@O!CxE-qODiCikoB}Fz)-B30hZQ=7 zR!K@`)TK(D1ttb~V{)LNN{P(O(IRLVMY(fzxA=6X3tovY!k&;ZczL=J8%sXC^SgWq zoHhb#2gP&SJptHGK77r%KK^h~Im5@{uxU&N+#?^pc10~ubC1IlGRbuB+ssQITw#y; zlVJgirT2^)wq^YrzJPrAu6<$XQ@Mgytoq0NrsqP^Re#tyIFudnl|n&G0U8|f#Kei3 zye(A;@|ug_^h+n|p1vX2KtB9%7jlnd0B2g7h`+W?g)BPj*167rNB_bbUt3vXGbiKk z{v`ZuzgDnnggrFu7sA9+b&zTs%C{QNMrbU=VMpY#cbFY}WbOmMe&m5oz!3Ply?_UN z2|`QdY+R$?%U4X@%vg6M$W*1l3G(49q)zg6S_!x`AQ27P&vMkb!{pYdg68aK_@znN z-JXwpLU9%jEC|7aPfK{p5eX1E=0Sg`FKlv~!{(^UqJ()NT1mU3q?If`Hj19jUUZ)^ z+!2;cr;bvA9!}jL!n`1BlrO#4I7Pt(XdWK?<&5F9V4yh8Lx4>SL|8{Yyl3o5!TAr4 z)CEU#YG1~I>^WsV{G~f4{VKq~ucL6~buX51&=0os=0Z?D_1<$EX5&i9Q>{HEK@ z+pkx%kgjOZR!RjQaT|M8#QDS5i5R^;9vu_vxm3Ut);}g4T%9A~%z};Vef2j!>RdKX zwhqDv`q_NY%Av5&J|7&&hnFm|W`Xw5oLc;wZL7Dgf-CG;=sts~ zn&OmxC2RXs3z$RMoqYOa&>OK-yw{boqf68YmpmR#7W@@R7rVi!_l4kEMJ7U#fbSbN z4`-O=W0w!zQ)=cg^S~e&c_bSKntWpme{baK(UH{soQ`dsk9qB@diJ)Kvb$@EuwW=> zKc?N_)`qEg>v}Z)^seT{NHgbvM2-#wA71tap@I zLm6dv2c`9(*{D|hYMLWHbrqu4Qx(iNlVkP9?jUPW2pc@lYfnkMkY+f)wQ?zMc+pDFC!6rMkGL3#R(Rh@qmv#nTEA( zk@)3ACFh%aS(AP?thyOQ^CS7JxO@oK56;K;qkXV+lr6XKkcXg_LfShcGxOFEW_(&5 zO^yn&tbHb$JFO904k2TLvb*uJmheo*%lb&}46LN=u43m@^q*lScv22vOW9rP7HueS zz9lvv=8Pj6i?Cdvgfq0X*qJ+?U>HENEt*o0_BWKj`#_G6b}q_J_{TpftYm*LhJlv~ z-6d(hVPd2G+|@r8f4-%&Zuk{`Ufj$kOiYF;Gvc6*eE2oHUhq}=8E7C5$8{shIWFyE zULoYEEee3p5pir#%m{qp1b2x|Z5!|&@7jIz;0M->s`CmWnR z%}-n(ZU8-$-979!g{a7K{G2Ko_Y?0+}-K?ojuK?_S5&>@(1;%SFmd@B4GIObSVD&l-)m7 z#|M~E2R!-kLxwi-{}iq>ol7a;HaiB?OLwzN=R0}H@=PqM48?U`W!(ALKp43o7q030 zgYSG_7B_7a{<%?rEBicfp^qA`GaCmTQuJOdoCRH#7X_VXbg(^*df-0@aMd{(@yt3S zXrX-T&1n*&N;%{Na8g z`5kKpK;YwL{3M2AdqyUfnsoBif%{nW(-_b{NN3&RYt(<*#NRX|;o-w^Xr)=ly;nVB z0oT)Em}3OU*sWuiFVcO6d=6Tv2IA*sWM(D}hmDKrci+q#o?S9w$99dz9Ooj`*zSr0 zAAS{A&DVg}l-=DwY74Eq@&(CdQ*aYycfvDf*xs<+qEgEY?sSS^fSv)|)lL$(T(QM+ zMG^V(n&_?ZLC`qP73yXd!2qu@(3)Yy6}cCltIJ1ax8eBNCZ6Tg2EaL&92jZyi^-A? 
zulFh(*UM#~wBidcyW$9Y$>ZSe@MOr-Z)VT`yTaRoQ?Q(Tc!SL~d}`erHrpfO5x^f=N`q~D0Sw&h3A5g5u-`7qc)qg;Z`^mndu?sv+Yhzj zOppi`9Aj|I(pXUbcq$&CyU&@+r(?2ko%It(OOU4QuHwoh_{TSkhm#LKma@CbJatTw z9>y-;oehy|$a$!e2R_S=SK9jE*~UCfB_ICt(*id4XAs;`%mz=_9`sPB_u-`}WWZJH26{yCuhi38#*&Uzrfhh|U6hu>Lx z$GTG81Zf@~SInMdi z`SAZ%#)H@FdiEvf3GYZw$K&L~8-{P>KlwN2(VPu;j|GD5h;+7g^H7v@B4h8VH?n!= zd<^;UgMJjkNKZG=8Tv;MkU0SjJB28%G!wV2EEU_;PKI}s-5HdT351OYpBxEpsy!P8@94a_UI7<03fQ0>^FY5p4}6@5LVRcrw}}kGt5Mnb zk$m`(%A1%^QY1+9r@^|uhiu)86WsZDJg#j?#2Z~=-m7_w-6J3VZA&!lHmznA$Gf@V zv@GPMA-MI|V%kj^1lmLL;KdJL=sGZm6;7AI?4gC2Rp*Xvs}=dyJ{8z#Og{V>M|l3B zUSJbH5pPm#T80IeBX~6S1gkaQ73 z<_#54OB~JnZqfXkejx@wmcjl-?u?K21Jhr*FeE_&&Sfv+Ppd+3qDB@rkPn}9W+!`} z5)Bt}Q(=zyCNm-*etdo+&HW@`LCy&t9r}prkq=*dDH2jPR@pxIcoc@T${!^-c6c-)G1HcFHM=T3yfyY4lta!((>x;qDDe$GebYq2~la|Fo# z%!lni=7Q~T^5J)m!4cU-_;RKThR}yj(|iR<`i7W7XWjiTP4Pj` z9xFU-4qxt3Ur@D|cZ2d9^5ON>e=ysQ3a;20fqw#NZdKt4zkRQcWeDP7k!cbb%ba7M2jAd$ zEd|pYWAJ{=Ztk_WizPnEgq!7|aBg}TlRq~QADzoZ#|?gXOU0L;V58vKZOWg5JwQW? zx}ELE;geBxubVjwX~kW9f2l5fqU`QMy8t@RjS?KqnuZzl99&pyf?tcySf9RP1?tt* zk4!%N^_<;e-31OvT*5-3s1`S#h9EtJRd8GX_gC_ zpOARm5;_Gfl$$#(AlDGFK;6z$?98kO+}=M8&$~w9 ztE*eMu(pTkXlKLIfMD=>BV?bd2jkb)JXF*2!CCVi`0{=^$f_)a1NF1vw9!ze_*4yD z){%=`K{F(SD#e!M!(X86PWq-LI4xaaee&3J{6uHno&+NloYoQSe8AwczX;3@YlGJM zR|k|e)Na;fmyJE)*y;j^m6C=54T0R@r9b|P&BfIV2cXukWh~7$4AyC8 zLjRafwtxISKBXfDrCd|6des%m@tfFPOak4SILOjELOACOo_L$AI2M5^TI;xD)h{;Q zpL{^YKxl9yGqYeg4iwYO&UP>CEHmK;j*f=Qu0?S7C+%cJzZZ;%(Lgr|ns?f3iyIEc ziT`*QfZ{6=1RODgV;(YRSi2d%q_eJtssT>dP8OKovxQ%Ogb+nOeAvzp;v8*P474r6 zlI+naw`)3+z2pTYhx1`G8xD&ilKC$4K-}h-gR0uU_~AqASanDQ)SV?C{>O8seBcO= zyh#4+o+Mmv(!?#~ud=*II!`sz9pa|F?7@aku56x(f#bu_Fl8ApIX3`o^K)U_8Glfy z3Sd*6r19(40?Z^IUSq!ozvQb7SG$VfgaPdz__qk8-)Q5bU=iNi$LOw0L;PTe5j>~+ zjE|z}@S~`raqvJZbfN5S8=r(r*%pEQ1$(GpE`;5?)Zn(;Fz)hvHeOm;h&4@eSe)U= z{9Sy&v>}hK#RkK>-y%Lw6pZn**{C|ThlhD@Wg8NrXg44Y_Fufu(sfVsC+ic?>q7$e zkPkmF@D6+bEfsFGMZw~K+t`!p&)jfX7XJAdjJJ;zbJ@L;khvrej=B57^$utDeYhOf 
z#};A``S9V5GJNA#RY=JY!jicT5Nus7xIa%1yC}Qsue8G4rFD&^FHJz5p79cP)8K@q zg7~T&!)cV=MR@6ANiYhQRLz1xl-;qBtS>za+fz&P(s5eZ;oQHHyFEYm_wS{L}qvQwzzqW z2yZ>sM9q{A!Nn98_?%D#h0$Z+p4Jp@Fu@C7cjcqL`3N-Xh-12H0Z_R%2hQpBF`uk* zK7f4qx3L*`YGgalE~;f}ZgFr}J{c0GwlJ-)t-SYlGIrWh*Y2enKCH2W9e$nxXAg!^ zzsm}?b^-OT_vT>py!kj)CzOBikb;v@1;8BVg0iJ9bB|ZT*bC%pkq=)`@<6ONNE?!! zMbMQ9P;%Bpuw&Fz6n>{ZR?it2^E=eq*VGauDZ9I}MISOH*NOuM+vAD@LYz8pJT{$_ zU@Zo-p-fEH&Ix(&>l5%>`{v=PUwLG14#7>L0_K<)1esykaMiq*O_;Em?;s!k>W?(s z`uri!=|923CM3Y_;6zZnCT3bTxA^SxR6O7jjq{9m@}D=mS<8$p=oN;5>xUAyfPDBz z!}HMjvoAU(&fz;o%fe3b;ZKEn zbe}P!o2}s0y!}ly&dNwdm7O>Fi)79& zwI{+Z^5NI}pJ4t)k9dG0?H{{EQYXSj{^xctGtSEfYnvdjcFAVCHpMQ2~P(R*uIb*XZ|)eE1R0tHsLC^r3{ZyXVdpkS#yb+G_O-?DtO@;?dbQ5Yr%2r$70Dd&S+Fogpak9@b5a>yUU#m-?Zo*Xxd0HD+uHH zCG*kcSq>^r>F3tu!^_;Gcc%*(@JGFay>~yzw~-H@`y?5kd9?BYw_2FNnq;_1KKz1* zhnU*_cHUo-fgeo6@d^3xY3C`stI2_u&+}nIO(gr|HxkYI@^SSiI_qB1=OYToLfw`k zP_T3X+mkN^vFciQn6kTm^5N}_eZ<RVy1`N=r-J9LXjtjIo7}E0KIUa6 z@=>9vs8`A#UmplNnsaFn$qx?p&Sgs`$YA5W0zBdEfp0pL_+&K|Xq+m9ZSk`pWH)7Z z6}mWmkqEo>Y4_s1w0K7GG@yBS21JK3F*fz0Y&-`i)%V zhpFJ!c$=MdYUFQI6R|iwfx2!_@y5spEU7*XW|I$}Si6NCzTCr?>1E@_yFn;_DW8`g z9SrH$^1y$b54afGvOQ1bQF?13>KvGjUOhwjqdIj6KSKBE%k5!)$tJ~8v2UYj0;f2`6l;=x_MRpSg(+mZn4ZxcZ0U<3Q-a*tbklBael3RT*-aW(wR zHr>yH%Q3;wv8I?c)=T1$<$0*o=z|wdJ8;dhauA$Q2%Y|Oprdpo8-H01jk1L3>*RpX z8mq+Nnv-BNWp}Ept>D+vBi3)bY4(mjTX>NX`dcar7N|0qK-t~QMLLjk_ma5m;4B;> zDa72r%6KwOoxRicgy9zo;QBXdSd#9?PrCWzm%y8s8GHS_(mpKq9xTR(bHlwK{WSAx>w0DbF<+U!wLNnkggHWJ@yCS1GgM}CDq6Ott)47&%;4YF$1=0zF^C@)bhHyad=%S z8RIOP`C#(l4-ZHI1FKj_-(SPdR=?pdjWW@2H|6QeR`6L{$+b1j1xNDXTi=E-*I+3; z8(V;T4tS#3Fik!&M+r(UQZD(#35;ZJ2&!*u;||L1T*Cpso;4QN-JJ?+=&Y;eI0Kgb zifnYZwZwSJ?y7H2!eNWn3U)CP6$R_FX5&qCBV%o4^EK}Uu`gl-Pe@GAd5n*80L-* zDzf~$qADnO)9;U@Bb0qRAUHNt534tbaJ-*2x-7lhcw6cJeE35njbUu#K=Ijm0<@>> z?g;ttPWw&@&h$8fuAUIqy&nf2FO>P-*X}sh(5v|K<{v@!Tdyx;nN;w@)IYdHL z+D6uT^c$bll#SEOgHTT@o1a`qyXcPj@am;Ee2uVXH=ihA^S45LKhF()XZ;bw-tl17 zCIn}-nJ}rXRA9GnGHSQcyc4s)nF>Q2*UvG9Fv{+}HW@<1R2T8L`8K%2fJ_(i;oHwX 
z7CgP+41d=aL5tH^c-K6cI~UKzCld;=LTe;`-x0-*$p(PSqa1LO?Ps3@R`Y${;W(ds zc&|YnTq^k>vwIi|WAc*W2Kn$gpIZ2@ie#L4Di#$V9^miRbg=F+x__G*4ihh|Vyidz z^GP3bu&8rBUXF|8zR@GW)~En}dd&r$<@)Sn*;stBjm|u4opHd-hvGB(TF^t;U81fH zd^K|rNRtmgp@%ZnF{aq@eVp~lEDPwTv+nf7$zbKYRD9lJCKk5{aY*cV{K)?b+7`Qk zu&WU6Z&rX)TLpZW^gLWTBOgsXsN2UjhZ%(i!KXvig{1S1Z63afABl^^z2w6$di0p* z_0+Rbb@8z0aw6RMDP{>aH~3E@^3|_IW1mAcwo$91 zTt{c!FE5O#5BItC&z07oK-t~#p?YvWg|a(SGA3Mv`23yWi{k3|kFvY8r(}E#UnAC%(ua@dsatG>B}^XUX>ChBd=cGe+}k`A zYwFDe${PXp+l$~#g*F72-4d%xJLBIoMQCcKgzvx58qn% zf)6psz=aLr7~i*smu&1~0bw~1zaRiMb;q)rq!E})KK#WMUKlfOD(|Ws1D%;gaL>sF zF2{BWCOc~4C(7tO4l0r@e)@rttJ_)mfeW)OYt>gu7==!IRPp1W(r3 z!G%{sSTRNe^w)e7&$#1?wPUEKmwfo|6K1e^vECrc$cHJe!@%%DI=RPznBA9+-cx?? zD|aebH2LtQb?IR9Zq9D@;zlQbv@#Qq zRfM9ieHmA77zjPZx%B(s52;qZtj}x|#@;Hx{_h^>Xr{*h3C6)wX(1dZAHIM7MZw2& zIvAZ!dkLKanst^D_nkC?2b6Ew*PD>v)X->g(F#u;5#i3aWK5Xt77Q$KfP_RLq%Bhe z%_b?{TQ&z}f(vo=Jz4zy#gPSf`ha;J^)IJL!gc+HTzhpe2As>nTx0StbGNgr@1vmF zl+Lqf?y@L2!+%RAVsAkL`mJo>al>fKAwzkyCT@^HU_%a8u7(VUZ{ICpL!IBRn`IfL0G;NJR z+vPQ!8fDp`8JRFcD-6z&5C7*B%_3Xo;&bkg2hIiXH1gqj0(CoAd4k+!4K~M9885sm z!gsfv@XP8p@$=`};1nzZt6BzOjnf3~FQ+2S!{h$N)6uSAz4a_N%m0~FaG-e-4EwuT ztVBM%(>~g*&QQk-Q--mEN3%hBbs=PIk%vpscKm~b4?Rorur6^huI(#ec7K9kk9;bDVvLPwh>`5c1(SeA~gd-uTGg zEXjiJJ!DK^G3&k{fuCLTP~O`YOC(&ln}i%FTNi@izc~>5REACepo+`Wg&6dkvb*d9 zVszJo!acMzxWft_EWK^bv`uglJ>wtEnuhTW@`9pJ0oaQ~u+(1{RMb(Nvvn3)=?d}T zGwQojAIIJ;^nj>K1+et_C>Sx>n`@o+!)tZ9XuEVEZaZJfrp^e32d$Z)KckCTk`I5l zHyR(wq>|~@#)mCF$0Fw>!QEx?u$a{|v(hI#DUBSVD-k%_Ya_pR?Hh}4$%Z?J17TTr z8gr}~inY%9IN+f-PBb;=iyIWd{AVFJxw%1HYoDMbe*$(@&dRNdba|Q8wU9*_}+gCOk6i5Z{h>!K-U1tFRi2y%JN{ zW)&|GcIU&SF(bkGc@+Pl6oC8I=imeK;j8jjvk8IW5R;Grf^qE($cOKI9*d3{$>_~m z_{@%0CjB!RijKy@=Y|7pMQsPSc$$GL$cIncy^8ys>}T3NIq=7y93lHqh8ZK#*Q9`Y z`sSjhc`&@FM8DN+({kxD;03f69Eg{H4n1;@}SCY zD2x&1aI;wIr;o_SA+vh9rrsvjnM&E!?=-Oc_>lR3I>FOU#bfye+E05T=AqMWv1R1L z=U<41y%Ve1wx({LVUmTf7lvTK`^B7J90b3H(!bs73;!*j!-iYQ;Gj<)E=g77 zF9)hpKd%s$pKydXwe@+GT5`ro%JdEK?wQqEK&j_OBeCudqVJ~ 
zdKMm?+0BzL?_>cP(IB0j3NH@dWKTo*|7++>{AxhgI9^wgNW(K+$bW;Ah{%wTeeWqc^{LeX>uD&*(2gI#kEWnW4q5E@xGtud49j& z_s1>e$@oYu3C|^*=jT)Jv5Vu@!-p$Bf~aU8Yi%9mBeJtFz&#T6e5JhW=t$_+mcpQH zIqYo_uv~`Z@NL*=2GE=J2q(Fsl*H4m!O8Re{<31 zh2$-#_e6a7h4qV7^@jl^Zc5btbq22fa!;sk>`!;Uy7La{>%6suikVuNeE@EUc1xW(h6@G34o*Uv6K z%mB@8G4RvOBG%WYPJUPFWGf5D{x4zN(?buux=DXp83d05h!I{Iv;+aTZnGElNuR})1wf1_52D-kp=FXUKTCZ0!w3E)G%~)^59w;<3Y7Q4~91UmXLO5bKC-SyV_N5(O!6p$!Cy;Q}a9c zuWc=?r6mPkRjq|`gHtSfQ!jtjy&gw9iP6tsCm(<66SER!!zld-_}iA4nHxsqK2Cb> z{$;d(Iq(S$hVa^t{7UbAV5-%Ca88OjejTC2MTcEbWV=??8D>H za4AUTCj=3=!#x}QjXv?~mv%CDu^8-{)`L#}BlcJ0DPGaO7QHG{@ICS2FPU9sp7Cjr z{7XD6-CWJyR=wakOEYlaq*xq|-onFwQv<8K9LT(|5*!LdtSw*+#vde}nno}VY&PeU zq9#J;fE?Pbyx^p1n{eVw3t~npv9yw*?`1PpDe>Wl>7EfHT>u@@Jd9!d z#xi-iFr?ibdbTJav0@6$OxEIG`us7kBoD2d$6;ZaC#wwzgWQH((2drFUv%>c%o2qH zJ@Vk0zvHRm1I#!r4$iQ35Y%v+&0pBa2Y;sQaeopzUTorP61&*$kLeJ4H4fx&D=44w zh9{L|qUk^s79HBaZ`NvnTijX3KZs6{{W*j z74TZa6s|;?a=Vfs%x{%p(U&ne5E9O+FNcHr$sEX%sl((yiuiCt3|hC+o?+6@mEWpZ zVM09IJDmm*8Go?jm6y3VGz9~vPET#Cn1h?7)OWXY6O+|NgPUn4 zc+YytiaIN~FY)2Gr>A4>j_Z7joU^uP$&gEY_)eem?0E4#{y!t?KJoq$4L1DD`yLOn p<+3br6GXzc*;(v<$tZNulj1bVa%?UX@I|*Kz$W6u?|SbC{|Cj(QON)R literal 0 HcmV?d00001 diff --git a/ngraph/test/files/region_out_yolov2_caffe.data b/ngraph/test/files/region_out_yolov2_caffe.data new file mode 100644 index 0000000000000000000000000000000000000000..44807ba0ce62c2ad5696c9fee9158786f827cdda GIT binary patch literal 84500 zcmWKXhdb776vs0XnIT&xWh7-j&+neUP&9~;N@iMUXbP3eNcPNXC|W3@lHz%kkqD(i zr6`pVN?uCB+h1^B=en=+J?DHs=V&}0p^uKLv6(T`SmS8|?9RqYm1(Ijo3K1C;+#aIW5 zfW0^_#6Er@&JO9$VqZOzXKO-!(VIWi*wa3eECN$haECDa(qT5c&{3ML9-5*)yl?ae z=%dojAL*W25%%$v1pA|CCR_Pp9($==n$;SSVbwJU>FD1%>~mo*o2sbAiuVe!`n!2F z-0vnWjHsvWd*-vr{eNl1PIb2Zybzd@(ndPEcFH_?R187!0WjUIk2#KuQ|rj=&X*)w(F^w9g0bf|h3n=m<#t?rR$ zv0F8LcD{#(_z1G8BU!W{>NPd)lVpAW2(WYRNwKy%)zs6!gbM8zXQkYA*wNb(Y>{*W zT~w>hzJ6iIrq6juJ?0ft-|k8J^zc(!{O%VOr@!f(BNOzvfdp%ism?0OO0n&pm*}Xi z6kD0OlwDTdOuLGTXcqWHo4e}hr2KE1xqFxvDtB*Dp3b6Zk|Dhwd 
zeo!0nX>3=gI6Lz1&wEpcY0tHkRuSV)%H;v>31~l%R6clR7L9_ z=TogE`BX8rh#u;DNpr9KqIdTX&>Mw4w9vhaUbg*A1uQk#t|Ak*w^ENia&Hz})-TOA zp*ierM-FRQX~QmCx`{pV!;<}2znIOFP-MwYS=LTQi5=0M&z|$0!5%k|W_Mnn!5IRkY~+f3#y)8@1~Q zr2@_U(tdP=#&x5M=|R z$0+`_k&5q=rPo?T>E8MKY4V14y5P_lm8}?|3O!NOO_W19reEl%g|aM4>!A@)oc${= z#MYQUrA7CeY2<%$?7CukR@#1ozR&8U+kBp(#`Xu@$^eVltT2G%I zdqF){KcoD;a%_C~Z1!hzAMH|kOZQ&=k1qQAg08bXO*a>xrwY%~=$iw{RLU)ps%&bY zr3?CLh{h8t>-vhWY965pK4bLrhy*K>B+Hr=PGb$p40gkGCALUZfqmE^&h9*}!B)!7 zV{83>(M+K_y3u=-`tB8B9TKLqMVtRpl^HYHql?7ZiHZ5FgVKD~`R4+5zJmt4{Iv?J zB`V4e?Ri5NdW_TcG4iZ=ml%6;?lkt|^A0*u(@(FB%dpCp3hb7X^6b2mGOXcrVfMM{ zPr7~a3wqvqoO)iDU=uf~v5r@jS+HJ)9T^i~^}meKRfk6@%AKUf(KA@P?NaQw@@Z_i z)i=8Td^*qEqlp-nrcjwGLWidPB1<~MsmIj`dbCoG4foSv4a26fV(=0@<$Ij0v{d9v z9GJo1QMHt3R>7h4QbKvT>PBA1sZ#QJP8oe9mP5Z6$IN3Vd?pcOL5E zWlP=UUGMg!S1r8o%C*M42a9XV7x|2rZ{}x}R~#TjKTw`7HV}GPBvo4e_0cju8a~NW zoqK`TzTK5_W|h))-}6YL*ioWHM*-)-Rw}SZnY|V?jg|HYpzQ{w<=cK)@n5>t@JMGa z@1t;9`3je(^hoeN`l$L5HRnvxmU}|%mpKdAPA3Jn<7^#W^XX7|j?5PR!u(bILkB;V zt3)NzdkX{Usl{iA!QI(Z$t06HHHOps9>?+Vor}tr`F$=gXbk1uaP{CnvvT8Gc2&WG z^BT0HqzQMADf8^Ub(Fszh^H~IiRM1>Av>0Q=Kblu&XX1M=B+!wg`aRvo1f_Mw%j>; zCC_ifmquw!kQtAQ%IAIvB^M4S(FbfNRtf*g~xU{YIjGB4gK(uu8mEg9(wy}duRz&HW8q?ZX5VTKN-I94{^bPetHn!Q>&b}SIb zrA}II(MDy4OQ{2WMoH=i`r-92`ceN3)qi!LH)l>z`L{Licv%N#@he0){4R0Gj}KJm zf7~d*k2&;=x6GrE=Nj9{8*x(PYi*y)pD_``(|FWLmMW$5BrneAzY0YB&ucXKZp#Gt zC)*zJ4#@rFNv$#F|Gjg9KUdp@Z}Yr`SHho1JKK3Qr|K_REv&-pe8=*pJ@Dfx&Uwgl zD0|J5AHB_+|FepB9YVA8&+_K9%%|7v%xFvZGG0}D zAa62Rk?PDjN1u0uQJfJ;in;@NC;L07ak)F&=axxN=}PeMhvQW3bqYCRy++K$=mDx2%9-DD`MxOhAJ>tQ_mpf8!V^Ekow*gT=atK4`K0~gCBR`iyI z#&nfc9O--*J6(pQl?m(uA8YpDcQJO)@vZE;HG%AZZ#S`GVoG$Yye%)Ys<51?oK6ya zN6E9dndEEQcgiz}X1`3@us4oAp+__;Xm$P+oohWn-_+kFL(2-vjcSE?Ka%BmpL*rX zH^$k~W9Mbrt~2?pf2AzD0G=Spoj1yMYCIrIo14qG)5E+_mlHfFAH%CzU%(T;9LBTD z$$seJUq%nbB(R%z%%?*Z&v+vf()_0f+IZ66zw%DJFXn9!xXa^xtKg+9Y3F^r@QHW$ zOF1ve~wIvs60OrX@JX7D|>5&Yd;2e#{f1l>)u;Iki_;p}5cFlF9Z zIJY7b=6vSBF$XC$|92I<{9iH{i_T(tXD((c1%7frSpMYBZWUo(G+$?Y{RS8zzBO2N 
zwi;ybwuH-7%aH#JSsYw$gP#3}gcCYi@a$(<$jh36^8EyHV9R{GJXIXOCwlnTCV8y; z^8?xj!?0d^1KyD}6Ro+w0$Ue~VJDr}sHtoPk&MizBMstg#+li)#y_2GccG+fcMk5J zr-sFA#E?wK6&SNsfJE?3aEHehly+YYU16`l<0+|V+2?AUyWQ%aRw|Hh`*n!D(*?M4 zc^`mMGojaeJ=86+72WQh4=?z8zzwTvpzx|l;AlS=F0M;~Zy!v;-RmynY4%Al;=l-0 z7nK8RgpYyqQF7Se?jvO2sA3yXYS06^_hUitj&L~m zXCu0hupiBn0Vv#{AG-B$;QGLy;EPlmG}v?!+Qz2CjCW_DLzo^M%+Z4ecf?Sv+d)MB z%Rp5{LFlv7BBT)g8Y=KsLTIl88+ZmdNk9g#nWstG&gGK6raq!9I)@H>DbpuaiuBWj z9(C9gOT*MVsZqERTk-P}t=kwt%8a#1{`f&MSEG|`&pJt7E&YY{MMOxQmmJwyAwbkj zO~}zdOX*@4Ay#6EJ2f&XB?qh1h|tzi>~by}X`XjMA@f|I-p;v5yY(d8tR(`1-`Ijz z@!{}d)p>O6MkZ1W5W(q(G7;Wvf>VuVqYvt5K=UeTAmb-3H}m+8X%_gELe1QWDiT`>BTvIFY;xBd%*w}H+#dZK@JKk@`UMD z^WYH`O^|iyHjvIaz$BXG0Y_mcD3$vMbXUJ+VjjtXwlm@&Uw8}XwhRTzrv;&vWghIm zd;$&#jxw4@bh+H@`M^Ru9PKD6LHAk{(B7m(q_u4)xDc)du9@=~t(l(S0LL1E8V%H} z6#>=$u4PO|ZDCxVKQ`OfiXL4{Lg{}6kdd7*T(I*i5X&6|G)x5YQ@YU|u~_t`VGzw_ zZBcO42>NbfgKcFtpnh$6l>giop7Grd)fg?9zKekk?oII0d{tt{h!Am$)#Ty(P@HDf zfvj)+Ml!c^k#Kexg7Grw_Bs({^N~Sm`!kTkNp*ZX=_b+2n?X&3^=Z)3bRyCPh=YYA zxt6k>{0>VX(!Q1#XSY+`V z?9%@RyFV5o$$ABNKknJIw2!VUe9SHmIOIHV~cfP@KsBr*X~IJ5s1y1C&m)Y{$+?nv5l_w}YT zwsRd(xuRHMAt2A`+*gU*RPYyTE-AFEXref`z z57F6@^>}mhGxTM>B^H5SfNHS>3|Q!eA6cxT)g}paVzVtZF>b{J zGk>GeLIKoir~zeGegmsVYhkd)Q|4Kk7h|y_0f>w&$LnTl&|>x1b0K6?<6WXTEGQ_Iymdj%c~>A++0&gA87ITB@@47Ys@0?&#sFj9$2 zVM=N-3hs(WKY|y)t#;X5dv7yPE8~K4L(U+{2`S_n>jAxU!hmINA`sUq22{TnI{K9V zd)*nuJIA6GA1C4COB<1az#^<;9)hlzUxb}E)!~(X1}gtCfKPg4p=Pr%(r=f=zg3%% z`(tfv5UYWWCuOmj=SuwSTmt&x`xm--_Co$o9c1b2gf4AZf!f82k?5!r7Nlud?r<$; z{$9uH=SAYULwWdXzAH}C)x^>E8o2r42JF*(7Mr-A!)9)oII`sy?sxUab=yx8$+!#D zwDBGN^XwA6;IBasmb@T;O!J8O`cN|Vu7LEF#F8z$7ttj|8=oyYimoI(p`GU({>7n# zYp5D3zj_mn@vp&vb*tf#&L+^IJp-1k4uWrNN6@;^7(7t=9G%!Lk6dbMp`Cs@99$d3 zJbgRGJs~Q}of1D|n!2vrbnPB(ZsU)AjC6oITd@N) zUumat$KBQ#PY(a<(BHt}O+(W43`zok%1TqlN{kfG=S_o?M1 zoVtve(xG?7B&Fjd*<>S1e zEY6H9LkYWMVd0|~xI2IX-Az}yt21J`OB|J$lnE`+l_ZS3XDuQ;k*f%QOUBJlpCC3l z5XQ6yf=y}PxP6XErsu*!jG{VojcE7wlqCwa!1{aT|eJ5Dz@)3G2m_FWIcGzq#i$u7GC2)Mu&lqYqo(R_3vQA 
zg41|!nJsyha1%v&38T%qtxW1SafUm49gz5(4L{~SfN!t_4B_nryQg}Xq^3w_mV+n? z91+E>&rGocN<|??CyskjQK3pWloK@vI}ar=?SGTGS(MAnj*5nU&v&6h z#tyyFnU5x#bfL|YJy2eIHkAL@W7q}FL$ZR4;VwNh?w(yUIcwHITLwq*G0xBi@}Eby37~3V({?dH}v#HFN&UPkA8of z1-*UcL3-h8kl-y3E+xQX4qcFkH(|%?G?IYTWASB+KF+j@i?kI(f~^>#sH9r1?mEOKu7i^ zux5n`%v-b@T@t!NOv;+cYQ<6FFL9Jq_4<(KY4&8{Py;?LvmaL$EXBMX>UipxCN{`* z!fK|sFs)J}(TNtgGWG%HTK&aForSo(tP*GQGVpnAJN%?)8lEQ0Lvnv-V{2-OZ+@%8 zY>YkbTdj=mR9;5*Z}L!J*JAYLlnQ!#To^I(iBP9i5ljx`aYOQ@nUR-dfOQ1TLg>;88MEGt?A{kwI*Mor0Kfv)*SV}>%rb-)3NNu zySU+tJr3P6j@kxI(6V3O!C?Vg&~|DmH?%e0xFN8?JVl1THX z<4Y4WQ0R(QwEWL>>e0G@F8`)X+S)&mHV;(-3IRznT1QqE{J=vwNm&2zcJ#8d1TD3x z#vX@OlUaL&*%}ca_Q>ZM?9q)mbls*hnxUcAphB1)(WVhl=F%mTXUISgczMS?Y zco4BI(OBOnnZ&FfBcrR9(0d8OwE6KZ>UjSiZR_`=lD~RM-9O#s_(f2yA!}+B{D!>s zeMoAp34QcIm!1TBsinUc4SQrsTP{_R)|U^-ebBn;5DvpW<7`QN#lH;v5V7iFp0Zh6Q3(1Db1K)yafZZ zAHda@&oMJjzXBCDvC#Ll75MPQ7kmpz=UxiEVXE=!nQ>TepYd($bklT;72LQ*x$wZm zRd7%;3>Z;w2vo|z;bb1%AFY5Zo1K7R?-Yk~Oq@}ajN&eEHssP%j=;Pt5=HbM0-{1E zL7b2x>}n_lN}v4T+nGm@>xVXK~DENuIQFX?!ndaybZ}nauo6Zv*aqYq0;Hx5!Ds1n;XA!27La@bZxx^?UKXz%%%Iwl!|ODS$1y#PQ5mb8*0` zzo<)~0;y^~MGNwqkx%6}cweDL>;TscW(#Uq#JGeY*{*5izIF6-sD>n<+al+lehGT+1Hb=gjJi z>}6#I_p-QlIjiJfPVN5-q`SiwQ(q`XW4u{5J7+I@XQKxTLjze`fk4*g%2D>wmSFbI z(_`$EVj!C>bchwyFlG&{G)d~WNW5duQZ`F|Df{A@Aic418#}`_fSo)a%&wgXW|eR- zD^l*y{&wEaZV;Qs4tR8 z$Jo)_!>sA1`K))^H#A$xg!J!yO$Sd)P`ArbxS#Jp6#uhE4UQ!+VtEm*^7Ue0HSK1X zR=cnRq4xBgO)lacy@$`Vc+@L#;uRmCU^5027!o3hSp3TLPKOd4l zPao3!N)hYL6U5q|jdA_gCNvspjLz(Gg9l$uhkZZpgMqg{!2EnJ+R*$K&&=9NMt;YT zl|JLxVu>?m!+)UoO$}(u%MX3LZiGH1%b)>CDU>q%pFWKfoNu>+#BF*&t_G%)o1v=Y zv-(wh{Zs~S`F;tH>gD41s<*I$-c>BSKNDw!WZ)N7T)k^EiOtc zL2_1M$k*gBa>_pe-}>JHk#34mtD+cMEW3iNwCa#*P6HC?t%9BW68I_c-%L|q$@uJ# zVZNST$rZTv%5?f|TaMm^Ror>@eoTFZ0({c<3LdGqglAKt;l-Cb(B7*(VDF9R+=VA3 zIhpjbad*c#gPP_EqiM@DO;cU&a_+BC1rLOT(2;#|(Co@)aO1hSp4~n3aqbM03T0!2GO5irp7x48CWkg?o zg+DEV;QhM$FlD77F4|mzUJ2@fkE>ZRp}ECuCw12imqo z!A}a)kYc4U3Lc$>P-rs>k*X&u6RjlbJWIRd+sV(KVEpmK6{M@AfqWjuLP4+*t$7oN 
zmOhO_9(o<{&`1HCvE?@QiVGlKQvqb9n-uXB=3p&JJ9H;j5QWqlp*U%RO!r!#Q^|Kw z;v_Uc+l52Jt6{HvF~r4EEIAg}axA;va{Sv2W!qoH}p>e=#(~8f6i9GYmkd z4-KL}b_;NtdOBLg;iAM-O6WlMAbhj$FLbx;hqF^(Lb37;7z$!g(!Y7vOF|W&_~$!_ zO`k$RE{dpN#T($KZ44UE=Q9Dk=}cqaS#EGg3bQ6$2CTI`4{!aKO`;tt@bmS1P|+Sf zoPA>q`0V+{-4GJP-6A?-s@-wSBv^L2Nso7o>6-m|-1xQyJBN%TDb5pa%D~5rIK}In@x0~u(g)(pLAHm&zkc}PGW#=C|OMS-glG>{Q zxLT+f2k6engZZlPeUk?qjFNzxJAQ%Ga1lHv;zcCD7WQw7H#-t6#R}?$k=^zkuzD9k z(zB%SOK5-(zfi=7j_HwwG=3~-%m};7fwf<@V&edL+T>9| zco)A@(>I6Mdd@+%#yx;_j`C#bBPpy>BTN)e{DyO$FJrR3xzM%kAo1X-vyxKl*Z@ag zHm~6@dqZO{`ytDh9d%y8F0ffbK8a5ep_`VtcZ3hKJ_nQ9~{ z2N#_o0;i9YFxrk2to-m&v13?0SR8jZN#l+PSM2%N9Df+>K}>Kxl5P>e(8C%B*%;si zhokt9@?RWPlY-kqB=P((+nL65yYnP z>v6i?D%>BhjVkXwc1IiK@?aC6o=GBNG>@SRXP zd~XG6B(8KOpuN~!v~iwAn(+5u=gi| zLOIj$+pp5(mud`t`RX)K|JcJU%aH)1M=yeh$28#bflbgkVI{oK!iAe}M1tM%H=>P&8eN3|5|iI;T#6;fnj* z;8SFY+Q`lAsWYQvL;N3SK6 zcrfG^US3;(*W5_Mj=76*`ay*K-rFKqIZd?l&H!p3C`C#e)}ikuo=D!v6&V_6BHlI; zB=)-wF3M|$hiby%c-|k_V$u)W3!-4zpddQmI|gNv=fk~ye4vwW35JfnW=hMInQbo< zxO*-vWS(9)!DI`@1KVZ0ki*goP%1VOR!|G@;=o2`Rho>9= z{JP!r;Snz9@g!olp8AY>-v5P#sz0IVhJ1MNKnJRA2`k%yTCWh4 zJ!FR_jp|X`Jx%;T*&4e&K7}p*i-3DnC7IUBi_F%>R3I*F0{g$|LI-aJ^yiHeb~PWu zy<6^M^&{;#D`Y*XYe(4Ma)K+gT9F&K_BHo@P%0?cln1{dOSIS|2_-L!!-iu=@vovj z?9g}}mz#$Y^}F?Od3P%3b>bK9>v;}v-ogU3W@QU@bh0P=GRx7{1+#FAk0w6;wg$fw zS%EiwY{&P!FTibM!8S#;A{IZHu#u>q`&S!k@E$xaCkI6a`Fat-!hlHtIWjFI;v#d z-Myr=F&n=azJVL!pJMAFf^F=?vHmehyyRUZt`#1}1L@MlZ>A_QSYL;OW;>81=t(;A z7Z8_Sh)jCbkqhq)sD!#ZHOxFrBl>pHOesin1cm56QF-zzZvx$t$ix0KRfyN``&i-d zIh-XCgjL>QOy|5t1-?G$!cQZVQYVa_h_pjb=K}O+dI54w6vbaItU)mwrP27m9`FZq z3=Xb44KH1u3-iyuWPCz2n2dEhn4Z2V=I1(dD61!oKW&i0GH?kr-CoHEw@ZQ{{Xwq3 zvlDZQ731z$tZllw{)>snr3}*@&Mh2ayI#g@vm*MJB50TYCAerP6zY@TOyBMS&USE@ zlQBQk^w)zEMygWpje8yhnwoy-=PdeEz!ZY%$j>kot@RIrHO1RwT%q^>HjXE_4cPov?wPn$|Gk>j|jqJ{MkpV}}}z_ffh3YAE-OKUFt4ggPEJ0);=q zK=1omkf~w=`xNqEoJ%kG)#r?MTUVfRCqtT;4OoQ<4!fvtkfLIL)L%)MqZSJoUh;NE z{{2OuXV?gwegy&7v*!4rWf3WkkzpTBY-C;Tu4lbD71Vdj0$fx14AhAFfVw;dW?8-; 
zcucMW)4{J`F;vBOqwI;xGX=I*$eoQkK-q@YR9fNZgp-eG!4J_4=nggkB}5QyE?SE= z3JD^&lCLQ1)Hba7N`TIj6=m%~0j;fwB7I%=(A$5yziX#B{B+_CYs0}~MOlE{v*MGLe`1Ki%2IqwWfDDowhPrDifX4{MIkHn@S3YP@Q!aWSai4r zMLyNUCzmDR5Lr25E2l*kHM){$pJ);~ltxl563M3BXGpH}5i+I$NMm{lJ~Dg*39q(7 zPp&*bb7dEy?lZY??0qTBy>$j&d~5?(=DI@58@4d~-6=TaT?S1@!%(J|8~P^p7an}o z4qyDtfgjuF!AH0F;LArfV14cuv-(OHv(oW0<7JWojE0k;j9&nH?ren44;_Q0DW)*j z=Q9|Pw1l&rlfg;5|N@@C_h1eghm&l!gyubD5V;GXP(%8{TN0 z2|p}b1S(RBm}7^;xIl7^ap~%f##wwj)79M$+|@tsffP3emY$CWRmw+!U&17KzJ!H=eV7_yg5emRxx6Kj>E0ylF-`bHxNGU3Rjxzz{+~>IQU1)z9T&iPr8wxGya;a1dI~-js=*a2yBHH}4s80KfTm6N zfr>^lfJ5<&Rq-9};>1L*YTp7TZ9@St4C;eJ=k`M7Cv$=7`KO#sQ7;+u`lqm2Ukbfm z!7che%yTGGY{ZJ+XLbK1tsMxm|4#^XEF{m2ITX6xoJH%9uo@0D2 z2?60X>fp?w4Ceja93aW7M&b{4Anx}Wn2VKhQt}DZA)$a=J6++vnJ>V(-#l>0_cvJY zBL&x<8vxr@C&GoP<>>0uPiQ4I#660;@%&r~{A6|->dTmmzL-tIX?-2=*e*e|a)knF z?wf<0{WhX)6Yh9=vol`w)eZlNoQ2QW-9xvLF#g~^fxbp%qIaYJ82pVyi4 zHBFVsApQgLvHS~FqmF=u@1wwTkv_&}&e^ne*t$)RcRBdCx#4oPh&M?kwwR=pFa}E@`xE&?VaKtES8g{$R z!Izx=Vmr42>}t^jFBUxnc-AeZ;G5XLZEzhB;*T6CnqNJ(8NyX+$qrnQ3!RJz7p zUf;&JPmO}!0187|{=k`c>wszH9p-q$3+6yfFQckF19FAWLpzVV;EF&1*Mon{^n*b* zcj=!3_$>Afircab*QhQ-w0Hw^YFYtTEhdah@FmbcF%5}0EPxO0nE{ygfK#@A4`Zo! 
z2c9)IML!m=L~&2k;cx)~1r2W)x+RV|`BV_TkG_l&bKb)h1M|`0*>W&Lq!RAjYlb2M z`(fb|OL$eP3M}k>2c#=6gPijk;8mVAyfmhX=O`4RhV=%x=|L(wigu$`t}^OW*$u~& zAA*3ZDd6qcbh!GWE0n>ZP)N!HUEkV>c=KPPkXav4b=bMWIx7gST%MgHE;(QT)tR4cZTY={%336pBn_OcZ99(h8nKkg*qvHGNT#wIeS z*OR0y*+EuZIzgVxDAEB}OotkNk{K!uIDf4a3EnM1)(7v!4fe`-g!dUOziWqfo^VDM z-XXY4!IuoB7gNpchp4_LpiU=EF`l%<%Z&4gU>CyY>U{8OZ(UsLT?}UkG@-rH+4%W^ z-+0r)^>j&vF&o=IN!x@jl5AFpDrBr+^L8Z?Q}hJu@4g2g+RkKN@5g9{$`~GvF(hW( zVJczYLMOSp>?&ZxhWuW}hW*;bS_d`L-CpW!e5E?Qdb0+o&)Na?|2{*$@@w#0)jZlH zY(;u&@dFYa7GI)kOz*&D2h zMr}AAXC%o)-9;L8tDCyIP`d2!1DxuiNbk+hCpK~&*h}j?R!BD@5l=o7aoIKW`O(G>6Vf^uqC;o4D8OpuW31=NqLZN|kaa3Oi&h<28r{-tSK@%~0 ziz|ZTR!qZIcN|IC!#((T$O!tm=mJvq*Ml?mVJJ@p(X7i7*g|UtYkWPHM*ibQibYqj ze#jI$w|_Z?M+9-=uh(eD5r5Qt%ouHI&p^BMzM_%SD>0K@LkH)sp|9rj(oQCZCe$?% z$wk%V-B>Cr#E>O^D4?E~bz5(rBfj0Y%A&XicX+O+Qda5^eLy>JJyli+RHIS~H;c z%bn<8<7T>1$(ANd8q-JXJm|`ATWQDee>9O*WOXJpX{RrbTv&G#&u;#J8!IfyzRNZQ z0b9a6C!}|`A<_36#sZ4wr0vXAqLJ)I$EG@n$EQdFZ1&??nc3JqIvDTLi@~nn&CtEW zrZCK|8rqE{pwy)sh>Fn|UJ9O5Q7=B3{?ZM18y>}r{;NikcojMm{g#>iq@Q!SvYdOh z>OOGLOGK*QeMt5dA-vJ}F-_*(B6-^tN#V!yMAm8y|NdeID0 zw*R=a3z`;r7T>!iM~O)+)!HM9Z^XBw9G%nHQahKdXp5(ZPG6(A?-6z7{UHMO-*A&? 
z1XewK8CS-=!`s|O;o{b3@Y;42%$BF)VP$vXK0SqgUVe@q*GZ%D3SM|pw;pL9n@(Gj zvWUPNdldhwh^rni4w&KyEc>XE_$I%i52}-BLdsFxb}tF9xS0+;CS}l_|Axqcxdx<$ z7lJN@=`+2jK7or27yq~5DQ@erAnl7PFu3*x)k$1OdfIPc$m3{amzhQ~cD}=#(tqF_ zr$C$5BlXw}$6(eq8+)u4Cv97Il3UxfiOEwBa%pP}%G`bjW z+{JOu_exY@){K%KDq*W0Pu%J$jl)j=#kyHpXjbb%v{5k)efY;{G<0TS*|T-1a_e;T zZngpJID8iD0u4alxfIIX3_>ruAl@e*i3-_$aQ%BRn3y#P0t9uSU4j5mQ1oI-7JG5O z>8Nwo*4^hM^rUiUhP_}Md=lWP)iWT+?-Jv+{s~A^m4~JuGnrw1C9Yi1FH=1dYx36K z#pvbN`zCbWFlYItT;}rW6HsXF9 zac%MzlcBUy4!89uQy3wS){0p{pI1kLbwx2Z%Yx6{V>!wlo7==l{Q1I7R^7lgeK^Qm zzby@FYZikdm=333k;QPh7^oB)G3pL1H~GjqZbhXycf1QRRR>bJ*$wYt)VQV~nroF1#r$wt$*pM= zWIXiFGIcVy!I|qHLCc{kM!97xCn7M`WYtPzPQ{5)Cb&ErI2ZYWaep%q5d47iHE1FC z^WzMzudWIxYrBNp7SBg(ZS$GE)oL8U(m}4ujyMP<6`(OZ0oacZ8SQZ+Cab-d(EumG z>2+4H`;P+NbOz({vH7T@^a7aD*#MifGtdJ!O~_v_0Aw9*Ge&b)fpc=zz#vZwZq1cL z4t1+>W40i^a#aKiq{SmIHUWWunp7x|2EPurg2e_8z?#kGaPB5~G$BM#-alq#GD`<@ z*xGzpF1|_rh3|o3DXl z-?XA_-PcgpFBdeDy%wbno<=0~1Twg`2Q^kkqFJwQqT0nL(5*0Oy#G}!;@QkZDrW#P zlkR}}I*IV4V-kFj)(4l3K89(AP2kl!0l-L~2Gmy&MjHX7bVv_x8QzI_YXGY5sskm$ zPC&#Vhsjgh0hHhQFyRrmI9qmJ=167%t|Tbq%G{p>_ai(|rCAh6QR;x>j^|*>hC57# z;|#`rbrttPzbZ#2AjEjBg0Sh98@8MpF9EKl-gaPpcp;KF)5Wa+vJCh%z6L5wPcvU+ z+nCYTgUsEbr<@ZNk|t&R=_Wz!E7Qq*3+@&VVVE(Nfli;g1})Q*VR4WX2(s2^_*J)= zf+~OJ&A!Xr8@DBal|Vi-B*|y2_vb+;rv8>3i-2Dl$xDCga=aB;%npoIMmabOs?Jy&YF2HoTTAu zPJ+K8qpRWrB(-0I+uuZyipP3fajl2kFA2lu^CiiD|9o8aKWUr+r;Xf*l&?&*up{uC zP=&?y28eU%56I5Qfn)G864zNq+V&gcswgph;Obgn({zc`ebtuDM1bYxdNm zna|JQ4SNp~#oQ!PrKg2Gp-eIQz0g^5)N1UHq zka1lT!YdIZvtLT%82`t}%sm1h(>aRIKG((4Kpjg|3E(8D2K4Ph7uq2;in68Tu%K}* zvXL7_7dD;7>#m)|W)h2uaDMJg_nf#;GwkB zsO!NgT)*@^cb8 z)!*RhKY?K9zq8O&V@2@Ep)T<7y9CHwG7YSlUBM0P(=jPho^Ep8(8Od}+;5W?`!txV zZ#Yo--BHHm+gYYUe;i~Bw1A?`jqukXK}8x>uwQo?*K&ouX>U)GslMMLj?#fzW`|w^ zus9RP6g7l0MWynfWiF?=GD`6e7$d3&<~XhfH%3EY_+3X>@r;2HDjVT#l_lu$brqzl^OEs@_J;dL zaSe0nRto25+FY*LEG5Q>-^i%f^MG)+5)@+xm|2S9%+j;3`#xQq zA<0~m886mylqG&Lnfa$+(4Whwzaj`}c-k}lW#_oLhEv@4LlmSftO6@S`1=>F}0ke1k+&MwewhdF;Ujd1-FM-E^A{6T@he~%}BF9+*c*id*{9x;5?00t4znv0w 
zyivIb73O=QWv4fzV8>&q<)-_;{aYubm3R#KbB0k-yCEt({|1r=8BvbpD1wMd1Z=CD=CIh^Bjbl1M=-GMF(I z9uheNe#L2csc1Buq`ZcGuqu}3%V240@iq2>A6@XQMy9@(;(6k9$corg3j)jp1#g(}!wMG0pXZ(v&!M8Zg| z21chu1_UZMfS|I)Nb9r;Ipw;E47nb}U1}?_W${9s5-3ek)oHYxAH!_;_=uU@CIf{P zPhs6>YqaxICW=^5haA@1;q4bEBj&3Edc07T77kpXU5`^xt5PUiFg6~v?HoX(rrP7p z+n%H1B{}H7zHzwz?qz(U=>xi);EK=oJRwtx7@EgU!u5OP;asIU*wb_cuOB;#DCfrF z$Xk}!@!Bx{C5|WT@^82|`4k!N6-DmG8`AyP7L)zezfhWB2o1MdlKVmSBxZ3BKKk<| zzWP3#lmSe~3ALzk#VY!Fygw;iRYgpL9m&l{oj7TJ6LzcFOb+*?5mo+jUkvzHt_GUFY6c3zI_*eu0q_H)$V$y`q~osGp@;*MCv3P&F>VQI;F&li|t?dTHU3R%&X}MRy!h zXF6~Ae#d=$<7R!L10 z8fd4lfTQhS=*WB%?zV0f>YO@gsJ0@|e>vJh6ET%4opXhbb z6izYd9j(7&!0~mT(gV}mXv#w)E~v_!^PDHo?d#Y<)0J=1+m&OXgHdHI4RsFyVe7A$RWcb85BAhHH#d z#R5(8jUj>5#aOy4aXFwLNC_C{Q)nJPE;q7!%Y zt2{R@XC^hDzmI-U=bpVV<8)M> z(6fIdXhr`G>a#`S1>JX!#iD45BAd=(i6ETKMgKEy_MGQ zy+fCKex-*uf1(YKq`50)s@x?H4K6KDo%6o+k{-h~bba$Ts?lZ6ot0DJ&RqXY?|4aZ zS$*=HjfABiHW;A+AAZof>JiH4{?dm>TIm<&6}4MBfy=(6#qFz8>2F)8iI9T5xw&thk0eWA50Laa_Y^ zSw|<=?M-~x|zuAuar$)K$ zLo2u~Ia|4vm$z~MR`^NW4))wySB8tbCeQ6DyGF-u7^3G&r*k@cr*Hx0>fD<+6|S{c zh1*pt;P$62;y!J6=aN?GbCw@fIKTT<)Y~ta#^+zB?o<9y!>waDy#rQUmE=3evy|`* zv8t5s+D~NOsM60-%V|mDeHwVMi#{K^Mz`muQX`96`n1QG^V*@wot|RKWj- z=`{5|`c|)!?vvR-4Z5ny{MGSfo^la+k{?3XtZAeb7A5qrOeLKj+Cy`H{i2-2!;-uD zJyp#g(mxxh=Ky;=uxu!{hkc zJ@W%-9CM6D25+VV?pr8xhN17@{UrPJFtwQ#L*uq2(L>9Rli=<;(fbxP@n4PYq5(r4 z+TT5gS{SC2Pkm)%c6L75a;~48{e-DSYzEEnK1MTJ77`pZUi_c4P&{)ZE>WNfeiq)Lv|95FMJbOKf&#y|^Iin#l7EpK6{S zFWUIoNTg~titg}OLUdzfh}-cuMD(topaZ{Zrz>TNF0M!v%PjO4U)+!{n$x?J29@or z$^7xETG)Ju8k@hyTdWUb)nFS+7B8nF{S>0_cv+OTut0on%XaabH?KuXnjkAkye9j8TN<+T~2;MLV6dxs}I^v;3Y z_CH5A+>qf|8cq-QBoZI}0&>0aHYpN*7nwdiDh~PUA-<>+DGHVGB_W1esMCoR^qY8y zK8@4n?)1!|bvFv&P*6l|jp@f4So%x!ts`0dS;I}dV{D8_Z<;Ob>^w(;K2{J%`kbyZ zy-nBF&Y-K050aM8UvY$uED1_FFAAD`RJ=5jitXVH(bWfr^oNIjtzx@dP14|EI`CeL z+O&Pb>X~C{`5k%sX=esm1?Pz(A|u3SisZyC+KMFczs&-iuOCeiIFpUlAEd3Fvm|v-F&a65G%a%2SlZe@5>U)#W^<&-+cdLDv}i<FkUP&k^NCS?4w(2?5ZFN zo_0_)XYfbudsP?uQNe^;%$K;NS^y3DX-Z9>+^?CXxVPEaWq$#-M?$cTi 
zg|1rT3_o&QSVkZEnschB+h~W^9NKPGK~u{-sJ(Ux(bP~9?HLmznz#XpJfu8Dz8`!= zmg4iZ7HOO5Dxt(HG37ON4?RvVj^0Cm>#d=ddGo3I?Le zkTbQM$Jg=`rBjcTaq&8W@PE>bpuBgmrf#{{XyJ&Vrq{wTk zfha)w4z8SdjfQ`4p?gpE)Trn%BCn>EqJkMNBHy}OqNt(^qWfnPMAe12MWMf{MW6c) ziZ&`Z*Sb_4qz7MJto3|TF5-{BAxiQq5P`ovB2Z=~zNBw1em>n&JUMNe*mbp$_~^$` z;`yRnk#}||EZCPNx|?M!Ryl~ov;W$P3s>2T6Ks6MPFq)q%eJo zY+-64{-PZxTCej5e4REQEHYXNYIlm+&NJiKRs(hRaQiaWExCeqxnvFW_qMWYx4E)D zzQ*jn<@xO7FR4(rbO_k@ECfT7S{YVo#&m4)X6n8bGC!V2GO2!%%%(4;!mZYin8weN zo?_n>@OHc>R=F;j4LI0|HknGp@prvIWbSk3VI8|3uGshis(3PF$DcQ}`nxpu;ba_X(}_T6uN5|! zGlp2}uSFJj`%n;3!?V7`NXyiH6$Bdj&0FIBs(72Pw$>Aq%s4a`4ze%xZ zQj;u~J=;#=SX+9p`b(2S+p#_Jhf(aRi;)wej-cDDs7L!j&s(VBfP?l={_~ zo9dQJe`UwP$A$W!PO26rcpzwYBaeMQL*nLWS&A#`qe)KWA^1yvB8<7|0?*dk5v-Ji z#(8W4J(oD(HF628*PR872?Xoknxn+fY%ITyPlm$g!)I<@fNqxoqc@wt4+iP%y&Or8 zwnYy_ZI1#At`~ty$^5Qq#Z(0A|Dc+cQ}EauJ%Egv3i753!9RH$;6C*_J55Uj6ob@Y zLSiLor|m!&Jp>-P4sgNL!?1Tp7#zL&EBoSsD>$cE3a)gegBdZmz|xC~=s)Kx=+^!s zWZrxV*`0Mp#!915z{*HiqBaZ~+`};0xf=g$$-p7wOYl7DvE)kjDiX~LC!=EzleB9U z#1+cY2wPiPQ8$mWJ*6aj{ZhO>dN-aTGZD)@jlsI_#uHwvBDt=-pRC@KOqS+pO0M(9 z<=YDJ-5bfo&hjA9l8wbVh0k#Ll1EtQNdfNHCFFjQG}*gzE6ISrk+x?T_$^n?UVhOF z)~k)ho6Gu;T zFyCGYo-JLD>RdG7raecPdMd~6-m?$1C3V1$rvA_%#}l}2Ze&#qq}V%E1Z*#8N+V!Ws9V*Wh+TG{id7%#MpQ8YtmTqTLzg}j|Etj%_#UbE;csbms zHxn#~C}WOX`Nv7p0R@k#qf#CR@6{P;qBem!IThlFtU6LYn)mMF8izF@w2z0 z>8{hEPg5ML_;Z5f9!jCv4Yv55SvxMAcn)8gFcF{1sYZ6!#-o~C5gK0bXm7hr@3C#gRb|aBXG?9;lAOn|5Bp4%hW@^3|Ux=CfGBPix_x z1y$8YQyhjc)IJh61U#P}E z!kp00<#lkcbPGFg<18WHGI6Np=i{Cb;pbb46NLY}FNev zw5i+ZT=!|*BDKS0dY&=3&EHF41joDJM?gkC0EoK!s+yd zaMpt%+}*%nZl3)(&f%>cZgW$B-6AFE{gXg_y8}#cpS*36Ll>|1ofdZM4&k)^hH$F= zAzbEQAUAfk14(-O3k(3f~%elvF`_mr0Tqhn!whzH& zn<7EB|+y$mUBE$m*0iI4^Vxz9tC87Zw6M%i}H5uwUn`EM+otR!5+xh)Y}O^g1CZp3ACevIW^~j- zN@Fth@dpzPwEW}%-1hAOTDM;xH@MG14x`l2yv3o=)5{J-9LQz+-h_cDQ(2UJb{BSE zKZsX;??7{+I#6~(EOw9V#&vlP$l+lq{QKk;V;q>n_d4ColR6;bMnt1QxK<_X#Cvh} z_}9>+aT^M0*2I@=+R^A~Cy_zSgdDly5G5I88e!$&sjhTaO%3wNg(`y;_y!(3SQ{{_BFYaQ~ 
zcoT$X^~b?%+Z}MZ8%42w4(N*)fXOYkrfdjCl4cL&Q##GIFBk*$Q5zJY?pDAow04sl*paau3K`+xN)+9uoU777AoPOMo zH*3=kLC=9G#(w!%!3*m?-c9FGf`lKFm`#y9_Di-6``dUFaPCkB1uD1MF}q9!TbDkw zRXy#@FJ!%$*bhg*mnw4*mUapj%^n3t@7u|SIQ0spi&wF`x7-DGIwJ6gJNJ!>Wqgcrf1y``+~TT{^U z_#`ac5rIn2JEB7UU2r5+A6{iv0Pbond(2M>?)@7h~gKJXAMRq)$o!LiL&7VfZ z$8O^9!I$t*(q;VCAscW0k%^C6j^GQ=8Yvu59^n}a&|%i zu8wF!Rlg%}b?i~n5j{d8_dX#0{~W2WX(*MA^rBxRtoDB+srbwXW6YTuLZg~s=B@P+ z;n#KQtf%22Xjvy=s~$}y3F{THVXqOfbsxr)=R8LnOmwi*tt`0g&U2yd*BrZ(*`aoG z*9kjKU4;!7t7E%o_i^^-1-Nsb47i}YpEu>Lvt7xV zIzE%A1l~OEg=?St<9qLTSY~q@IQc0XsM_dBW|BEPkm7@7`I4+N{0Pvu8OsKY*f46X z2wwZt1D~5Ip$N5fj;ZRJJirZpEi zsmLOQq&m1m<`Jxkkr#w2U!DF|Wxp+d(SQ(byCyt)Y_wo$(==i8m^`+?LmK`*vkWY@ zl!BtAjwtt!6>N*Lw6kviWVI-Iy^XQ^8sR~4Jfz6~S~8wpJfQ)VO3z0_dK7x)MuPb#mB8buH1=ThOgJ;@ z2r`d#hB>N!U}o@fruXaub}&B+-a2s)Sti~x}Hf=9jv!=dSNI7S9xt1Jd@Tsn-DT>{XAspFCI@Ce+-Wuf!rEMY1xl9RJ@ zNqk5r{w-6BKQ+pbF*{1JjoTXBe}u&zdivPSQ;74^kK$dIL|8kx6E{uTjziAnV6P4n zGWNwPQk;=QR*yuG#H!h(Z>9`MUwaClR*S^%@5kffgriuy^E^7VuN7v^7>$CHYmmcP z73|{DfkxtzB)_L;(5YZ5_;MhF=|)SKQ#-G+#}ePbS3oicE8!f&43u!(ylIp+`4w4_s#{W1+}27ifZus(08^*T*WwUZxUW9yTn>YN5h}XkfagcpIUCOKyo^8fqIeG9ty z-4&vLh2UPxU-sADShPJi5mkmA1E0O3`7R!JtlySw<7K;Lu)mVt;DZM1@bjw*Xwa-2 zmYMm$G~HJqsdo(Ki_Ea|Q)Q?fypAp1SS#42x0AK%yMYf&+^w%nTgW%RDfnIW2{g1M z1inl+gmJ4)aH1S0zuQ{Su-tvv?4b$gew0k!|1Kr78VsrB?rWs}WPv z^b}HoXW^@xcA^^v@u=~0AgZdgM#BeU(bNDfykg)yz8RuToEsd;t14-7S*-{E=bDAn zUv=XfIoh}-sV19urm_%-R?xibuaN`86VR0Q=Mq++d^wn*WJl>rP-mk;*#yW&Tc{>1xKJNlJT!@P1AyICg1}wRbrpl+|hGS!@~N{mEG*OpFymjo>NxQ|=d-|5Op|;Rmy} zTV#OD@F-B5%)$kKWkI8s2Ak!*nyqiMXR21#2y0h2fKWRZwC2=pkSQ+*C;XO%Vc*UP zwhm?pjU)cBYkx!w5}u6apX;2#kAOdf+Vx*Rz-xab`%DK<|0n_(2DM;UFcI8vy*Rz% zjK8f$=zgKinAHOA&Ptx2jF_L66Un4)UjzbUvcVf4Z*)#_=6Ntb6$Katl5-ahD$Pn7aWIR`yX4lZAGDAQ;@rD34a=I z-WE+pw>1o=7G^=g*N1@Dlg&bb5qnKFkLff&3|8uPK+}K{urxxV)@D=yr7( z2sEFAl2f0diSPeFwGM5lbt;;@bHs=F{9+~8yv_n&Jh}{3sZwZde3>0uZ3fP{--f*_ zi_wkeqwxnXFPt2!j?aCuL@JL)z@PiN2xni#7y3Q$kNrycsiz;ZU9b#=S2ZEk)zN6I zG)911h33>GNNW4bXp>_XQnm)Tf97)%SC&I2P5wxR4(F0=?^=>9)}ssVPNkX2lW1Y% 
zY8wAJjxx?Z^mwufm3=giw3f7z?VIw6f2=z-F4{ofQxR>K%oSgsC&QWgB6@DkKe8=W zhn}j-qLXjGq9@Z@NKT_YF)EJ7Z+jn+^o})j-?*i;(sL4*)xVVcQL>pE_h}b*w<(Yt zof62ItIp%jZyQZ3=11ZU?yYcPXBdf9>n489@3Gd41UlDrD;N1XgviLUpCh+U>Y=lMf>Y#gZHTZe2klpvx98P>w zg5kelJb3&)|K7JT)1qde>3<|+Mv?my+2q^Cz)?36x?Xz4-m=hS$IsVgO4rwbnv)-J z>yk0##iY~B_Yg0;<85Pj;oD@H9bQLJ^&Tzk{!RlKXNbW!naQB>Cd-x^Ooo0ZtZ??r zz4(Nu#JTG9LHO{x4Rh&K9I&WOlsF?gk)6^8q92| zW+&{q#HURiHcuukvb}RFhW~DIC2M|eFI3GL0EH(evQ?_tOyh4AiPFIV4E@jpryn`6 z)&2#79;0Qp%D3j(E)M1MgQLRP;P)KlId;L2wm+2hVtaH9LbqsJd$slp=GuHIE}%i*^0 z-TrM9W{ek%om^$%FKZWR=E2lHhz47sOEXnKgmkR=2_6{qevB z*|FBjVLfC5uR*mm+%vgFxQoE7DU`IVCqH&hI3a?9ZGpIT^b76W6<#>3)? zrs#y0HDO#DY-`OJUd(76~8d%*&TL$bj1C=>Wo^%(NElOgKT?uaYl zp{2&jsNl#!6qHy664LZRk8hUn=Z{K$a!C^3M`O3p(bbYYm8Js~tJCqHKml^pbwyc| zTM;;Q59X%EfI04JtjwF0JcU=u)~a_TlEq%6uCBZYM;dKCab_5UEDx3zJ(K$RB$Jh6C|bxI_#p{8|BLT+FCI&gAiS?vaXR5ws5&T?OgW~? zb^Tn%ZTDQv@jRw*3#+xcaz`UhrsF8l3GyNC!M*6t4jq`0d<-7DVvYagm*C|}d&r0Q zv&2v0y_NFPrGGA_(7mBo=w6*nGU`8NV!QMbb`2Yi`St6u&!HQ*{980$pSKL(Kd>7Y z2ZiIYd4pKBKa3m-t09k%OduESN-+`t#T~25vCO7>*iiEd4!9VOCzSK?r6Y53SAQW+ zTQY#BxVR9_e*xq{_Cl<>Z4jmJ`+<0>I#|kc0lwON41IWLkFtOK2SwlFfOP*7kTQ8A z^h}kUJ6;~btEbFCD>Vk-(r!MAzO_`sQEfs&zg(cc_YLqWWdqZo-o;-!_cZ^R=_R3z z>>dy`?iHLiwi>uDGhpMlEdy_ocf*#crqE_}GRU5El~wutM{wuuA-iT&U?)glBDmw) z%*J-lhZd900Mm(PY@*o$cJ9CzV057l&}sdyVWB zmu3iBmcuo3idc)+%Nh2hCXBLiL@#TU(Wg(&fXz6?XfF>$)rtUUR1sdrl*q%|2Bk=&IHehD~_O2Qh|1Z`N11sy3Vz_f1B# z6TRTsq>IALMYcSdf6aobuu^un)jc?W@qTb%%`kH)&WKsgMKht@geeJcXEje& zplRDz_KwD5Vfe;Y*`ykG8_QWqs_Oin+k;Pa^x> zYa9Qfd7@3c-+h}&Z{+#bU7D;WG6%kv(dbnAGPHbZIXnnOU{{+xyCtQbC465tYotN2 zY{jtckH86buOHb8H0zBcaDiZ!Tr3m<(PXLX66Ijpf z7g*ziN7$%X9+;$&jvC7pP`vhX{BHY9QYus;|FW%^A90@qqc5Lj)S|BmT!T#c^~T%y zP8MeD2mQ%-aAN_ston*Nk9Ok1>s2J!C6aZ0)HnUxrey1GuVeg^8t(-fgX{Rt?Pml( zW`+T^r}J^>$2Rolzyw^&Fw^|HJ(?bMaUoK8i7{WIXItKsE8fCu}-Nz}|jB6wHX9;0ns9_ca+BkiYNpzP3VXqBP|5l{S&+~tK5Tc;fISa_TCT}>f*U8BhH zT21ocga(QEB_jWoSy1x_MI`Rr1F|+emR75DQOhH$oG3t*Q!kd{^3sdxi7Q5Q`kJ+5 zQ^hg7zV9a<$tfp`_xvPX3;&QZkAAW(kC!jmL^ 
z;P5%GQN@Q0{PS!KX}x-!7(7uRZIUe1W=an3Y|MvaTG9aP_7tdj86i=JKYo8e1r=55 zqD#+u(ES}^yv$}9p3w_X_-$__J9P@M{@NuN3UA}Dx?n1tRr!ULTbu*aC)|d^W8Z;` zmF4i!87XYD%MX0RbQ{{q~29rj4vQ=fG% zfi=$2Xy$8WR9wCnwmg{&rub?Kvk%<2-Ebq_mJ8d*uUz$m@k$K@!4h|(`D1-h+G`6Q zwn{QLU0ISg_c?0LnhaBiGFYe3Kz49|Fbyq-nC;yEVXhasK#<2yNeD~8Ol`;O=8e#OZmW%4@(&@f9m&TDH8jb%0Hr*SvPmev~b z!=#-2-dI4C-<~8ol73Rt#8&+9>_0Sqz7+nrU>v@Z7lyvNYNL-OPvN@dDX_G60i1J< zh3RH<;d3KcjDj|+FTUtsU9E@S`R zJPS6PPe2=|SfUegWB5BRewtnzWnx#RRVF;%w+miO{|&yYn?d(Y>TKq96ZZNDU}u*; z05`;^;eoMQu;+KOuxU)6oy9$Qf!6MLCg#gSD3==rcdur^o@rxQxAwVg=lA7o<%dh) z{G3;CvhW+YX*&`2aYk%I;dpj<&j)sPcqAy*jsfaX&)7LOb?mJe9X2I8i!tw-3hus~ zhSx93K~f_rsM}x{eE5rlijv(a+1Y-O37rd_-u(v$pLoEq4Zbja;$)~lbP;YF-H7go zWg|P6QF!5gWz2tf9ce$>f|4r~(X6F+VRz$QsNN)n?(nQ7`%V_4g6Xbk>&b<VCFiVXW|RHgjs9)_%D+u3#Xb6uq3)1e&Hpd<;T}z^Mmcssp%)+?Pt)}Zhru8 z|7DY0U$SjS_AsEx41#kJxLKj?&Q0KF=@YV5~(CuV3G-lJ8CjB+c7tc`E z^?8l()M;a(-+KeW!`q{ntNEqijCumpw0sN$bPhqC@E&;CY^^Y9O|!LYSgLLBA_L*L zQO&}as&55y@*4!grWe9CYX`P>>q+*IrWTBs+)q#bW*C<5#Jgbm-l{e$$eR6QDO~5m z!1x2FVd>o`aMPdDZ1tHh%*e-6%)_;2u%fC2#fRv_J%+i0`Hw|5s}^YSo?<=ru+axR zsU(NQ@z-D@UOW6)lL<=oli2XceK0#r4?pU(fm!uxK)BOV_}kBkS#n1k2KWi_^ZG=r zfc;Q(R}Gw=lK__f^I#uEYQTHvjLEZ8#@PF}H?rLy3P#0cfXrkI6!O~w6X~hA?7K8} z7Y-w|;T&pv_5#j7e-!;Ydkf1~ZNcYcXJWHiRVX;D4=w3-z}si|;KGD)nE9iGMXeYI zjNOMFk`CjckSlm-Z3ls}gILjMDS3M_p8QeVM%JwRg5T|!hZQCy;xI2IQWvjBG_|Bj zo3oHue+-}%S3RiIfoUWq2jE|`OHosYBu9JM4ZS5f%Zr5)K33oc7zyv%GwBydy|4}4 zot;nT3E$C`7vkySudi_1qht7kcmg@PZ3ViRa|>iPoo5}@^Mw2FqzH%W9x?YzcEDKq zd>XmFjhae0o05qpXt-VtUmoD-la;4P&G|5#v4I2fC)Wr%%BKioKOAKy51dBBYt^}i zi^HVgBqFCiZ-Za<*yG=Rv$^fxZ_xIqW4ZC0Zxe|^h4ow`h^0HyOKvcLN&Q9q4u1;^fywY*bE&kmO$OfZbUk2K8IF4p>CNA zY2mZKDDUKKlrZ}=>HWHe+?VSHM%{bCVsCBm<)s{q`)iE@zy3$}D^243XCco1j|W{? 
zQN@1Y74mO&%d+Py!(jU;P4?Yztk8H(3R8AI1=J-wBiU>Iq^S2VUHp@92^&sQzvG3> zixpvZ77avDn&Sf!+UjAvMFK3b4MWz)kAuDLVxZIg6&;LBpskttG`n>PtrTZM^*f`4 z|AoaehTdARw>1UNTSIW8t_+&=a58*9P8-ej+kyjf6Jy#rXNjB%F9N z3`@i{*s0B)yz6iwdWTtJX5~ft)no9$9}}|V;V`bA(~ZOWuVFEpgxjYc#p4GJ@K38- z@Pfi2b@ zCG>qQnXO6DpuZKgC1?CWy7Oi?HO@IttzJz=H+x0MbI*KS6EPoo{4qzbf{uZ${6&Db zT!Ccs^+-+3aWoQqj$EHmN5}rY12he;Gm&L+pfNs}l{Jzu^4)#RHmQ@ac1|mv9nyuL zzpO`^3vKb9?sWW3%8cE2wAH$N>oVJ$r#=Z)d#jjJiJiiKf$A(x6v5~PJSIm|by66PJ{DCegntTQITYd-prFlC}LpSM82)wDLJ%t zV|(rh&YC@n_@@bougg)K^~VgKU*U?&HrHSw!gU= zzuSBq4;qKz$20x#yNj!_l58M0&e;*o+HP5|@e3nc!Cg_qO7xN~jn zB7I|qj27_zX|AwdXsu*pIKGW%u4u)UYf3?@kDJ(C3k{h|!BxzwvDev-MP}^m*S^eu zk9~wj-p#x|88zF%ZJD-~FIfIQ>mKILqaI-N>JCWkj$kyOM+;Gz4ExeQf_)I`4yN!; z*s9s*ncwZcY`Q?qsGm6`soVdtrSZo>{fvC}+O*LuJSzt-gnkr0f#!_D5f?CZ>=j{( z^GrcQw}Bw;#4};zIzH@j(}jsIAn;Z8Wh;&!VS?7_2#)TIw;kM}$qPLn#l#)FEC5*y zZ~YN<{(h-s!SL{O*0BPyDU9RXPMRJ<6D7_zqjzMgEr6I{ttiM(O6bLbt~*|{l)fJ z6fl3jTQdh$4A@DV7O;MP*T8AU1Q`wJL%W}m;DPH*=8Ps_r4n4A@ccB`l+J*ht_3Ww zBah{&-)HrV&#;}NEaAz=)9|a}V<=#nEaLqc1siQ#p#PnnXm+wL5`PPa3vM5TPiB0A zKQrf`B1vX9V^ zBLnkKTY|kg;b_j`Vu{b{3cSNgq0<(9U{+U~;7C)mZFZHLUHG&-!LdblptU;$?rme;<3;v`YS3so{$jAE@M0JitlRw;v?O0jT!8HjM z-;SFuCAP3Jj`I?j3M^R9RcDysq{C2YXfw*#Ih81#7)v%?%^-)ywxZ6>#c;$bQy`3L zwVASqBGuCHWl0h z210EmZRnR8i_4Fgkdv>!;ox7JNW=HT)bq@B+T;{NSI?0+Y-Twi-vx`|O@}jV{4+W5 zC{Yu|U(>@kURU9u*Ba!($ZT>-UXNZq6huEV5zm{ zFJ#N{2g5J;)m$CP8P|%;@(d?)()SSAY1$q*PrZz_ne*}&qQ;)$jl1;oUenArIq2w4v@HfiNkK|j-cPF2GHVr z95Zf5l(6>MG-iJZ0MSvCP;%NX^me=fIKL?yjxAq_a(s&s&s`p8y*!PoS3G46Z#3}B zdAYpnQ7VEJmPc5J7JHbPqK}pvJYq{^wt=UglVDGf1)88xhwfQFL>KnRpgm1`4Diwt zEOn9=-kg)ny8M^~pCp^0e{ySqlVm^Q`Ee8BugD@emv4by+>C@_AD2Rx;+5!^q#ibX z*h7k zRI;nX_Gl{XnQQ{8dQ?E<7cW70K#6ty?RBy7PykN#SkNSRolv-&J}@aP|Cn3V(s3X|ba-xrdu!UZPp zOc_(~4S+Y!W^lCjB*5QwiN8^7Xaj~C?CzY4WzKD!kJe6ohSqtngUW_S*|#fHSf9)R z2EK>|8+R+fzS|Gj?jk8RTCGlCdm~7=GguBJZb*Y#f3(oU@Nux?1p=$@&SC4vJY*tf zUI0neHt1e$BwTFm3)_p_fxIvR_-cz`sDydxmhwa0=e&`VNCiFroDP+bbMS?<9O`}0 
zfZ994(7@Fq^w2pOjVvxfzwZv9(xm%nn@0h9QdNx{TME!L_J4}bJ1(d9jpOaTX{uC4 zOQrdobKlo}pB51sBztGCh>+6ImUgL(L`x}AD0!YxvRbzACD|fWWTbxQ_iumnI-Sn* zoO7=0^Lf8hF}HaOzMFRhXR5y9`7Wu@vTYF=eb$S#r>c?kzt&{^9!yGNR}cUsC8)9P zFEYi&I5+hWR9y|C*T5dut<8lzt3wcSQX8yz9#>ZEO01LL%>+(Maf;ce=`^srP+P4yd1rpHiu1H=Z3;I=CbK>rvxdP;>`Xvm4fNh{h4heH<0tFA)*RKfE_ zJ=xLB&l&z%A%`9W{&i1b*@!s#&TS^6HvJ;mw(fMbnJOj4lj!10tLahmN3`anKW%n3 zB1*QhWSaFkvQ4^(XnaCsiJc!AsXaT_@O47kl}HMpC+ z|LXB-1&+D>|91{!?#E6QF6&7T)yjE9zlrYfo|iUi)@sQ`2`6&1Q*F5B+tysJiY|9w zLxU^0pv9Hc8gn0V4Y+n48BVfZj`LXXmwtYDg_?Db<@Bx?aS=SDZ@6;5$^mrL=p;#N8U-p`=?E6JA-;aM&q{czJG3O_z#x3}6$mNcxbE;8_+$se_ zu3(*jdsS)9bmfVpc8SY=LJU1lzMjyp~qtA4!=!kR`jc#b6 zk2hYSnPoSr=&%~6Gs%eC63~0UiDQ zEG1G*okumg*|l7g!eRvdrZCLUeSx(C;H9t32h#EMso|l@(h#TwEgQK zjp*Tj*T&JDJAYmGWGZq8LsU6QZ+R|~XKGAL*W&h8Yje%{TAX*4CU;6SNT*B?<9^)w zMrEf+aE2vHT%b^qOIo7Lxqgx37W|UuHl3NsRTo)tVKHW0t(qoxbK?lL*(T4$56N;T zx9D;6`uOXB@9tzKYI3g{l(`cDzo?O;EVq4*GI!F9XQVic;(~gmxudsLIlJUBTzAiS zZXf7!nu}$*ZDkVNzcN|QfB$nQG{p*r_$oD3JXYJ|qyNO8CJ@XU}|y>xm`4c**&n9hH5ld4Y^zvL>eb>xHTEFtHQ$>Co-U#P zN-L=%lTO{8gXy@m06Ho>imv{UPZJy~X_8+heVLs@`OrGgkBFkKZzJj6&Qu!RUqt&1 z^60U=GxJlF8 z8>wZUkY+g~(nnwUXW5N~G_kRqp4)tm7ENiOS1hhjtI$TO5MD=9_@6od zuBI1i&(lvknke>pNVi|F6wTrL(`xZ=gpZ7sg>&*viQ=h+{CBP-Rhw%@9T!N_TB9g} zb|?vtcC8SVMLid-w*D`L#GK#m0$gYRd1A-c3!^t35K zbi(|p$gpu@!^a*$!;3pk4V^R7MBRN}!UZ{c>z)qQZ_W!3 z@2wDZwzCbl?HwCVrx`X(%T5-R8mbG^<2|W$XcOJ2wur_)K0eW>Dg~y)T=FD*wV8Te16>|A6h&_-5(}5 ztY4enpb_NU(0fC~>X!;K$NqN9thiB7LE6=H_*OLhG%FCUfvP7TXjBl8q zJ*(lx{wUE-xg3#!`%%%7^~XipmwiQx%?=8a=N#jgh&sagZfchu^UH(-9_gaZky;J$ zJ4_pv=-7zlAFUC2R3wUq&Zmn$B4bg>_S?dJ3|A_8ZA}~hSko<|M$xv1x5@mYC&~ExGl;W81fJ@vL%zsqQCBB# z8tZqDkcJd;FKsu0NN=*VtAOYoI1Ir_7MCSDHeNQhOBY_7RwLYJkRrUY>pWgLshN1u z1Eg|P0gN=I2uJ;k7k*py6V@(L5vo0I6#n(?5sr-cE-XDcTXa^QhzzyTg*&GEl7*`t zlPdjX^+(=y3$tEi2*n;G2)7PT6m2@-EKYx0UIebUgh`n;jnJRpc>LMCv;4U(mJwufE-vQCm+`XbTE0RQtR}YGs#s`Y})zdCpE>@sVw=2@p zqaxCBN=hUi5h}Wb6-073w~Cs}&x`C{UlQr}#flm}P8Xfx%tc=6e&p^JN(c6CBEg0S 
zgbSrpMc>k!L|UVhMK%2YJXs@IBx@QaI^0G?p$E1JsY0``CFm85JOQfISw|EmiHTBI z-4$iO|0&vGbWt=gZ=Yz+SQnAT?9rm>A3KDZgVjQ>|Hg}ITUQH@yFMecr(PzT4rK~I z1ZRqTq{~I8wyY5?({U4NPe!5!rBNc&;a=h1%iY5DKJ!G=rPM^8pAVBNDPMjj8!24u zt1fyvL0q)qXsz(DVZYE>?W)kTB}v#g`n)hbxLbI$fDsw@{u7p|2a~ewOmfo%!6xVJ zLN&RcIO*;p@UhXwNyjfkbdM$p5wC;gXKvuYC*7AP$@Ia@%Acf1cOA9L^QQ8fG-&Zv zJzCrhv{ZX4UAoqh)~lG&_e&?x?`Q%QlQ*V5tPeeSGKe~bMWEfG`%&M+U#y7Fk9i(R zU~`1lY`|z)cE&|T*7l+mdvf4CQ*O73k+HI6GbJ}6>)1E!l3^88E{tYHuWK3h!dPa* z&m)4Uq4$Ctp9Tfd3}&8;a$&~HFJk`Amtb$r`^j!G>SL$ZyhTbpyI4GXFI)Nd9drAT zIwM~5pWxq`UO~`fj>(DkWQyl~5Clw)Vm|B~hot)tptakq@vb!a>fGJjJ=JF*VkjfU7&u%8ZyQR zQD@jEHggDA6)u}8Ydg+biU*^TS!K+z^R8^2lM50v$pFPdbMlWzC#qM;fYt3s=t=i} zH0{oAcE{N7Oxo67w(_zQde-PBxVtz)z=4k?QFAR3exl5$9A5dfIMs;Bdx4T*84y!E2Z4e z4v*c#%Acg{3ckDCoX+pz^Ii$;hUHkY`z0gh^G~3qBE>eA6tXS*MwqWD9~gU^SJTz*z1XmVt;&1CnA_$t#*I7JJ$f(LUHT?$s)9FrLt+f;^`)Ds zNv&s^W{zT??461}O}9p;dgPFn;bS&=p*QN}J&p2;2!~t0Lh_<}$S(Q_nty&VTGC07 z*RPvwTjNS>-d>GQ{q+Eo!YEKWdLMc`mB{If6G@AoH?gk4Wb^KEK~W%ht|8d!q{jN)KfyNUTcc5@uj76E{%1he7Z0Y!pn}1d%%2OIPOB3A*xNhO z(F5<*?DmQ%R!LKv-<@VLDQO;TLcJ0WzgmpF_>N-aLNjbSrknM<>CJ3XS7$q3wXx@? 
zFGXGkap-*oL1RZrA~EToY++my`Zcrwza6KJPoM)Roo2JD3bz?e*)>dDxGsC#^f($_ z9f!3lYH*-b1nz%Pf%>BoQA$Ptt`FXXuX?^k8gDMKS*0b6OY{3)0|=UJ+bS z;(MJF_JIoTV*Dv5072Dm7#e;COFtil>i9JZVKPvPWBwaNW>F9|HR*$yWsZ|H;{>lz)@S@p^Sag@IR^fIB&lqG+Dc1 z_rz0p8o&QkJ*Gxd%}Xg)l1CkOmXOrdDX<`Y9j-fFi@f?8aJl>&to7d*=-j>z_pW_} z8;+=v&U=_tycTfp%Jy;>%C~dRgfqE~S+?BjKe^OXA(mvQw?q4nP?%=(96g>j7E7mO zLU{Wbi0E3(opcW8*1QPko@9h`U*Cpum2G}peD+0J9?$Pl8alA=rXpm&coZ^Pu@ary zzk*SB$Rw+aLpjN7;heFNfkvOKO3-rZEiUk ziY24r%*S-wo>iRv&ZkuE^Cs{!V^H(V{p=dApRCQu7Btb>2DXK^BQ_MHNcq*Q>^Mc7 zWH}nLpVz_aKgQr6B7;ZdKC>o|*0GT$9NXkgP_5QB$Xg(W6YpgrcXFD2KIs;FZny{) zUns|_<-C7PCWX&$PsC{~Q1nOS(8Enq!gOivxt})j^6+{3DzOH#BTFzIN3EGUVB$!?;0iedLs%H=Q+a6^M>#% z#sS>Lw?Kq_3G5C&54Ag9!n^JEq$SRS$Ym#xLo)4TsP!MYrzTHJ_kJSTu~o!PZz{BKRU5W*c|+( za3OYjV+B_~?IJG{2&~gph3Kok@bSh*xGWt`!d4w8+L?37%+3_Zo~8~Cs1#S24B{Jq zGNEd2IoRzwf_IWAG@_Y@mz0S^)VN(Bc|IO)AB={#r{%z0V2;Q9e2*5GS7W&evtWTV z1-a+3SnApZcG@m06kc+h4ZoL$3tKrH#^>7h zqpcrn+25LFY)t4~X4{QP{Y0>P*an(^!wOAW) zW-#8FVTDgG)rUC?I5_mU2N!+0h5yCv#kpF#*m2r@l<%+-Kl&(6c4UqtOYJ5S?W?LJ zEN3#Q{?C$heYS**;0hf5x(M&0b$Ey77o1fk2GP7z{+H4j67aN%M6RkK(VuRTn<4Lr zkJeA(s4q`P=XR5R-wNUwy@cG$av>3+e&pB1$>fMkE3vEjOT^k_=y{t%w4Kl3%5^TK z+WQP?)WyKMl@)gE}bRrN5?&` zqW!Hi>5QQfGUen>Qh4TW#1qGO*F-eTdZa~grx~$}Urr8rC6dMKACo() zUX!K6X+)#@3m8t)As6?~B0>ijVs`K;tbRWl?zR2{rN9(`SD#4W*JyGqp@S%>`;rNn z)?nrugx7v-zzQlVyqD4%TK^`&l^Mp+FBJ-Nyx-x+XMSVB-(P4%jpp!Y`}8auh7SLVpzRf8_#LKf$g@hfzds-P*SZ0UtHsGNN_UxW5uAQX8||-J^|@P zKk*KgS4e!H4Z6C(28~RSf%=GXBudqroYovgl8!|}S!Xp4*uDUNog@w|d153iMw`^9 z)IsPLD^R!}3iS)jiKP}tZe_X?nGXWeT0R1cCT2qGgm6&VyB|6;ec%z31bRN5;4#;U zbcgoB_V*J>;=wS|cQA$=3i2mMOeYb?%}dF(ojZxeJaZyps|qzAbiqw`4B2^}CDudd zU~P8=Z14$#pL7l6#@NBl!RI*IkoVBs{eXA27Q-xCXBfVf4cpgy5tSTcvcE?kp05q( zJAz9~G?$TiQ4_;alvY-sa= z%3cgR-lpMW3lE~eFMUWpb~nygyBmCV-hFo(KenfQAG>ql33JnNz@e%9s>A3XiUMIh%gAOO zLb=LeaN$7|q)(LK2^~?WNcjfyYy1JHR!cL-TQ|-#*RRAn{uGHEMhtuf#VdX>rxykx z?a}M7obOw_GOiEHbuYqmAuROM66>MS?S{C)nTaE>s^R9o?#_3||KY{wOa@>}x`ZA|=( zJXT#{?ZUq?r{8Zxao$s5WyX7a)XfUFAK8H_;|g1&L0$c@9Ht<0hq 
z@Y((!Y`!-eHlIBTv(~$U_Mtd@&u$ey(>H|U-ju*tV{@`*@Bo<}yPXW?0jUa5B1867 zAfdVf0^XRCYChMUH*f|2eqe?t-4Wtvk4(V!$9hnBFHf}px|5ZBp!Q{HAgMRK1o55k z@cQk0VI*G)(x;T5ReRpE&KBda-KZ734lR#7IfrS0F0NKu16O9Tbi;dpZa2@HIDc~mS3m6%xn4)fqXjo0UA-FA|Jl)B zDTnEy`lsZ3emj;3OvS_3LgCp;HF}_5pDWOr%LQjf(=Fp0NjCrYA6Q>Z_S_gGOUskV z$MqdhFnJa-*y6^ts4QVWYhXNP(Lu6&bToZlet;xNm4ewsJu<|1adQIag6lSQHm&0) zR-UAZcW;qkYm$`Ekfsxu9@8aaA5y_qT>^gIaly`CeaWGcQ^a_}LipVw$4)NrMt-H{ zu(GBUTdZ9H8ijgb`H*+sR4L-c)eII7O2_&2`w3{)kd7=HQg!two>q~GH;5Ht@qq{k zs!N32khxI#{S4l_>@)gXxf9iRWn+QFO}MC_OX^Z2NMpV^_-%;BDUELUT4FO^pICyg z+6bH8+xA;+kBTTCPai`5wsV%ksqi`vhW|YE9;@ zGAGU{3@M7UB@aCnpxDV4x{eNm_ZOaX-EfT@RW2i5?uW_DTg!;yIVU3V>o2r;WkGa+ z9>@j8z@`c5xN-dzc&;^tEV21bHV7Y*LXY$0`$|F%T{#RIFI8ddIaQcIPvI>yi||$X zXg+6f0AGL947utoad5dt{upMFSpSXW=4owmJ*gH_c{h-Wp)wZUJ&D6}*1;&Y7Ie$( z@gBeDn7Mu%D&iE$l%-#=wrK!XNlwG5tK6`wem3%3m5MgLM2C%}{0i42TR_zD3 zA)_%r@aSGXNAngzZ}|wCy>=NI|E>TP)%dWXu5XzOA_L@HJRJ*dZSahnuh8Y(1xRzF z3b=ibfc3AkaL5ThXIXHOWg|uCqEQy|c%_2a>v8O6ml(9da}wSl*oJC_2uEI9K~>&N zCQF}Wpyp8lPI}~l<0Dxkfefz)yw}jcu3r&>Je)=7*kQgSbMg{SS&k9H2^HZ>F_ZcUd*c8FYi|_0aza-=pcnO)s>f%54y{PTYdq_Welzc50lD}Y$w*)of zhl``(bk#|`^|~ZJ?&E}hebzv)4lF=z5zkQTIxif4PnrJyw47$REu-b0X7tm16I%Wz zm-bqWrxEK$flK5E^!IQf3e*ZmwMIOjOhW?KBtIvQi>K1Pswe5fkkxeh!W*P`^bzt^ zvJ(0RdlBw$WdmZrvF0|XS;@;=*^UKbmrVvEN1dn1Pd2QVb{zKJk6Ew zS&!I2Qt2bu)I1F){Ca_|zS@h7v;&a$MS}L{d!fNdRcu}niA@qL@Ijdy=%(5TI+&Y* zLObT;3cCe(L-T5WHh3ScP=0~j2ldf5mCtNw_*Jz2PCDY*w8-b;O*XYN0qL%hK`Oi# z`u3PXyvcbseqD{R-|{0!EiC~#P7Pr#6Aanny{80wt4#z0yY~zF?s>5PzE_|vX8q{= zbtBThwF)%)dDo7?ev&53lDjV!qSGF`(Z^jH=*9Pa>@=tU*tfyasPK_5t`^OMg5hO^ z+?6H0<$9FK?juaRCy^Z}LTUbGtkJ|w*3Pw+ZCjRwr)-TPGbha_f6mE~V|RzZ*t{1G zD>Oon#%bjIwva?@PQdOvhS;5ZUL)V>`s9pwFL`$z)1|E|X~2eKjegExxg)ivoD%nk`d@nqtuy7|i+2wGlRp99ko$qc zl+y9=O+5&n+Xi_`m?~Y6;C{hauAo$c3u%5xxAcTTLdjJ;?Q#lAygLCcySo9!7G&Z% zg3)lRd>OxYlqRMA31rYvky?7X&=1Y_G-!W2JoHTfl&%fp!RoO4@k#h2t3tNa-GP|a zyHL@{1+#uwe>__@o>;F2 zE8|ije$`VD4~c}|z9M|lA{heQRLDGobSPQt1!ivJ;C-kxmpNyETHn_?*2m3fZLxnx|=)L<-q}(tXKM-2r`#+Ph-RvX?yqN*E 
zX+LqEgew~}C5m@|U1n<&#v_-xQfN4TKXYn_Av4r1%OoWB3jWI;$AmnIW*a_jMZ@*# zI37i@MOET#^s{rUqkJ1H>9d~wabv`3@td)NA%QWYHN2ED8tcIH4A?T)Y-8Ekx^3vo z$_dC|MFo4VJq?AfvOy@lgq=0o)5$uw!C{tJieu9U#_3N;fMD##F)Tb8&$AQM(Z``6 z6p|CqhGup#CT?;9#Tqg6w8ukW)N#+@&5OGZ-ha{@L6-j>ZaOW{3OmLQhj+5iE(YL} zmgA7ntcST9Hs7hAKTg-T7HIy}No>xJ-Hf?Sgwu2L>5jD)O-|k2hnTD7Zs^9=b6DH6 z6n$EB0QG*%VP&Stpc7B5Q0kP^sKbk6?QXmm9IA>E?CN?W*u!QsK6!GeDMuXCVveIl zn!JRS_kGDYOQ7+oD^c6a)mSvz0jJP5B-D9|_Vk*dy$VOz3;sFi#_l{&32nkFLnh%_ z-XgSLIt@K7?Zu6LZD9Pn5@Pq~LTA%K(CE>Ge8~*l{@x7K+gd^7*#?7l&%jN+j_gh8zpQOUmKCC?B#KE8$d`55BYWGm^49f&xyxKu6y{M`I=qqN<_$ zXv?Bb-kGh3o%*Naqn@%5W$6i>r%%B#r7S%8tS#2;79zLIBxH9W5NUU2pfj5_v3q_x z4jJW-)q1HT=zvf~o|Tattg?}Vbz*>UK@gTp8<)D%6vR>Lkg+ks^4YSHDF z2hoaS?r7!>0d^j^hVJOJqP1m-_~o4zlzQ$YD$}t*-Q|3N))vu&~$<&py41^lWmV zR4$8r%D({SmKAU(R0aHPJYe2-U#3+44=SB9fo+nnVQhL0@n$12Iz!HhJ9pEbbG@WU zJ4#DIGeVO%ygUHcWa8lWokL7_m@67-xr^)mc#v1Bf9TOM3%Sw*zFb0xfK!gUPFxEz z$kVhJuxg4ge4XqDZyxyLBhL45^2gmk`F!5f{w18`Vo&}#UW2O*u;jT558-3hSQ6+{ z1xMHELW=!D&{-)DeHnZ#w(1rX+#RCf0f<|ZrpNV7UqjdNouuU4@h}kl29jv8~ElM*kI?^Mxf5^N)^GWPL4=C?4Bjms>@Vs>a?87o( zZsUG1d1^!M8)uPO_Ws0rrV|;L#*nIaHpInBo*0%igRDm*96o1B!o)hr-)92qq;P`T z%oozb$rUtOF`OPKRj1Yq!%5K8-9#(qD=}*hqvd|z$vDAdu$~uBOpnc{x}NTI-<=#P z>dvE=`*P?g{@jWqHIn!6KFoV~2qrxOB6}f*$fexCKTj4w>f%jwZ9xN7pI1z!u1}!~ zE0pP9=LllqlLDuCe%kWZepqe0h;;uO2D`d<==XIatT*)$ys7vE+nf^N`E=esvO^sv zNd!Smcn)6DbQ<3}kO*{{J(#y|z#FR1qAcslNZx)T)_ivqDQ|GY^PET6rqUtix2YH# zX`{+!?R(4=T;lTv>dVo=rB~P;y_0Z>&vdkRC=8o?ZNpL1PvRNL#~4p#ZRYs87REly zik&%kI%~Pvk$qhx&9-%?u`!DOA*<~%I4!P`#7J0ihrir`-ULg=#Cp6_=@)OObK~wh zZBty%C>#^9&lA*J;OP9^I2R>r#wGj%@Pz!+;+^*S>~i{ zJ614pIm7OioQVcQUn5-o4a?Y@5jJZIclvV}H}T_kPOP$?wi)X|tL#x`cF0kuDe5bo zE|;!mg5?I;Q8(ToD+Qj_a9y6Tjq=mGq6e|f;W9R0=SFtjy)9_( znBR!JG{Jv-CGijQMYx%+z(3!aVB_%v=nxl&pRc@)o2EU$i$;c6yJ>^$lfJ#^%W^Mv zZR!AfWWgeK()=Ly@6r}#s<*|JPc)vrl-L=uFUVVhqlF)yfx*3;e zD$G~?NT-#jz6u^RsIr-#rC4^Yh)MJ{Vosf!!0z7h0jVo1AxVvAti%Bw6rUc=yiC_( z&IQXmtuj99I7jP~)9w^w=FEf_jA+?()^Mc+YOSAui^IIpfhsZdJ9iwDW2WZ#vYun! 
zO>Y-WjJ_}M;+bdCi*B>Z`6lcPO#yqeIu$vVUq|2U%)rM~6X$)XWBShAbX*P#nKAZ> z$h$WWN3Ko8^RBe9WfRkx<@+8o#U1-l?P_~mrmGF>eRQ$Mc5U<;RWZTDft{&Y!`iOQ zL`DrKSjCKcjKiLECV8NUnaV%&>@rS4lkT{8(oq8so*XN!pWGxVjbjI(lJ)&ocPy zO9`C0xDe%Du}AL`Wl>CII~qMV6F&Xg4bPsu!zmYHaM)l9?mE_k>rY+68xr_V#QJo6 zYeEtBFg%NgJTKu*!(G^kk%!_ZSFzIgOSr7-0&baq0mmtP$1&Fc)B>i0PuEuH-__dumzPn%s9^R#e)fMie_eXN@3qv(nCC-BQIX9Tp31Bm464@NImi%=| zCh}KgF`1o-mSwNSgX_N{hwM8jB8H!{8DD_z&z;bqehGYzl|rk`VX~JqCbc`yLb=x> zY`Z!ac`ixEw!#rKX4gb~TDl(@v^rrwEi>Ge6^@IVk7KQ&GWfYs6P(`P!=)QyVXELa z&e)F0v~PQfY|d)3YE&_bC>Vp(jy*xc7MpSQ(%)E5AO%Z!*XJwA7udt02#!vF1y8&? ziFmaQ=^yg|%JnMQ_~s3Qax*c;X2UHepx+C9oEwh!Oi_Wp-IuVj`wh(3;~`T+9+t}H zF&(Y3`1#ZxHnm=!`OFVm2Kia0{p?nD`vD(RpMD4@DzzbT8F6@eJ_$=L;&Vc)f}QF@ zB$@X10(54d8;*41a4D+2v14)=4;Fe93{E?@%glQyc`{XKvJ9JNG4~@Cx(Sj$jLnqh_%gB za_N>nna6vFggS0?Z@VQI?<>W{iJ8$4i%iL5sh?1@OqN`Aw;`sdSd!#xKq8z=ASALC z9vST-5uZ)zj`jsy^)!xa*1ABC$i5u?gw6cLH@3&^6HQy})^7UXY? zA~`;DY0iqp+}!_GaGM{jrxtLMC}=LHzZS9dTF(mVc_xjXA6BM0!+xYSw2G9;r_osf zopizN;D8yKQxnjQ%G9Pj-nD&v7Mm@1hKP z;p%)Y`+*mCYoQffFxiMpHl*ThFU65nbSqwzoJyX$Ehf6r3D6zHk$S-oik53~v1fg` ztA{XG*|MA5kLDRE69TZ-w|@e!{hoqAMN`&e?_6|uTo2w+d5E~aH0QW;KAe{CJno>G zJ?EkC1VyH=1+A&>_|BJ8OtH3=z&`K@ldkKIM3TAKTys2eT_?}|{x_Ekmo?|0T$8IG z-Ga03eh~!qDk8Cam+`B_RD7{d6~1k#M(y)=pdWt^W2m&H8pbbazF$5y!~s-HjuP>c z#<*zT9BjF~2&VxsO!)-{z0c z9dfQ6gf-*($%mSaWW1{)snME2CN7d8sOLYDUnoTbx0cbfeukX1$$67m%IwfKM>h)p|tx6%%-$=$_LfPCLhvJN3#WzG*W&iR^`r$t5H=qKcGUdO^IJROp-S?L=wzGIBKVC~W)c z0T)tbNhA;dA}kN=4r~3U4HOr73IbT2=qH{I3?DfXJ)zZnIhS4q^H=0_q*!A5$6c}V{9&o zIv)iI&%99bhL29g>x|foqwLWmaU-;(x(7)lXrYCj$JuEQo$-c)y_j1g21RP$;B~=N zESj|vtu<(76>2(BOVmPK(K8R+hxH(IUKGBwJqV99T0)Rf1e~tPhiluO!2_M8c(&LRu$2h7fe(J639*qV}i&kTt&#`UPg4;v_7s z7j|#I214<27<=s*><}swqqozD%VU4?&0r$I>RIsKVGz6}FY^0GXJQe*mPFg`BmyTB zl3UyZ^F%}Zy(EH+iF4z#?FVQ;O)iN$m4LHneZh+3g;3ZP0VP&J;6AV)vfj)C+8Y3> zamo<7YdREiBhb5bDUqv41v$l=Y~YIJ?7zE_c-`h@xXLdEeR(R6yrrL`Rk2ZcdiG0n zbml!|A610gM^bR`#a%e-XBse^U#v{I5_`s|Gi&9z64T{F<8RZ7T` 
zUBEtbE@c1lj-R`#XW8t2e|E&c0f|fRL7zrkSX7qeRJXg&@yF!5g6{S|Op#9`8z0$^ zJR?ick#o<`iC4#1o6ka~B}9|mc;vgFM1M3o=}{=~91wH5xWqs(YU@en;`0dRwPgZ3 z_1qy|C)bK*NhshS@0C!&krQm)rwYMqYkih&vPU0WF0sK=_?)#`853c!Nbvo12~#1^ zK$e3OapHeXIN-Mr&hL%Ix;ZQ=-r35!e7lGeC%NG-o9fxF5p6bl?n|avn9aV7n1H60 z%VUcTzF_b)5kEu!VQDKA$nOlKI7;E)vlSgzqc0|U5)-|rjeF9oyX z7hueS78pFZk~lhR(V@(8I)%xm@;9WZ-LXV+H{}Hs{X*dU@Gx%islzK})_`m7V(6N# z0=@qwLuG~}ndnkU!~67W0^-)Hww4xb~_>AZu4dQXv!MJ96HRlr{PmMjSS&y%@% z{~%iTBNqSnkKkj{mFUBZ7}PR)Ikv0x1s|jHu;ly=P^~S*pE7;X@c>i7n%dn?YNB6) z{G+DqvSEr&ue^m{1$W}42a{m!t|Yj2U=ul!B}=p%4AFoR|85$W&Ay6m7F0d;WIEYo zGG1OF=@ zaQ&rW*e+2=hF-iR*)!8X#=abK%_PXAbQ3b`$92-&R!zVtjJVL1pw+Vml$#EN&>@a| zym6nL*k(>Gw%x?17kJ`Pdvo!c14@v#dK7Gs84ET!KJbp0DVXUUgkwo5hGbv9ZX{+1t z3I5r)z@ZCFv~PmX@C#Ufza4HmhVsvaG7#MG6o~QK8FI+<8qv~qBhI0ngq?h}x*khraE05`UGn=JD}`9-iast$%HSEa|+wZ@4h^IndWha1g61b z7|&6PY{5D<3i!Yd|d$)SW|1{_{ldeRndWm%nz@9J8MJ#`j7@XOE#{-W}*q zUm?4-<1Vvg{37;0IYoSBTO>YkHWkaqEk|1drm|1FZwo?unwdm9jH+%a;a@|~k*nk! zr08*mJsK0u{&4)x`rjRc2QMI;`#S_(NqWM2p=4QEQ2|q@S8g<94ghIq&!L^?Y@qReP56HsV|C z%!DF#>E}B1SG|V!|KG#wW1F#jb}C*gL2*yQ1JqpOfH!e3u*IP)tkb*(=l-070~YZ7 z?qn@+d@%vOESL&A&&&e#Y)vp$>BeoNa`9Qk^H}+48SLIYiX6UKLiE4xBW3$}56>*fNc@7Egz6;%}7L$>3N>n3Uf(FgjCTos65uf08 zVrg$f?V|V6hV$zvr?-KY^*lg;U(mJQHSei;sXX%e>=sql2048GkMfCDav!?6Ji za&=}3ag$j={%xB>+;Upr!sB|PF)gD6!^;gitUomXtR6SO3#*#Jbp2*5(1+d@O z`5|>~5OYT#;j4w&us0_eyNb(^CqLKfeftEJG#H?>jq8}t=5Ls@}qa z)$!Q-T6ojfBses;4c*{7T3tL8@$LtQ&_n@6y0cyi?@4SCe80C@c-O_6UG(-V8pEhV z(v$7@_SSM-=k_0JmAi=!8_ zAKilS_SkBu{rm`=rxfF*V8LgVBC()&Ddf%8$2OK4_~c+1nz3Ld8vD-=rG_6w|3zKG zvzLd^-`)k(V)ia#&3F06DagZ$S`)C96fG-AnDy=MX5joEmA)NUKbkiv$K7_4=B&?`(M$S?bnn@C`t?UAoxZ7?UacC#9UE!MjdHi;$|Ll+DHqyk z$<=adF}jsLjQLI1tZJnP=Va67#$5i+_=^tD8>EYMRJg^19CxkCh5IsW#g!asr44Nn z^xWi2H0nw*4LA`?!x|1!N98OU&6iV{6+fX1b|`Sf&V;+5X2U7nFz1SveWqJ_Hc@G< zDmvn0E*+D!pJw5G^sQbdZ4dZFU7ch(&BSTk?a_o&)tkv#&oSn#dPZ=K(+*PUIhW~C zwZrrbyOgG>TT)?T0Cm>vr9ZVNai6mcxiY)CoVkx7_cTMBoA+FWo32npM}6(3i*KgW z*>~qs_p7G#xW<~|us;$)H~xI;`I{p@<4T75V| 
zkLktJ@|$J!xy3K4GFqK``Cg7wG*jUY>Wt&sqQ`M3{Zu&TjV-ifK|PI^zeE?qX<9n= zD3#fDiMrkRM)mc-(;urMB&HFU8O+-f= zsiTkVe^43spY(-`0yk!YDwn6Ez-3E(rU_afX~dLoR4MHjUCsYoW7QQniwz2#c8CIZ zb+s(FE>4GA^GbvBct4J_mKn?COR90s0!6NS@)ufX^pX0kSKuyBoy^@EVal1#G2<+G z*F)rjQJj%C?~6!L!(!T&}4m=hQbyd!8$EkA8970;L7qovrh^8(U^`{$G?i z&#G=(zp{yrBh~cQtnnOUI+b${*5FDHt8w4QC~|kSwYe+1XL7|xlR3}%GTiP{Kj_x+ z^)xy}OkdPC(_OV%-12pExLRLp&OgzDTRJ$33roC4Gkj0dxuq#|$~+O>6gNQQN``1a zWfyhM7@!A@r*eCKE#;2AT*%pNBHY(|rd+D$cUrNdggPBKPv1*+(`J6t5U(N4ZCmpn z_1mP&{oHTEneB1mLSM7ozvF+Yn|Uj>yPi+i51gRa)UVSL^{2FA2 zm~sE1S)4uM|1Zc~qOG^%s9(z}dV0%lI{8*4tx753bL0=`%GxgKc~6lWcDzO(9`&Q) zb%r!;+bFth;bQ8u=mfnvt(qq5mC=o3F445|YHD!dD_y?%FP-}B0iEl$gXZk)Ad{{} zkr~G`Nz4*N(=u}Df5$J;4$o4$epxMbSA9+=9+2jCMM!aP67NzvGlqJsnMUm-K9l&D zGpG*Mw56%~56asic_B&3{aXxjMSTA%&*jPNECbm(tbeLTLiz z(~*+ZG`q5n)_Pv2R_Ci|r}ufP|T?UnH&!qRSoTPOxPtpIp zE2!7OW*X*wo0bYI=(6OibjIEr)XS!xD)K80cs=i;YvX1Bx z9XpvSz7|#?*7i#kD_=JdFR6MdI(SlEym?NY*ztL__~)$aBDULgKcc}`^3)kQvR870K-qOQ24TvU}ayO1oD z3nmHa4WfOH1>!rQTf`3TmqiNbBXOQ|w=z>tL+qH7DQa$$q%Ljq$=liWWR>YRlIxpH zc6Nt}6k=k1509eUoNI{EdeYVkL@YLicc;zajn;)QoQ#euWG ziMxI)Rl9J1#NlJki#z{>iPh>As&_RgS67@>t)Al`68psDi~VYjiTjlt#WfPv;+@gi z;==d0#TTU?iv4en77zXWE81IGEG}LbD>j^BA&!?{CDxsGO)UOaAWmRB#D_006kBR- z5PLQTh(niEhbqwNIvGbv$kr zH#D6Q&vC30%XF%XEB6S*YLYqPZSA?@%WpS{llpqZPGv*l4c=$OHOZ;sAD!J|)m5Lx zCf!HGtM;!I?|(H#G&DR=b@jy}u}qkfIILG7dac+g>YX%8)aN`v1|Id1QE#`9ts}if z>&B~!uD_`fJ#~-~&ABNdlHH#y`g5^`j9;Qg3mf#QQ+F;o-=azPS^OpY>O4s7ZXGgq z={^$6If-U&`z^ZiMZW6gG$qOwWBK{xlM~josp_ledC8j zX(OhHPft22n*G;Xe7RzzxNF@g@d}q(5mPr)Z0;~qTw%Rm>{yj9w)t~btiSSt*k|Mg z@uLeV;=Q&-;>tMHYUYx9b(yk8b^3r>wbJ)7)kpBi>bPUt)w^w{R*x<=tX6XuR)-as zR40B>s?I5wum08QTJ4~|zB=~Jw(8AETdSK_?XCW;bD;X*u0z%DH^fw1ERC(+mK#@H zUbm;Z$ltuWb$JhSVQxC}us@k8($!~Ei#6HkfOaPC=uM_Dwvvf#xyktY>}IUDGzCXk&SbrM&^V11M9(u%X zxjB}7uUx=dn8YB}qD{amtRwydJK@oyAyloh8k;;{huh3fp%+{OD_8xE)h{q&{|b4x z=}1Gi_p2N_!S`g}XpN=wTvO?q+xsA?iDJKJvryUQ*{p)xES9P?ure2$nbRgGm<8)2 zS+B|W09E`UflLLR{>F`D-rSE|$R+sIgW-51_DAc;zj 
zF<_{xO+?$pV9->AM;!5E?fctV-ucbGn7x>p?ykbj@oQr~NLaFg{+os7+7YbUw-mlR zEQ=isHN=PitN~Y@N%)L@7+PSK&B{dTv)kwOG1CuYR{q8bcJk&9Vb=bg%)IAsg#krU zOi<2R_V#frY$3A_1^Z4zQ(XVDN@Vc)oTsQvu_R?Lzon+!UMbD$L6(OO7M zTW66UYtIn-qpwKn_h(=>E}BF{{eay^G)YqHPU!#H1_QfYNqJ)>X*HQlBsGh%VEJpb z!?7OkR$NB3PsPDSsYr5z&kfuhP=^CQu3}SpU;KSZA2#&)0^8ThfUIx>+AMX7?W;3I z9|qDubim9@)cW?U}>s);|?Y%35PJyZ@=M^07OrRak`=HJpIVKam9G zxD%5w5q7P*i=2(}gj!Q_ExnGVtIF$i6F!&l9^w#mKDeCLm@XUpzg^_ zc%`NWQRYP`#@j^Z8P)#0wO zG0Z9ci`)l{*i#$#3Tg3Sq1!ZnVNygl6YTVlIb?BxakbNDUr#kdwlz)Y>48KnG3^7c zzSoDGXW}`FU1-^hb12>K58+d=(WI?nD_`8<0%*VH|SM6AY@R z;_hsDv?(s1owU1*9bvE)Jsi6kf4FP}lWt9e@R$#nOOk}y!trpwy$tfLE<94E)&|1(nmUG$Gp?wWvxvo_*&X{+JOfr(_D2TOi;E~H74>#15qB(1#2=cbp~ zK=|Ppgnkq{AM8Fc zCQH49PY%@ye{D?TS>+h8-kJbb25CfVWD^ zN2csv3fHsW;n>Nm!S0a|HlM>}?7T>FeQPWchq#e7&JLWXAee(+!Q2GCZ&&&2L2i7J z4>#@ZJ^JQj32{I95)Nj{kVcUoZa-jxM)j{{^yYg&*J?j5@JldfRu{~DYud-{&H7Et zo^L0s>Tke@pEJR08G^Ig-YB~0FLP~KhT!zWaJEA`j1UatGMg)?WLGAep0fb$OG-yv$s>Gj)FsqBER8hQ$)OPuu1NCB zHWd7BF;4iG3~bbIoO~k_ryTUbBjbCKiotU<ystFr;ySA*^;x1E)jda z>w#tr5ycI2=-`d9^x~{RqI>W(iC=t>$Skxc`F!qqTVf@VX2;QQcq#2);!QZ$WYT@( zG|_2FBfgW?lZ&#}(A1JVTVviI%x{NEhE4-={Ww|ehO)8 zj=($KK0*t87Bhb??h=F^#{!+---Ring;gdz{tYr=87)~_5cykC z8*jP5(^4?%{&;5dS!1?%MYgx#|SHX^O$y%4Xlf5G;&#zf$#T-P*t8G+qPAjv9I4?m37uu_(nN}$#=ZX z)b5|bNIX6)y!X~h80@UTNDOafCmNL@8_eL@@=MUZ!6g4?xIkGx=EZI`7Ln4ml!Sz5#*qUhv`kz<8 z=z!x8q4poNnJgkz;|oa)_k=80+e)$xCKH>)V`OB~W3n?>hPJ87(A}5n$uE}!l^fb?=ot#0e!^yynN?|moy9K9G@*51W-r!GJ% zt3c$sDoAzRF0%VhB8eRFkJyS+s9;tGtvaAhjWo}}Uf*66u%m%JYh#a7{{)hKHfluO zSc=ZCIZlKJ8cD^N{JiEQ!@(Mnv9)j<6PR5o$)M0*x0?|Br5~e?J#X9BHD0i(j{8E}sR$Pst!il4( zVjRCK)QUlp=IY3Sm17?kSqabC$uQFAezMPZsYC4iLdXnMC7%wCAsWr{v?;3;3SvGp za$8TKA#wH55ooyB`rJZFmB zTCGk#Jjk>yFU0;*c~~w>9&ap~!hip!$YJGelr-Za-oGFWn7T=zG`|mzf0%&_Lk=O+ z%U_UhY!B~L?Uyb=R+l8v_qKy5 z(QK^X8IL_*_u((6Dsb#eV~p0fBQ&4qFl#ko>dic8p6CK*zhnSY4cK*x??iS^!S<*BN)QzMdPN_;~{U{wx?roLmCkjq2puWE=9lk&uDN4p5a}2N8biFxE{2PT45J z;NT#h_UH~C+SP%to-~ICw<4jf^e33tZUH9J5;l+C3fJcN!sZhR;Ncbqe#4PazC9OA 
zT;G87#$j0a$C9*~DUw~QpMYF(21tIf0>{fXut|F~#Op@Ele76?=XMuPeky~%l`in$ zLM^^lqz?hVn;~n#V!{o3k)W(zXuFmJCqg%af%qew`?8F1P6=cZ&s{CyZ1J-lrZ~pp z8$R>&12kAl(M{FzRP{y}*%QBvEL)!dKB+z6nQBEGlK7ne>Ct%mF=I6Kx`_AIN5M6( ze_(&>5b@ujL_$d?gjXwrw*3rz?|26G?didpOT&;}WfOB*_Z+irEk!dmH*YJ&!QU&k?5$NDUKKw4;6}&adS=- znk-|>O8;j%zQSN+0^HQZsvVKKY53KbE+GBZSqB>^y6k;YzWjA0v3 zHL=p;Q_w?=jaaLM!ZB|GW@;bt)xm>!XJ!a$8~Ks#J8q7DzL4ZuIe(;VV~HL!@u*s1 z9`>{E#(l@NpgUa+6dy>yfN>EXH+v*L)AoaBCB9+7-3#D5QjKT+u3>q-c5F9M1&kFG zA=y+3QcOSLy(Y?#A>{Yy3svD??F14%(Cyo$HFc`)%mGpxqBSxYa1Xd zURiM> zmrKaSqHy{yHj_rL$^`nh9N*r%0l$$`gwEPJ9CdXj4!G!ydQKc?g3o9(DSN}2Rl|zt zo9HCmUuZ=XRQ13vNFN)|iNpJs2ytMgBz_@X#6CzVV;j8M(BH>*(81h&te{Jql}s0* zS2@SvVB{1C|0M^@7V+IG9hoZMV8D`PC)r#@%R*vN!@oS9U8;ni zH=g53*Yz>$xtKXT_K4;0xv2vA^)*cAxx38R*i0t(n-=r)*chhC-+=L6bcGFz^TV%w zri1KMgx4MrpbyL#X1w`vVKQ+SzRJGOY&PG+NS=#hwr&?O3lCglGRLd2hdX{E<Ox z6b70jOQAR8A>44^}vQsw_SDs=1+Pjc^3bKL{5{tJB-@pL1^VkZSg27s%f|*+Shb@lwI8f(+-$b> zTMv5nRGB1%M8mr<6>w+n;X>UdxTG%!Rj%8P^*-Ff*}A+s=>)A+-ja$M$>3^RG>r(S3#BxZ^E&c1U*CvPg5 zaan_C7>^~8TLF%h^YL~o1EhPt73FO_g?BXAKVQcn|t_ggw@rTLleqz^}Fm!K;E`EDc0>Ay*i$X3pvxAwvEZd@u zUXI9M*G+!JsK(2&UW-Ox#rz@sz*~gB^p@b636{k3Vm4eGXvZrY(wGjsM=&}1l|VOM zp1Ckj3GKUYiIY#Lz>L4+;EY*3D3#XYHK&X5kc1o?ce4TY4{D;Qi|qo1){(;fO^U47 zCP{2QVLDbEy#o71oW*}vzXD}>UkDA_1(O1gvZXG4NDv$hdQtnZ(bNjIUA=~Rq_T!R z68e#~84f|sm6PzbaA_)0R!EMot0G~1cJpDpEm@lGPW&T0$*3XTH@faEYm^a>rg*iZ z<`YNJnN)ck>wlCuM@}QLykkbsYz=9g`wz4hC&7Z)Z&>+B6PlH4fzCxcBX4ghWbiA8 z%{wYW6Biqj2;&U+G-)@SdVCygc}}gAcbi>QFu+d!`%yu_Rwg$;0YUS%`nhXXTx} zh?;aYgh@ugk%i81Pc#E89?S!mp@pE)6AJS#*1^3PL*k?pMhbt2l9*a^a;W1JQQmtA zq@EhVj(d~9d~+f0tXG2@5&_V<`4?E4>?KnDQGDOyd9uRKgS-k|PyClz(xl#ZpjwH* zP+tvlcbnqf>=&e?yccKlxw{P^fMr7h(G10DI3be7(Y-=&%3DhcE3@F2P7bt{9D|P{ zjO&j|;=v+YyyI34+jDCllYTx!xU18YEqwV0$ppQ?0r&IZ+qpY%PbvZ6dMWI=>W1&M zg`!j94pzBXUl_dhn$@_%)q*8X^O>XoPptKH2ut4|gfofZ;PRg)sS%!r(z1HU?wx^^ zJ1WqLH^T&|NjMj>QkdMJedZpKwni`a_7H)t0ghd*d{GRbZS z*k+qI7%KT0^PTZLUvi8NZahU#J$M0=WRAkA#!?);;VZ)~3}wf(CSXJDR2*0R2JgT8 
z0Y@?XoNAYd_`Mv(Ss(aEb@+MV_;N+qu{{n<`My}m=5U;Hya?q#4#KOw-LUxXG4LqN zCvKZw(WvPr+)cj;oT-TgltMB7HdzkxvY*4%)2h6aIvm>?G+^I42VxW$PO05zny)6w z-TFA1lkZaC?v0HDxs7IE6a5@7duNmx75i9XIAYMQ%^% z+{+a-_5pt{|D-|eyC)L2>q|+p8$*1*z6Dd0voL(L1g@iTJo_9@p6`E79^M&23p}*w zh1m||TgO9qn0gdm*FD956fR-qN9V9`TLQlHZULSg;)5eB8nD{*X%M&KB>cyff^MTd zS#fC+`4=?+YZJ7<<#`4+9}wfTi7#;ai8$PN?Fg=NY{a=n0yyS-9UQh7LiOZ2kjiGs z$+H!3mAQ+HA)-7f_|C|F`qa9KErjm=Jq1c?(D^# z@`})AJ{K0P z^^Zz8;!qR2V5pz@xV%MBIjz)6Z)K&x;nO`v{+0_4l)}({`!TLx&N~m5O+|Mdvr*vw zDY$2!KKo?5E$bR^hmDrF#;ja^K$uqI!I+#2K`%AsK-WJCubyI#h84Zh1PvejC+IOA z={lB~`MJ-MlSr}(++iuu4$2Wq2TCFZn>6gVsuI`yP{P}0+w%2Gs!UwCuP|9k8kJ19 zWtKT>TUwf$TlR|rtO|VY3P$yNGpBdWLgVzc@w|dNNT_VaT24$Ceh}4IhWL*XUNl_I zs@eealpP`5S#-=w*tyN>)~*sk{GKr8#K{fl*LP_=+Ik$y^L1v=CVB|NG$a`d^}|f@ z2OIWV+bUs2WR0L})gGZ{-zbKyUBPtm+K}NB=WuG#W;A+30^5C8pY6Jr#b%nEXMN93 zLW%RIqACev^wf$$WrcTH_f@*AS9cBy78t{NzTUU|))Dm3z!~igNeLhEdAFbvg&qh+D^b-5)pCOiOc6h8|+X%FGkS3<*uH0ACt3$}+1{}0Xfmw!xdb-uozOq`1qAv( zgJI)JSj%Ux?!EenBYxk*e<#R7`wIhT`hE<%t0rMH;}UEYatC`3XX3a+PFUc*gXiuO za7dOnzh6*>Wi^O=dyBy^*((W&TTj|T2hVR4gtGJ5*To=;=Mu&bIRkwfiL z{Ox`g6v}4d86W20QAf>rS8y(>SfYR$lS0tFjOA>qbGxvA^AzEox>&}^Gz2YcU~$7x z7#RvG1Ffu2!3_?<$g>5kPl8Et{;iX%*r-ZAXBKo6dp>?iMo z2#j*{$Fp)@OtS;CC}G7>5AcU9`xPF@$

Gg_^s8n(8Ph%sy6i77 zb9}oj@uoC^@sDsp$9iSqcB?^lWJWq%cwSG#T6h-k+H-0>)sv1T=iq3@UYxGf$MkAm zwXAuzUN9JzX;CRQVJdbU2bu zy(^&F={2OuA)9pVt0liC%F)0B$BDqG3yj~IkOzz&mC)Em6Qtbe)&)LPT#-!|?3+#h zhD;`7x_F=IlRxn7GOyj~F((>Yt3bXd9}3l5NY$4W)Y7YoD36IFlb5Cv=Oz(!KKhN) z?mc9?PP{=acb?!PQ**etC>v^)U4uW%mXq4f!(`9NzhE^h1^gV3f!Q2CtiAjI8?dB| z3B0e1-X~naPE*AAW!-Yv!^S|-zY%1q;ahMCx{9S{F2tI_3Fw7=DAUp5D)dp?Examl zU_T7p#vcx(;4N0s@XNCRpU*di$h9Bwg7$peHpc+ri}O%I`y+$|Gf=;~knLTO%bMln zqSP<$IRB43xM|ptKXGofInRhY^|Of_aW_DL^<#uZzbXVOo)*H^v~Nt>%NaO$MH$}O zhalp`AwY?{Xv^+BTxKi3i&6>Xq`t_L>+u6ZYl&4>Q5r*50;wF~6%`G%_=^huVOoX{ z_vOKlF(Trg=gUp)4CYjvg1I)lnY(WG7GV2J#%X!7;G)+vLGhOP?9YwMQ0L%tWas!6 z3ep_NrcbKet)Dx&U4uKh7t42W3s1SzJCEd0!^;GwI5d>q)cORO$?ZqY%TFRL-aRaB zu!w}|CllXgo9INIQB3{r!Ugn?qXQ>qL$uEdJatAGZdmjZp?~2>^64n-FmEE=>;H$$ zuqq{srDbV(Y8p+ycapw)CQFqs+Y-A=d2sty2q={DZr;x^@a1$F_|H(KdCRVm_bCrZ zRH+KBzdDwlxvE89JT##jS8Gxn|AfSe%ScpZ4A~wZN(w5bkpa|=$DTchk7Z=xuVXdv zg(L4#&({HzYWx7jsAQlkrIDy*T>{$wUk^%c0{kLK5>h%|;JHTCI5=l7KG)xlyi89Z zwUk=)`SyKOZ|H-fv=q=b>1_7!T`iQ+lY=->JYMZ~6SrqQ!n=J!aa#TiJU6-wy&hkK zhSrV3OMBALI-NW=YT{+aIlO|+)q8|O%+KPFhVIzahIdRX=ta_xHSnk1r%`=M3Yt${ z@!u;`k(}TNQ~z}q^ToQ0J(+n4=UsV$OS^e*T%Q4s2HwLt#~Bsq7qfrb&a-zlOjz}t zM*`;3IV*a8sbJlR*Fp=6Q^@Qk?{hAS!5*fOXt?$xJ1;JoxuSehxF>glaQtu&v#b9* zE3sFLoiEcNd`R1c!6Oc_cRVg&^8<%)q_h&&d#Z}Iz6@fdUknRp8W6sw_?Ok9(22s| zAMBaqbTZ4F8G*RZm(f(=WSAas0qedxghtHt7rJ}7Sjv~H2^(Kt5|(8w7WN+ZW~$y3 zcH!z;Hm37C>oZdl$Np4@^U@mNbw?77Ry7x%3>&f#RSN`8<%+E3A$8m%lZt{G-ZL*g zu*{^tcbU_h^N{lPhghI@20OeOW*_{x&6Ip`6ISiMDl}K-n19b5ke5)3Rcf?i`sSoC zKI;0cVOt~nMIgkX^5r|fp)edh}H zbk_pK4kNfxdJo(E%fQ|DGjSz@C*)n3yJDL*EYn@+cW%f$E`ot6LK0*tu zSDb+PyhCHn7Ck)AF9WTgERDl9EW_D`V{pM)KXlMR23`Ffj@wN=U=q0r4QF|NLs5oE z{omOgyB(Vt{X`F6t-))e+VP{EQuuJaH`-txjt+J{!;g11LE%*qBu*bkwC4T-FZFm( z=~ISzvHhsn38S!@aMoT^z?=$wCd@~xSmd0ER`D)`s}3%3`Pz0es%S5`7H@|S0u08p zZ2lDmq|$1G)e?2UCD@RxfA!_tV2Kt!{)kge62bCs71R%o#=A~F6P`ReXtDKlpVeH~9CmMI z6T0T3gEX&EcI=oEc8WwP8xTGL7tEOo4eAl#oIivLdmPzL6C(7BZx*gNrN;iblY~>E z8zkr*TE;7fe2PQuuJr@ec>cU-@Nw7t8E|g5% 
z0ZRhgpmF9Kc*y6(?|hJgM^Cpv*N7z$@8E2 z{k(^_yZJY~ev%H4mVAMMxfrBcBeBD{GdNP)1b$|^K+wtw&_s){f&Vw0@3#|z7fvHD zyn{#tyO@Np;5~>QQiREx4Pu@%5KcY>6&^gRvZe_4>sq5PmfHB>us%S z4t^^ggRuo2kVlQldvyWX(e91a1`=4DZO7Udc;ZFzh7^Ts(#Gr=Wd3|va_aXDXmFSV zl}9GQcpo`zW%(AL7(IgooLUJ#EwfOoZZPhZdk!v#L?rxKCgF}b((jjo=;F=_Kxrqz z$Zj56e=Jv!X)=LH@mRyA?vlV=x=uv4w1ccW{G532I6@tYRJfZ?m!W6DEGG6+I69!1 z%G&o@3M_Ul6)aLqXO>#H$c{J4oc|+x?#s+{dhT~Us?)p93N3g>@60po zu%Qz@F*Zj-o}oCQb~6z#tS3s}l}Yo&M*20dkBTQ&QIjk8$eqhpyg#50`zGs=Ia0if zsYHTE?(-t5^XC)qn?kZ;>lF~bR-(p%>xi|57D z=*beiCq9(4)Vq>B2d5GjVFFS8^BpuI{XqV=3b5V}@U*otFz@38a(2{65@0csL@wd^ z!+B<8N1hXD_`wprH3p>ILXLdjdj|wR&O$~~2&nGzfuuY)-qXJh{;q6x|{FVnrqN{~J$S zm{WxQ?gZoKL1bi}KPmRvPef=Xsf{Qh;b)GJ2MHHpPJ;+5%v^^L&-nt=l-y~$?kf6W zuP3?CQ$k|g(#V6LZn8_|H(B&die6t8N4``><1yhG?5o%`Hfo#-xC|_!7ELqgjVJv3 zt*9XNF`tQN_iDQ8&j|9w@i`ndxrJ>iell)@mjvdQ^o8$o^V#v6Cz8(TM~SLy6-->% z0Xc&Y$k1G0;`VPB{&3|P>$+DK*IfUFEj(*bugHu2?is_jNKM3z{?YI=>I7iDNSItz z03N|(V37CQZ2A}`oGzyE^InYmgWFhl?PliZqGX}; zx-=#$TbhNZVzj?P4UG*hX6~wxoQ4H4E}U!DBsa-#8Cm zD22luo~fF;OPTm@v>@jS{K@_$DrD$%5jd^<3D&%4Kiz9L+~)73oqUIl{0k!}{4oP& z>F3&#po@n5zZS-cKr*Zk+qNXnWp7uYN| zC)Nz#xS5CpO!woJlFRVxnpk}6p9NlYqyR~qO-3R6g>1yJkAf9*hpe8ZDGNIUH<_Xm zf-{33;?uULuy52hw4*l-E#51K^ahLBC-uJ=_QW=Jp^H0`**VPGEsJLZ=`nVX34gY6 z7z2Uv?tJd{D%!xidk05PMd!34*s#DHp_%Zym1{zS;Mu1-;fTw?ZX3RZE~c!3ze)%Se?&CrhWe8xWBju9TwVqdl-pgxxFf&`h$={aLs@Re5ucS%;acQzA zLz={1oK6mzc#{buJxRs$IP!U82r=F|hb%Vk1IyJdr|-bnUkrjtozB8fbo0LyP%z^2{?_=uw*&jUgCIcd^0BNYOlJi~h_ zhTqzz`1y};v}h#73zwe8ic;!ueSbZ^^UNH(ZOeuWb_>Zi&wSW%CI6zWt&hBkrDw>(yLx>X!#~bz6l3rsCkA$mOUKQ19;nED3%Vp@#9pZ7yJ_P4 zg~20Fv)|rb$3YSq*!lBixV&T~t~{uWw`C?HqpOJSG>%ff1Gs*Lp{4?v~8u-;$h}UG6pfQt51Rn0wt!`eqD0~%}%cdqe;B}Y6 z2+Ol%JM1e+%7`Eme#C>U95n_WA6U-niB%APV-UWTY_i&EsVKbE@S83Av=_?Nj*$Ha z+la!ZIW)iH7M}yog{jTvkn(Xl8*)F3ox|tjZ@fxGyFIR<6%5AQho`val?Gu3Z;>|h zIdmX=8Q7II<1zQuV0qh0{BZSAa8$?S24AGS4kSMoo*y>j0tTu)TWP5d(nTq zr{rdgDy9;)I3@HLUiP#HaWQ3R+IvOtz|9a+dXl)DTR^At1p;S>F3_p_&eA4BS6Fah zJb%||N8Q#cc#88swCslm{`Auxi!f`eJz)yNB$x0`p<~R 
z5_9rw$0$-zD@z=YN5P$S)^I-~0xC31;p?>)2u|Qg$^!%97^_78OCdD=^FJb&evvfy z1(JrZ3&`8x>p(iHpwL5}e7^P(A_|INj8Pio9(5wYw)~D?=?WR)+eU((T_I;yGxU&i zCLO(7ou0nH(Zc(0$iMHZWW9A4^nd&aE*lP$>YN}VZ=^`JoG$_MhE;ITxSV(>IMQ*l z10-x(0J+-i2!ZDYkc*-YzBwfymOorUZuQxb!1GIC)|+_{xv~KFB8H^o=MZkw7D86~ z!Lz1HWO%xpW&TV@JQ@unUjKy0qmp2wM=&1#JP(I0>_z#j48gf;8MY|t#>V{Ln|ym4 zWH>Fw>MK;S@~V*#pL_zADtyZ*of1BfwPrTf3VG+`Pqx?f zD4ITG1CMgf!yDCUFfYdwiThFnlg3huh&#Df$_z+M*1_U~dpnz%Dxo!>G4 zqv%Y-sruSDY@UV8a|0Stk#qK1&$G5BN|R{NM4F{EXwbijl0+gIN~usn6J_R5NJ3H( ziHb_2MnzJ+`~BwAxi0&hYp=DQ_50oTuI(oDuK7QzJ8?TaCPk#9vqQY#Ouxh7se47` z!yLsjWpjyo+-9=aErZ)7nZ;>byu{7hd5EZNSwW5FwbD4NMq=q>Mtm$a$+x0fZq2y_ zPUZGlvZ}z1?$?<|zxp?m`dm3OaLqf;*UN&gU2Fpqg(dX3*?1~?8BC{2!suzuTU7R= zrEo7F08=XVL&CJ#aI^J0y()MS)QV?9;q>G1E^;KG$2ZzA@ey5aUrR3xkb=z(da(JJ z0k~<}!lNojc-%-K=-61OA9)ZI*Y;!UnfIuvIg05BjIjQpn=nPZ5G!()A~{ro(W=v! zQtetc@}M7+Z25{#!{6YvT4gqCuO8cytj*M`zF^4BKFBJw!xgKx;I=3Q_ELEooA;>) z->J3Zk{OY>BBBvD_6uFw%6K+nuQt=(6owlMcEhQAsyHWe9xfVcj}8NbKG~H_TC_pn zKqYD6hNBX+4gZe?4GU$X%%(Dfn9+E6!ZpFU`I6dHmC$m`f7SQQJ!7FiNor}nRU~|%Qx=il4c(uA8$L8(}DGz^2d$DEORh5P3nbp zT9#lsX%a244kCL#59b~xc8gbT(-+CdJ{;fwMo$zMr^OAA=_9At$qD@C>-4S7CGh{L z49{e?k?1-Halg`n@ljPjMH{ms#Xmg#9MtV+iK0H6bE)l*$*}RdG{>u+tgw@X=HsuZ zhu#r#>Z!5VaQ*`OAKXU=gRH~E`%Nji5mQL6INaxSyQ8`7Pg1y_X>_hZrC}Tg0Bl%-g=IvS+8Wyar;=T zW|A;#zXqeejRr5%{gCAQ5mH+<@Kc%(p1Ls;XKIImx94g~L<`~Ti`lHT{}g*R_&3f< zT#sHtc5ZF|eOTi05Y&1HpvB>17_j;ZZjsu7Z|_QANQ4;QWIsgv?Osg&q%y&^F0fGv7zr(_<(9HzQRSBSJIQ? 
zYxYRHk)tJ$~3L| znX=j&_JclP;prXBSo=Mb{m{YIbUa~2s?XTd>pxhCz@&IluE@VwTF8`5uCdyb6xP>$ zg*7@Te_TELHll#7F3M*yN79*Z zSUxLW)yNVJq8?i#7Q79lzNm}Ki!-r4mZ%NzTfIghmDXReUv&-;91M#6XV!s0P|+|#3Am9B*U>YpZ=9`wu@y~nOnYzzYmiMiU)pTSFcUnK$ zg#o7g&kzm1`iLsOveuX{4lv@2&gk(Qi}d&zVRC$?+D}G3pR=Ex%}gWu1*_{);%C0q z<2!x{yayX~-bLUToZN20@2u41wWki?(?`nkgJTtW`)p}m?Y%UgWU9c&O&Y*Y=5+Y7 zO)3KCS(!Ity1Y`iK40#x#jj8n_zYf}e9~qu{&<`wFS};|KT$@Fub8FIcSkAl*HPez zHmmbFf7ST$&y@N2&5FG1Bz4|Yq{i>3?JOv%VHLPV&c3mc4Dg{-@deijaQcC zXaA?lw|2q+f&IGE0llapILN6AIqGt z%YW=OLC{9S&nUvNh0@X@>mU#{NGVjfEK3F}n(mPyh)=6_^UQd^kmrdsB_w}Dkk zwXBcjrJ2>( z++&T4ud@2&3|3QB!v31%Gp(X~?3AB6Z_-`MPK>W+!O_*sGW!PG`tmX>HOdtBj~VPB zrZK(76n6311!na*l3j2pW8YhTu_d)-?7+Tz?9$$I%+c4E6(3s8wiLv(%U+QJ{~?S$ z8MT4sX!)`~J2$Yq+Y4CQ>vwFXkA%ItbeVlN$YL}99A{&aPO?Vh6gEuw{Ljly3v7vF z?4;Lmw!1fu$u{0$28lnI-{xwj-kQVq7T#h{qN-W$`^PLRx}F_|N9^X_M&>r5jwNik z$24ZtF=qLO&CXQd%}>2!1sS(l;peL?+a`ytah0%|ggZ=}*us8IXk)iEU$BAyJ!e`= zAG5s~Ei6Lxm4%L94xRpC#hdCwiWe+zDz0BKy-4bIg22tQM>cl@a`B01HE#)yt}(+` z-PNGfFJ##sZIqN+&XsKX@sUcES`q^VGjt4fMYSs>;9B#V4stzQ)M@^^cx|XyVpMp& z*i~K@lHE>8t_~Pevi;L@iA_eH#OP~E@$iy!McdU1ibG5XO9pktNY)irO1`guA;~Qb zk<7B5E73NOl~}A6I`Rt!l?+=vpk#IB0ZBsoS;_j?YDujAfD#Wa1Az^pQ*v&Heo6b8 ztCG2y0g|{}OUdL>Q%lxuvMAYN@mg}dtU^*4s8r$}dS5cd>apZV;2lYlX^o`i&r!+G z=TtK3!sFsi$={)6dzoZhOQs}eYku*hd0!-sg;PpmON%6*O!rE1?Q$ei%t5k=jVN9u z{iaxFuVt~tP8$rJ?kH(k@KDlHXjD?TWJQVMrMQxB4_!*W7k5d{=^B=#Dac3)W*o%x zTh7C}-q_;qiE}Wpr=uus_ic&6a#rGKw4o$EC9>q_yGbQm@;^yFJyj_==lrGco5NUv zf3ymR$7eu8)h%AISHt_-^d{x3tO|M|Gz^nM%Av_2Fbcxxdc@o*=2uVfQ5ok58D-IiBD5*SkNHW8E zP;sZy0Bo_bz$GQmp@OOxkJ+=R_&&tq7tR-7?6ij7M~5-rqYJhwcR^sX;Pbm43Cm;W z;-`Dj*d#b2Bv(Q(YpMim|5IhW(nE~eyadya2Vt|&iFFj5?H2B)crPLe+r6?eT6QJw zF8c$uQBJt5BLKHfpMfvTbwH|0w_vSlXA!4)rTA6QjN-Jez@o~HD)^)$5jP&pX7{}+ zSY*{57A>96=2oUNhh3)`Zb@J{CdsVoR3aNbF_uL;o@Zf(87y5nf(~opV@mHQGMxxLR$o4a zjmZ-;*;fk8XP!Cx&(4N9E>L9Qk4qr=)iPXpT9dt4y@L(%ILi!l7P50i3ha?e7iubB zDGH2bMZ0=l;j{a}n7wy=alRH7Z1(mC8(ybVl>FCM!4{1jCy?BA_y@ug&7 
zpH0btD{>_hO8-grFG-M$4%;RPjE+YW&;QteePdYp`d@h6`Zo66UxI73Ga%u8kmTpc z?UF&=&%oDdxNwxPW@oci*s^aRu#e8M{i8=QgZkmD^YUJNF_l97%o_MRy%A3=v%vuA z7Tn@$$UaR$w&9%M$qvwCNp~ZdleQXL1cR9Fv@*#2YF?Zb*@)6UwK(e16-oUyM@jkA z%R-Mh1>YW!FLvA&LdI!LVlHb(;>UxEc&0GDIKep>p5y{rkI=&E!7`HW`h(Cqb`sPN zu`5!@bCCkuoH2=oPN^!ypood4!k?PPjShe#Be6n?#*8C%M5#IW;dME>Gt^ z=NU7CRFq8O+Ba5<`S8`^8QW)zLpK|6m7hL|i}D|cyZI)u^Fs%5v*#a?e&tWmwK-qK z(;{wg`+R)4oq3nURo+43cxitzH}NYc4ajZu^yaLuj^%=KdqnO6ab8;QoL;KaaA4u+};0ji1mql7&qq>)i3=*_m~;c-qY8)v!Oxa-~~ZqpTf)B>mi#s-40HC zbrC1Nd|aGq`15KX_0pG!BNA4F7)2{*Q3gBaCwxUQRbIEO=~;z>0d zMMZNih^j_eiN|f+Kr^P?Bfk2t1Q)0Y{qGSa7h;ETKTPUH^Y{G~X|gZ{<``l==~l2u#(?m+v@UT9=qRm~k6*XNomfH91UF7RHa`wsY#2Msaa*)egmK z{SLQsOhgxR--~-54&$mzuW+ktr*f8t^Ta(zeMK=>mpZ&{{VMv)59eA})^V;@vf>lF zEW}yv!^AE+d$_(915)J@Lef|TSFl}{)6Lv19^igTlq$1Aym{DgE<$fN8G1Z`%T*UL zig#yn1N3*0tL}F6Y1An?ZjT#1QdLX(ZqFkwQdhZZ`A3`&E9Z_XdPB0q8aQN_4|!?Z zV7f;xEY{`m`AH$8uYDCi4sAwjvnQA=`i7Sx+VNAbF&@ZRiI?P`!Tco(D0-oZX59t& z;N?HGQ@D*&GOJMj#wb>1zn;A)FJbmcC)nG;<1r#F9k*Z+Wn+b-jyD<*8&vkh#9Xe2vZsEGk{)}a2EBe4E!4djehhoIVlf=_Z5{wv#x zYf58r{I6TM;(h^+Tz3k$n5AIjhGf{c_W}NQMhE|!5>1y!m{Y4Kf9auiJ*=EEmW{#Z zY@$~)+uq>JiY^D>@G>Q=NOOY;7u-HZ^XAem7fup3+i;~^zJqa86jtiQVMpD)sBFh|ikPFQQbiKh?FuENF zJC}`s=UF29p>ZT>STm0dcN8FI6Qzjr&VDZXzQC_s_>(+4I*QsR9--~eU(koL9w0hd zK(lw*(M>zj$$wW564i=r_=>8ehU9oP#uF7(6JfF8J`?v3KfwlKjlh!!0l_n$tkT^3=hJa(io%bp63!LR5gxnU^fbPZ3S;0K6$ zfY+V2VQ%YBP*(m5ZwA<)Pq)D6yzB`!ahqXPQ#CBi^2KvXi<$o1#mu&}giYBQ%Y3RM zQ9sBY=(q$hOpU^>b;;O&^)GDFk;gNYz8EAh2CDm(vSojTu9Ui%|GUziS6(=XzeA_9 zX944xS5G-}(>lyjZswzJdn6`#nV@>yM9`dTg1=@?WYz*_&-(Lvo})YYM~|lSm*h0~ zjNBA9qjnWLcx5Z@)eOVCKaRnIq!+~OX&#*`IJkZ}x-eaHTYh!vB>woBOlBYCgvA=o zG)%(;d@OQ6&*To2y;%gjaxXbx5l=)3g~UQT8Tz)CtTJDtA2af5z)@{Dd8^MoGTqz#|OO@><{3c(dhSi8;( zKin`!$GH;do4*_GT$}^(4y%N0(-PSE+ZJrU42F{3%V6fLVi<2Ak8i?Vao&{WSRm|S zyJco#K50YG7auXj^dY{C2*XoxI(X^WQ#ii88D9@mJxl&lz2oPj-jGAZ3(@Tp}$yq&ly79$hYNx7((}9C`KUUDMt8M5iuR3aN zPyj<}EU~>X4$>z-rFIkC=>zkDbk@gZ)Y(TF4y?1X&Olr8Z}vUTc-?bxX3|-qfR`z*|LDv0 
zhpLcw!+Nwj}QgDwlo=C=1v!UI7=H77ZGRIW8A${)40#$hLC#U-D@{S-~@%Wir?6* z5?h>$7LVQgRD9p4ox8oofm$hzh3K<(v|Z-_(XX~4N2mTKuWmBho+v{f=lvvReoskl zW<61H-%MW3Q6}-%l&Qu1=OBM=57az8LZ9B!psT`;QaK%am^AnRtba8E{OZ0?mGTR8 zzwH(}(ax3*o0Lm?rVdA^g{nBpsRM3I-Vf6a(&4qD;I6lx3+v}}(8a@U(x9!c=#&%< zs4OKgH+nNPG#o)G)iivzp&I{1r=c?|Mrn2v6H6{3J8%dM(jqYM_d*=Fb~tLje+f5q zO%Ts~K#bI4?tc!lEn`ZUXjB{PbW3N~k048OPetwg8#u!1B-$Q2f?9qBLKp83j_c6F zz|p~&M7QJYK6R!zelojuF_hiyEN5Rjhp{CVcX5z^DQerrW9qPCe10qkpM02vQL_V4 zdcHsIy_AFl(ibqLMW>k5ha@((DTZxrw_x%STI{LpU^cg7Iy-j!B~D)#jfIIN_~eHj zTgf>xU$ygWlLoL~*SD~sYzx-usLZmv{Mh|GPxkm|2YBGL@;@LG+;PND3)ZsY8rm1HXGOCXF{`Y*&?_4Z^G4*Lmvt-K8FZA{ zGAGOrodYT+A80^W5=`0r2roRph2E>GklU-oY^Dxl2dCJvZjB@Qcm`i1U>4A#H< zK`UiaXw*F!sCdx^DHc`W(pCw1N`cU>ycohx6~i^tYhbn86!K2!!_sZ35D+r}?%KSh z4)I!oucQ>*jkke+`Yo_fJOme50m2&UKd;bTucT#X)$dpZ!c14OvNb3RTkE5#YXA$YyHMDS(i zQ`gy^aKOO^h77()jT*GUv`7){6F*|afJS_nvJn3X-0{U8y0AdS5axCDks+xLMDds& znZ+||M|Hs8Hw=afnZHK`bMX6MTdX*&k8S(5(ZoY<$gKLEWaUDh`!M&i*niPJ(XQ(~ zBG+Hb$R~Yoijj`6(=i+t7cB(my2r3)wln=Yej|z37|SV3HHzEg_He27Q^ohb=ZSq| z>dBtX(zHrxD4krKNWC;z`(x;iDT1HQY#u2+IFaUgOo0neCDeY|ar)QNhXxog zp_x`f?{uBuy0}?LP+=O zJ|Kxca8QPKrkh}O?NZoYwHEG1Z3MH}L}2$`fpbkK^jXxw;;1L^;qzTMIWipP{oD*U zd}qR`kJ|Vxb1DAyAA-tl^Wf<79x8|}pilNLEbQ)w5-TgrNVCMr6OD0pf;#^9#~PNotGyYRJ3sqSP z;BnKPD*ql%If2)b8f*lw_tis2?tDDIV?AEGI|;>ou{cCokrgh~MboE`;EMe@(AE)H z1D=T>)Px~w?gToixQm<{5J&s|2EvMWe+70=Dt3JSgmxxf5PeG#welwjYru9mvUUwD zaciT+@B7Hso?z;EB$PH+n9$d=#Wbxh3nrR8K;PDpxH8@zeeTI%WQY{anEZrjZ1EvO z4IXim76j4px2ov|q2*Axtc|Rlcm^8zN~DQDz*c!Cymu4a+j&Z)S9LFOxWAv2Ppl>@ zT2&yi;0BHO`j^{v{uNg|VIjDv%*KE{hFI044{sK0lJb4MV%=12^0sjo@!EZkOtj=l zo3G{TIeCJ9kuM?dW)_jQ+<(O5?FKTk_!wFDB$zlp zJ3(6Cog%$aedPF$ewtoT4fQwAfzsz`@awnW*=D8m%auhyz9mE9APG4Amk48O6QOiy z6)d`YPHjCyN=y1LRhEBy@^CtiTp(m!BI_j7o3`98##)q+RaH&`U+hBaT} zvF36F?z-)O=bk9Bia%HIlw&enoX|-#Uz>pT$P_Sr{|<)bti@ql{V;riGv0q~iJvFx zqsg0xQ~Yun)n7h@FN^fxl9U+wO3dNqcqy2fA)LXseSodAb#S!NG@L%(6Zb#h zk*yxfwyem3ol)oMh&m(cBKQOH^vz(_)CQ0^nW4A02z!Nh97qPCY4}3)zB&t~Bj+>O 
zliP8l*Hlp71ynZcAa!gU2Y#V3SXX1gE(wskzt$56JUE0?(wDO72?ufc zPlj3rO(1;J&-4j+- zFL-%XJg`ni4YwaE0F67*_}Q@uSH`BYBMvTX_Yx~ss$|Cs1ZIE5l1_HJd@{Q&Hv+>p zPJ?+#?+Cv*mkzVL1Cds9asB<@SURN~Tc57SC_fiQe_Us$TJ>1Sy|Kc4pbc!+rbC9C zFYNPGL-kkoI4DaN?OT^Zcj-#_cRvPthS;zbQ}(k@J0D@>Xu&_EV~O6H#yDX>J=9rc zzyO10s%6HgQlcSQczOmcw+1-usKKIVtzvs!V%fei3T)2BaadGy3x-K@AoEoSqXpp=v*K{KJ8Rx@-e(M?*e8Q$g?Nw3vlk4Zd_q^2J9|X(i{2L>44^Px;o5N zU{m&z*Hur6{>3)>Fs>d#;!IHLpe{bzXeK=WK3HWn9R7VB0df*I2rV;({WCt({tz8V z6*6yQqK|>=$TFxntBrPlhv2|dv$62II*!ath3`)jJ*GR#dzAW@-Dj3pm8Cq;6!1n!qACgTuH2k_>;Bke-ifE3^d0TK*6UlI5=elHR+8aW95c$ihIJvt@ZCjD-TcQ z4v6IG=D=dI!)O&ToK;CyXNlo1cOEvbQNz#&O0a%Bk~spGSGH*(_ul9^XPLNxIz}y} zOQfX8s@eo@uF-pL{gaDysAxa5%z20pI%Ux8>^1sYc>ljWE=PY1|4wIi6~PnCrs*>l z6P!JTJhfj^{ETTNU1V`1LQ#GY=FxD&YKSQ!J@7LK7c-ba!;b z2|}MEC*GeLZnS{pOY-3R=MmNSK0!a5dQ-PUOKGE1AboVYluprAfmeeSam1Z+d|NXH zmigZy@AKU0Y$p@?a=R8i6X`?e$VAY+8ZBh{>uBPB<^(x))_^*!af69=Qn;tLjL!J0 zMNjQ>rGX2*$qSi_V>32nwgG+&@cOGMePH8Eb^tgls8oefs@6u@8OLG{z>ktiSZNgLwDL&dc7x#>n zK{j&}9rnSD&K_z`^=qQ&op(p+Ur3^{1Lfe&K7qwypGkw`l5s?nIlmh$dH37r@XV%< z5Fu3zuG$9ZblVFy?Mj9ogLTO5_zLsNg5d1wCdgUViNPh&EN@>Yo3rH)=3FjvSSZ5UAXIVC`^eURW#=u%G86=)Vu*T&%h6!C@Yg=umZ2b<*=bfbS#qzLW zK@1oTdJQM$W#awAD(r;peE4}!4HORxdGbVo4fslg-5nZe{AHQo=h+M=UooL8U5oOj zap?U#3wK-o!&?`wLBrBw*wK3s9vo>0^J_x{Kj9p_D2l|kO`kBOWeU?#o68<-8^jFX zb>c0x40P6$!%X3KcCPp|I=fC}mnDqFX6v#s{C$iaa2Jo=(_qU_Yq1-9>hNR3F)Ta2 z1V0TLhcBl6g`Q(781B0sk{=0~(~h6?$lE+hhAySE(>K#kE8kNM|1EG(?mUc+uZ1Og zh{xU9plV+a?06djJ9lxAGjcdQjwz%q@l)w9nRWDg@&&4&F&mnkVqknzIs^zEy5yu} z@CrKx0UKQ4;EAyiyjlguua<>b1MT5(tqZJlRK)7Vv+=3Wd;KfUg|ynk)N zDeeu366d795I3h;avs<&e(v#BocDMp_wal!={fBTFBkUGC#gZ?so^y)Nj*+nQom4a zVjduRWve6d@(2}wopV^c?)M(?9hn3!BuroQAJj;2D)OeL_{YGt%XiFUneeL}YgHBIlsl z%Xu13BLPXWT+n}(qRro}9BkIC5#2a6hhz-CMLGL%aMS8BF&q-lmA{j<#rYk|=E81Tka;YKNVJ`3&*>2G%=${(z9&(`p^r(u_zwyH+efEg8xQ$? 
za5A9=LLR#sRGK8|x736B?5$;BDhY%&a%nK&Vv5i{Jq6zPdxZH$GIR_Vn3-QqQDz5+lP~*V_~&T+ z*A|KD-)(Tfk>#kkPVhs9D}Z=~Jp5U%_n{z)B36?@gF)P@nXEqg0{aN!D7iFSj=G(T|E z-UxOODgiHn5(Iuh^eTV5yGdXbgP=k#-SHMY| zI!SUw#CtdP0G-hodxXd8@Qvf z2AtpBrZ-PX!~JcZfn9Z&5pWuj*8|T1_f2`nBPR!s#8#tyhJ{Kny8sR;YA=v9=jUz``;nmCC zux{u?SlN7nRhiD`4{C1biSbVU-n9+dW{=%_G<-`dz--hbu>;B7+y1CSD|h1 zTy%(2;g4r7*e^{s_ks) zuL->DjCuUy@Y(!`6NCAULtEIx22-{p;WIv8x|aQp%4Bn{$??~24(31X8N!d6_KP)& zo0*Z|@m}i@!Y)kM$adfNVMiKtnf-)Us8(0T0`l;p+w^w%RBk73@`6?FV1hekz$wRS9JagKb&c<`yig zh!e8u=z)o3(f#{tB+$ZHogn zP0_Kx1HyIn&}qzm3~qXgDwe~TrD6$Nax{;P`*@RSt8HbMYlpCt_94h7mBGlrZSZht zBs#}7V9ngU=(13P+5eT|pBiK4>n$*(6FV?mV<4I==!ZJ(LD+x74PuOEzt}(j z{6lFd8JtcHw?bI9+jp)cv7V>s4c#bi=wD9s+eu2aOyD&4*C^49kLpmF@(XI8dsDS7&*`7v zu{2j>8|_wI34?|CiM=pSS!;hueBYnGHV5Wha)pb&Q$enFCdeM}2g?~3!ANw6j+#+PQ~p)arII8%Z09i=dGjnSeCA2- zpMF4^%g>M=VcA-~?=>w}Jq7axCf+CSO;pD%mR_tLLMH`TP=mqSX-1F=J*g{A zMwC}@J}RoD-Q+S^(RG|o3)F)8U>y*TeoL&kRG8fJ@r>9salhXj?p&KXt*ICa zy_Pa?zUCMybW|d)jy{}M-4%|%X+`P+&B$AuJDkqBlbr436s~W65$C$elvEDeM=dI> z;6#Bf?LJjO4kr4Ny~Z1fsIQ9@HabwZj#s4XMG^TlIhO3*Eg_+I2GiMPf9N~!-*Bn( zBJ?@#5qNhNFv0u>)eO)FORrWM(-TUc7_Xpj&csl;@?LsNR`7673xh)g)4+LY1N}X} zh7MBeqGc;TQGG`pAZ3f;ano!#)fWvF10R70dkIO8<6wbB5u`6a0WUp|)1+EyT3mLW z@T+f9re!W!OEWx>CdPrI`!%RYGFE- zrdz$DJ-gPxlMAkxE;vbU7p!N$R3D-8b3YvAri6z|UBO6=z~9ZvV9Y zyOhCgdMdEv#o+ML7!;2lqPqs>khw$6iKUoia=p$NJ6 z|ASBB9{OU>c)GVIiDcNz(Z=sN)YGdG&Ziim-`Fl_efA9+9>>C{A5I{-y9=}(lu*xQ zAMA3}1)1Lu>7=t7@YjDO3|*ZFjp@Mvl7ryXngz>3vcYoO4ebpx(+xQXqs7Qc`8z{;sbxFY2R%J%GK4x>4?@?i{4#^ES6 zI|GWW65-;Hp(yDH#kE=o@#@9r@a|dyq&Vs0i;Zdc^WaG~DJp_Z_!h_J+;V19Jx?N7 zoWnPf*|>G!0JhW0kf}XBfv?g5-J|#7&hB6M@BKxV7m&%mCzrFoA-C9#XWQ5t?Y+$0 zD3%${b!C4G2|LsJ8Ks&Uan~plHgr2p3yM2Zt@_O+y5P{#~I-!%MoC+y@K`yhYDQnL2x@Y9W7U%z;p{G zTqnG1w-QTCHk`#)ocx1EcMk}?SE0{%LIl-AU8t$6A^q)ri7Y>^M88;8(WV`}bc@Rx z2$ud2E5)N&y7^2NJwkz*dXB~=1qZ+?Ns(?BsngvLc9Acqt*GpWBXnx=YC8S48En?v zfu|!Tv0v>MnM$t-`;zbr!#>}F((fn9UcY*B=9>x470yLpO*PZV0(aPbMfB`f2Qs^ 
zpBc>R##LhsFf~92pN}nri}F2SaO(pMULA?~`CH#cM0N@4MhW{#BM}p22eLPReAuR-xop*U!JQa3jJ@j5p9ufk(m zBhmPG7HIWI!2?4=J!TH2{c0I>yRi#+ERO=G8)kSoB?6MwH={wZ6q_@r3EK0sSndTIgq(uO;6EV#8-sFlLvhpA8dOjP8?&&rTnxY|8>YgpusQZ(h>k!?mzQxc?cPJAoc(i6qX=W64f=3!*Z$oO{8mQr-9;a7mbLT!~&qn_}nF zam5p0bXo$;t^EjHpKa;0mKEING&e5b?jbS?WvJgBEzpg!g@^z5gZIjjOSvnF!qr|n z{cbWiomd8A1)l!&QLwBXZR<;CcW3SQX{b%Wyt98`)B?piwfib<4@U#3VoHXYrMEEv>>n~SfecKEj zx0gWvkmuCumpx6k97L}x$WfQfOj`dj3;Gv3;JVehFj?6P+AipU>+VK+%R&RzzZ3j6 zW=gPZbq0O0|0g+>`I9`jComlM8-RJ#E+A6->5jEO$(09=^!Oe|1H(^};Q@i<=3^`R zFZLjP@X(m{R*$BjHiE_}4W=XVuhDJ#cd38mC@P73C9w2PlO;}FoT6O_XD}m!^9h~E zag_(Tz~|1y$Z;?!S)55i{0`9Scju``wF|kVUdD|_UC!mg0r7#QgE%J*ZQ>RdNY-2Y z;O;eU;?}xab4wlPlUpy6saMNTYMix;duwFM{hRWiz^6|Y9n`88EzkNY8athGk+}o8 z$}6?v(EZD~`HP+rqoEA^ztxj~ybiI8`EKzq)p)Vo_?hB=HumCU38CV^yp^1K;4g9R zfA>Y}Q!b13s+Z75YZrs|^CUVc;wG1Ab47ehV3&3}gmY%m37ppVr(9x505>huU7X}) zE4p}QwphN&jLNEc!6*eyu$s4-m^>XtOe<~3oz3N3Ww{|SF&#k!1SV%2=_`(v%Mvfl zs^ZSOE}|=9Z6ST#G0521Ot#qXBa=Euk;#{=h@qn_tyuYl6qrsSpI@sIC4Wa^G~zYs z%pXS!*8HTqolGz}PdMXTDW;Q6ed+0q`gG$!UC`dT6Y?&7pfov#5}UJBoU03W8=YY6 z>?Hzo(hv_kSH;{*zo1C_4y<`m4Z9oU@#KcnaMr{U?nEnqrDP;LE=vU@pJ9q#Gpw(& z!yPL-&^9Fo?V~o}_EG!s7~V!q9?ewGc(84OnoKdN9F6~+#K4+E7`yN@4D5IX!}AZ| zD>@HbmS4xWk;1wEgbC|9bdueAa-CV1Yp@fKwxfxB8o1~t!^k}g;70fpX!=nLHwPww z!Ae8;wABtS3^ft%|2^@>#m$(dRtJH9OJPe|8hi>3gU)7kST%b;oOZB=nNxY#r}m9* z9T`je_bvsk^S#hx{2Ru4*@KH-5KJ-41mn^JpcoMV$2#n=gSnuwwGI3prcWQ2N07+v z`NTgqhti!&u%LW6%-R|bCY91ySvmo;gxrnG_Dr-dyMVL*%z!)h50jGN67HAlS+3@a z72Vla4Vot`(C^W0*s7|Cqmp01jG4i}Ce`55nZK|t?sO?a2i~if1OVIk_?&C5Hn8R z12fHQwA1PisC;NfZz>jhbyyiU&$IRbg|n`4YG4@ zQRC}PV3@LIK%2PYgBoYfw^ z?D!F(?~xFWlLDrr>WVg)a#RMhcw0==-G~cLnxk@NAhO$0n7TM0-EO$B&(@ci`85yb zP+tv~EsNmPk1$xyZ2+aJV(|SPfUQvyJQ`}mT3p7k!%swP$5Th4->1r&=Vzn7@*U7H z7=Xz!F_?Q|6uz&22eV#(f%pMQFz25!T>8^MpJ&(6EzJh733zcxI2^qD9IRHF;jOv0*wl3n-gUo$X$1)=3Ym;Dmtx@LicD|~jfJ+O z0hm-*g>AQ{;_}!!9Cf@GTekc|Z|U*)V^cW(7@mxUuD5Z*@yV#XNft9yuhE{+VF*Rv zFzuie6TMu-);`c+<@stX>cJee=@YVD+a_X=-zeO4D<0U+$8?LA6`lWPG^FGR9JkNb 
zxUYaPb=g3{S0(WNZ-0f|Mh-}BDq_&7)99Jh3Xj}(QeOu~lauxdT()%5{C)^EwG%-z zn?&dA52EUWBjMEwm=RM6xAMcO-`7?2*1{un=pYknHC+#eDeZ>Yr={r6 zt4oNt^$B`bwgFAt67WU3I%=1#fq}_c#3*b!>GZx#m%WtzUq$!h)N~!j0sKH{i_I>E zB3_KhLr13&NT=-kJkMu11eSt7uqe8ATt_sqY#gaekPrrm;8cDrRPYBVx1EHJ@FtSL z;=q_mnkKNCgn_3+ews1F|2w%2*SN-+UT%B1gF8{|0s2lg^!S_yXH_lggd35+Ov8}a zHMsiLUl8;>46_=8abo9UY*MVix}pVWQ@G$!j1;57-@rkKam_5y#jD=^a)k zyvoGRgRCZ>Hv$f**>L=68s!G*xwVG!j#T1QotYbOi@=s7!q0QP$aLu_{k-@X_049| zP5p;7vR1%$q!=iQ`+__jr!Xj@8pD5i5uTY0P*JlF9+8tPHZO&D-?8xhNiN0=&f^~0 zw>0<6iKVm>m$}+Osq5w(<#*+!Bj>oW*-hXMwn3^zfpf7lL~2bY8DPJ zzX6#MKY>WMlv^TsN&bCWhCFb>D32O_$fXZ;0lUxwpM=(eZT&x>;a$1$K@;vBU5=ag zi?CQ=fSG&DCnW&+m-t#=@{t zT!{DYwP6VplJ~V%{L-`rZ&*$>>^$TgnqNMq4dfFM(MrjwDD9WxSAEiIFk7Uzz%`Pg`Wze*#l;-?fY0JU8 Llm}Nyt+3O7+ou8J literal 0 HcmV?d00001 diff --git a/ngraph/test/files/region_out_yolov3_mxnet.data b/ngraph/test/files/region_out_yolov3_mxnet.data new file mode 100644 index 0000000000000000000000000000000000000000..b5336a7d5dcd40c9247f05931ad081dcc3f1fb47 GIT binary patch literal 307200 zcmX7vbyQW~)5ZbmR!SO_lJ2;0=doMF1{(!MK}4kM8efBMun5J#Ku`=2@7WeADt2OF zf{Klbl)v+?_usqjT4(KZ_RKuV06NRfFo^p=Wwkr$S#vs-e>zL<=TxEd&;rN8RDE!2|d72jfm-GZ{_Gp@5KllFEKaX95hc>YgXU+Fg<7JrJ#0=V{TI z4AS@8PD75qrDb75&@@JX1v{MZR9X=OHl3u-&1vLNUQV<9f6}V=mavxd#;t93*tt!SJt{B+DaYAM=%ojW>DSC9n|~sJq?~T z6p`Tq#3?$VuuUEtU)IuczcliBx|_~z`$ZK;te~sqjd!1IaUo6;LoOU7$J$gH*m#KA zM@d0{nH?@DdBbFg6}C!uQdwU)HASb9hU`hIs8+z`)lOJ4U4W(4L-1$TJMt>oPIlWf zNGJ6iC45lFk49GvYWIYo!T|s5ACcC+e5y6hp%1prwD6`5J}vjaitFx}KTsPPcbe#f zW;Vqt7n1a)7WxAt#LV`>fF2jbYN)|4N=%*~(?GNu@!;BeW@a031~9(bp}&&;Uz} zZTwDwSIcO?@ic0wIYn+a6v29(@Y+HEubZauvUo#tO1F`OQYO6&yg+U}1995J4ZC)G zBJPMjRP`RvuV;BAzLG<-_P1#E?ZH^c@%M2Be=p^=@bBdna@&|i-#-?T%)4h4W^at= zxn8hJaly%672K+7plMm@bT?)f-JSG_YWvJkP%D7b3kN(uD2ru>YpAd%m5#62OWB6q zlwxO%hi2aBOSeJHihe5U+)q+XsiZBgqWaC!a6N7h`}YEbXILP1%U7DWvy=jUr_qHa zbrjg9gvw3M@cZM1q7Np}?QW%(1I47UDU+;6T%zZS8ko4x4VrU2vFo@Vx)kowV8cB6 
z9-B*ZT<_4~Te>)|?g?FKcQp5EK-}3#6@N47#GNA2RD4N^x+WOb6QK&GLuw6sG~q(H94-su7^G%?uj{ej=H~LI0hin8Rrl-Bv~di68W7l_fs(3NWP44qD>} zz*PGP_0*-(s-#NV7$t$vezvgUU_=^iHQY__&`II$Rv4k;jf$CeSZyJN#?nLd?RYAEZ#zgS zMUsgAXNwjMZ{%&Wg5QE)lyzn|70ybde8m%VNm>Dkx=x5)DZozGq3GN9p6b1JP``f$ zEnCBA^JsN+i(D~Y-3$J1hR}|9Og~Ku$em@=`rhkw!&wIcXAiUw@WAe)+OX)qMm?*u zX?l7AZ4f=7&(((Lbo9chQGA`KSHrh09Lbw!&@Q%v>MpcVdc{!eNETq#Ek`s*$-~mK zmKuH1==!`p^k>~~((bWBwyrnyDsA~3E{Pcq2k9?gzk=o+rU`XY_;b(>>t(#rb%+1H z20v-Vx^n8wNF#C0N$N0FL~o@Nf=39DHH&|TQE#c=Z9DZfW{}6&^R#BpK-hM=!uN(J zrVTg1EdPh}az;LlSdv5P-OZHXJ{UvOJ#cNWJ2VbxVT3{x`98^_$3cbEUGkLN%Z+e3 z+zaEITyU&H6_dngY09N^YPz+Pws!G3KG_U;JmQnbPG#aUGii_MMVcEj2wp*ato-K*8n2J(WA2k$b{=h2 z$tCl?TNLA=i!U!dkUoMxTZtz8^{&vPiCMINY!U4~_neSvj7^nZI5E)$qcT)b^!E&% z2u&wJ`!4Eq>>%$M=161$d?<53vb!w)DAdr}FR66p+g=(Y+e7tn)==T|VE0HH^sMWn z1tI%s^c_AA1{|ex-O{-E!yeVo1eoS!0jb?zh?$j=v}QW#Y1EU*Um4pQoT2{43tbr| zsMT+!z;VSCUzbT61~ig+tOkbOc7w8~C!D9~LFjRpHr&l6rSrK|By*Q4-Sn{2(G$a3 z+;ApG136BOlzS#;$hMc%6=MSX7hdSQ;tX1=j1HfA^43qM&+?^I7y5-V%`M>H zBEUaM2bgQiKw5N^7T-%H+41|Sb3z|gIN5;svl*pXW28?HeOKQ{sh?75@Y`xSpf8I> zxejo>AV9!!b4V}mpvzJv^n7VL>D)R)ht{Ydkz7zy=7HL&x(KnmP3?xcbUP`J95wD!x}!dPlRXg??gsM>gP=9_B2|ygq|~9? 
zs9?}*n!3OgjcNic8o}QUeR0e}MjE{-c8THdr>q8|BTzp!c_nO3qi%8TZT`)7s3*o1XaQMzs`WRnG5!bWmUCUJp&eB5WPF^qNd!XBw z*Sf=Q(z7)=^muqaapn){gOvd;HhSV(zbjU)83?7d=jr@yK30vk)9T@G$#wV;Oc3&V z?UWP3eH0P-_$2MlOC$NEwy?})+c1(;Vi6cfAJsO0eus?yIO8&dm-|&A)1drp`B#~G%GNhyl-72hazpL_PFDbzyq0+ba2+@2C3f6rlBeY zq#}Gw-bRKnR`fzonk$YiRLAxbM!ici$j)>JWv+Qo+l+@oyi9;`mb@l4RzQ){2}+*J z>%m>SDcj-~d2hBt^&oHDRg+f}{sVZ0T#M|V=7 z_ik!gnMP^HPtcE81(eTlLeyLVRzDpA)y8-9{nmE6zA=M3Y|oK%tvY@lbw$80PfWOH zfN9$vQGRniMf}UA;HDcCzEcMgvpsk{=Z=kIw6VbD8pSzh(}`CF)b8Ixq3T9hI?)Sq z8m>^EsD_s}#k8O=okDGQQq{h83hfz+AsGTF7B~Wu@^H*MPHU{w=-cx>WE%2^W=t4{ z1%}?J477!Ojs)^cD@l{D&G&yCrV`r$nEu%gd-?^qxXuzyZa-*?bU7vN;>v4RzI?kbAC=H5(q#odNlDd0!5> zHr*tJ(!n@Z<$=&-cXat^;l=Q)q*InfnOq@h#y_JK|BR5c*$ZEaT(H+g6%ul1>2+B; zeZeluefE*c@0lUKN`T|N4zPMHi`m^Zlq{7-6_$Ic?$BS_B5w^TUNa?C+o11hKTQ-I zptX&uG<8iCwNH`8&^&wW`6$5k6BgM0=PSK_P)hFYX=JKgN9r|7a0qoquLK{jI;PMK zc}0d-soZJntL>3WFGm?%%y~zx9MYvE*?sFV&h)866-6|!ZneshM!O)r!>nBXbBr1G{RGX9)NFD^Hd z?;8!6ce}xAuqU>(=^`Nb4&RsM(lDhwl74cR{_N1hv#EUlQtgI|W*V>vzeHbhGD$y| ze?N__wDFz^s(yIEFV7iYC6(cuQAd7#Y1AMprIKl1sV~j~5^n`KcHSO)cT1!ARTbU8 zmP-Ej_EU#bKN%+4z~0;&&alRRGrQ?g>Rt-#;?K6JhHhucBK4L75>5$FJkT6d)_ zW6Q!@r1v<7yszca-uC;XU#O3(JNQ@}>4vFVgD^bn0-0!M(yh(gXv+83lvrnqvLOQ8 zP;kbZ_lh|6v`i=1-ccW`)^`-dG%ChxZGm@V)I2CG$PMXW2oLSRjc@&u!tX?Tul~3RiCZqI-XL z)7eRB6jfJC1vli8+Up3DNC7Gr4aG^#Hafp*2l+~=yn`{CL$WQwTO@;)S}jc(n?^$n_fWXnZ)&)2g*7_7 zj=yFL1)(Iq#2lor{Mqyd9;SyWQcz3fJsf#&O#5pI{p_El#+B37^=UM0!Abh{K><%Q zobY^t0F&1b!SUB`DL7<1)mQU=O8YsI9WoFHpSz;(i6^Yq7{L9)Lvl#Wr>qe<6j|6z zjsphcLA(d{pLU1#Q!R`PZKAEevq;&ikiNM*CBYga{8-|JH?LgK-l7Tq0Ux@puESV0lfx@c_CFl;dK#tS`L$m&a=Ri%=m z&+>XO?Fa>y55N>hd#L;oV2^_(K2QBlNoi%Yit!$s!fCRSRf3AHGg1u&SW#w*=e4hC zTIeT3Q#8w>;oB%N<#F zG+~u^h1M*~A~)S4x;E=MwJ$VAa)}p)2D#woMHQ$`YoLUrbc(FmMe^r9(Vb!DI8x8| zmYf3yhs$DCKn+!PrczVYURqqxO_?*Tk;>eg{Nms5WBUNYNLy(t2C1g_FbaV z5gHiY;0CR6o^TiGVP45yqCdH$SCUHy%kNP906n;ydZPb}8xA&VAThg<(qCj!yi_q= zjDJZJCYWHzeJ==KJHz;pGExibX@zwqlHuE|D7kSx;5xJ(tkns_+W9mRh<@Myg* 
zmgnE5sUva;bMk0H)jeADPY(s*o|ycf8#bICgvP@c>Hfk@^7&az!4~2`#SBC&L>+GRqCyh&Et7y~3RO(!GfWGMeqwg1O zkTBF6{Xd4`UD#iGC$*Q#WYg%A#W8v)B?lE1NBlb|fOmr#I!iv1lgTbhIG9c!JkL_# z3{~9O>w?cBFDSn^!W*AwB)6}Sz8=jYpZKdZJ3tE)x4NUJ!UKoa4+fWYlS=b*=zwlM zz2hFx^e%loI_-&QEjPFv7>LB1=V?+~29597M)|kjP^-oe+;-(P(_JUbh*HEyyHiwF znnr6a%gG`72c0ao#8wG!yl=5Xta%a!BL*?&EaNR35)89O1rIfL*VLV&3w0DplD@_vJDuLz1KIE7TBW!uR~6ym0cj zAsh@_$hfJ1JVs?xNZ2(h4b;ZYc6W@N?12j@I>^nwLBqcAJ^$N$@_q1#@WlXfrt``V^Z%C7-vGs^ok6^>YYR76~98<%A7$6i~3~1icC3>(8RyWOK5URxPrE zy1F+Cob1r|K@veT4pA+ybv>L9QRf{=%pYopkpp>8d$kpQwRiG*WH-euPNRp5PS9g0 z@a4w|i_wk^~`Kd5(jIc?saMjPc$(S`Yn2yb*koTmUq7DI4F z>Mc!?+|GNq8RUH6JZ;Sxh$%{LXb^j1tEvHHKRqBb>wF6PFNZ9HZqlZegYjaQ2f}mQ zVY5aHL3^&!$A&CQ$}6M;YR{ely`Wv=f()K6w1I+rA3ZhQPl^vy>22jv`t2qID@_NKKjP1(YJqCI zFLb=Ugt~^N)BUh|x{{}iMGu`3``8P4^Gz`0@Jlk%E++BaOfp;ENCDLvD17IJE0%nY z*VDuA>vzcONG|2R&!snO?oxh;9u5rmgytnTybRC)nlDkpp-j3QSWNkITS<4n2`;yL z!KBI=E4`GV`Kpdul+x+&m{QVx`GtJ^ED-ThfWTMw2y2l>vcpk&aWj=ZX78tV)<;X{ z*x(vpzjTAF5pt=UG;i#s?(eBI&#i_8vt_a4m;gz)H(kYFizuT2gYTqhWQi5Ocz=)Vv7nykB2`ByPn=3qz8#=$NeOIFvV{rnvEAH13@g@m z(ejiElF&}0q^4tZHd+opCpw~QmjJTCW{_<7K#PyD!4;yrs{phQN8O043X#Apu6j` zsm|>>?=xznn`d_ecy$mWb*Q80vMV;Vd18CH0Rn6uk#$8r?KH_DWv6B`yQ70mAs+be znmgLnw2|E0L{l}gsZ*tpjG9_#(g-7T^6c*TUl-&JRKtJ>F&+EB`?^baQoY^>s`t-v6fM1}h{^p<$g|*C!wpj`8Ty7+?%GCEl{3jO z_yRTd4a7K4H>mRL?&LvzL}))Cqo;XPay5s#ooc&*#5g8YLG^dd8zy5n|{#q>{P_0Npg*8+q*cR=LrJ}po4qw&@qWhnG#d;E>Zj%(`^k!DcgxOKQO#Cq)YsZW{k;HB(k!66 z{wvMeUP^L*(x^A6j?zCU!E%c;{{HqtXqySf{%NIu`-&-Ib0$TNzCAlZ8~OFxgE#pTj$&pVWSR~N#8o-iKZj@3OH=>OJ8mR*_Td#{K_slB8ldM3!@ z+1-I2X9T}ihS|q@vK6G$>Y@^o%lJ&+@0!DpXLm*;9I$PR3?>e)CKI0B%{sS_`j+-m z$cO)DcTM-KF~p>o9JcKvmkbd*XJpCdO*kohaC$6iy;87nvp!<;i_+_Z_?b#oD0tr^WeFZ;x$+wY-kN0z}PAOiQoZj+2R$Njt% z#%A7F$wps4!P(Y4p)pF47_~YG#u?kF!M~HcvMz>=^IgDtBVsu#Z8_XCNI>uMS=gp7 zBrZpTy?l|xPF$bNj(n2jK5R6`un-8zLr1@nTCE_MFn76Tzt?A=6Ru=c9E08U1Sj!Zqz2Tb99ioaIAy7LP4oAbQw7KLmw=F81Rdj~1 z5fhGZ8=k(RMTu*%YVHzP@;dAFm|pHjbu6R)xy<->Fz5JD5!Q-{*!N;O=6KrBP10d` 
zsmUx@X(E$(^iupvekk_XilDG{I8H@Q6P}!C&uqVm*zh^-Y~J5b-cesX;eTI*TNyUk z8oF65^?f9p`y_>Jw=iMTm-GqaeJ8_PA{kSO^^}`Ark3tTuf!vbRS=084gPkMGZ+}bMkp?4Vf7VU zedh=I_9zPBE{n0xVhb%(lVWr|j#cdPXZH+da)WABG5<{>mX7vAz;s;_x9YLa#wn~- zFpizQ)F8gJj_(OYBAnVN#8n9e;p##sw)~%n?K5>@75&$JgbF_3%0#$tWd()&Lt=|3 zV;BNcm_e}tdsV4U(K`khRtRh{6}&dA_gI#G6`YZr$BYF98ruBd$&7HPL3s8_?euvU&!R$a-TSVbb-8=HPs^p9m zL)c>P)hv7OHLhsLB`W{93JDulVDIgtbaBZ`PN=??Dfcg7N^A2tmoI-wX?83stLLHE zCX|j}RbZ`+2`qWv47TGxN6ux4HU2vxFoZN=~_%J z%9Z}Q&cyHB1U&sFk9O77T!!;}R;aRnW1VEe|K%WX! zn99CH=C3}DZQWzUJ-(ugm|w~89yFf6_m_na6U^CvAtDyAl9)q^k=VJ|5psPZhzwk@ z>|gm=U1M+7dQ8MBZHBQ^JIjOy{iE@3a|)EJ4Dn@_5*O;^%PLd&JXkuA9T+{97M`39 z+7S;29T_xEU&pPAU&s_&qFJlQH}1%Q5;7JAW5$>@hzWc^r)}!F`*oqr?P(};dRfnX zICq~0nXiH4{$SJ`E}`?8U%5r+qnX6Ph3s(OI!=6D8dv{|NASDZh~UOi)2x9^F@k^g zN?#_fRpu-M4Uw3ff=Kr<$ltj~IPZfM)2kA(Df7J9J-H)i7Js&<&m2+PJy<;1 zfmmO-h*@#wtgrr}Q2*&TY>`O86-_TXP>Kh(#irh}*Gno}semWJWfqO)`ace(nS2)0tVj7d!>=g!-HCX2<<(Dq$~9s4)Z z$kGAquX{WzEt|s}-cIJiT+~tEnglnusR)-HM7eVe*!t8x_ zNXriior&m@Qp41X)49}D{>Bh-OSCgQxZN|*{b_!eIFp7EGriy8j4Hn!Hp_`xaSg)$; zL%Qxv^@oThY_ez85tD>l#t%oOiwJFeKal7DTHfCVdl?1F<5ZvTxCR#_0vJceK4=BHkv z;U(d?dLaZZ%Ma7#?jxIy`2q<`j9HDrBQg*oPv(4#xSaq;97Ghv)iAO*yG0T4C(GrpB3yR1dr6p7OBT$)#OwAnW1}{6SeeX@6-;DXZ@d>DxvMr zKLW4A7YS$Vw_^uCidf5EcXqTj*4ulACx$)~;hLu{Vi%-{6BdnR&RMO}K zi{eIxQ&)eq6ju2s>9)bPS z7GD18$Rzqj%<_s0bM3G8X^Zg2y2B!*J+eZ%e}%Ym;uxlokiyKy8?u7~RVXsf7Z+D1 zp^Be_zAG5djr}p3C5YnL?iy((@4cS()-6QCnP@y&`jw>HOE{+!!7NN~4ZB)+pIfMO zhPJeYBI-yeioVrRR>lL4HxbytHNkA^m0jG6)!%4eYc!6pT8Kccbu`*phK<`D&!Q@4 zv(mOPoTzRfUTsW5nUODg{wPu%Gh|EBQrOcEquIUEQgNhk802b2h*I!IuY8G*tcxqV z(JNx|n;n^6yooSXM3}ow1QueBmC;wkk{07x^!H?za#WYOOBj=^(KL9KC1PEI3J(1Z z;2IYNu)}NOnA3F$*4dp($387aZfF!n%xR;W>-KZThnKUfX{%YH?+tEi^aUy}U&U+H z71$PCLzm7y=cKMgvZ=vK8B7Ye+GSmIRW}w<7V}Y%A5MFf6xiBr3GAfhOm5)SP6>UiPH0^4<~b#rr7n;1XN(w2JWXg9nrso)g?%>cQ07M2xxH zF~R!~;hU5ZIIS$g(GF9%Z1^Oe{Cy(ZFf*BLl-FjGuN~>`@fk>A3FxX)fXsnV?q0(@ z)-g4fJvsH4Yp%|tTIHpDj#>*{&zIz*T+PL1uV5W*tJt;Q7dhSA*Qj#rYSi^E$C{1< z|7+aj(zy;~uqm(BRcqYFLcW0H+u%Dl~ 
zUQsj?>MUY5)f>3KO43O2i-*n9Iqe&Kk7fj}Jd~u2sZuBUMWwr~MREmgQ2)AH<=g$hS`i_IP zK?p`zZm%BULj~#oS$hd8)tT=uqjXCI#OBdsy)hUVQrCYd{w3rRJ6UC;U{lKmJ zRY7e!%kfe^0`-?}(!?&tT{m9Ev{$TT9|qTQ=Vm-5$$&`S*9=1F@gh?G_KWM`V%V4m z^O;`rS}wC)j^~XD*w{7`FBf}J;Y>}YE|bh0hEHb3`+LPRVS<2XDOme#Bqko+BwSu< z!%`lI*yhKcOxE~W!<*&q*#A|81Q&Y*tePSIylXhCw-&MOzlSjMd#%D}MiXJTDH&BZ zI=EkG!IzOv!N!AjBS?J+pR+=<{{oVaoTzY;ec(sP%Z-KHuBZad&?U~ds z5fdGCV~O1^J~>%lh`TOAP>VIX-e-!JURTnzr1bkc#V+qmEHK`ebhBx{U$!WHwo51o%!;!8sq z{;6^#-hZ1@2#R3xWy@Hu~-Z>wkG3*_>#LNG_yoF@XQCQvcG(Me4JSyuXW!?J2RP{ zk3Mtv``}WZ2;0|M;+4{Iu|>>Sc6DqDoAXkiwJ+2pi>#?ws+R<>i)zSppUMfp&tV3d z@l4HN0K0zVKQh?12o0{$IBncPiYs<=gOirA-J%G#-s=um6VJ=hk}$M&@#ouhlAPW> z<|chu!)!}~nCg=4+`jyu6tN@*TQ)4f2kCfvcS@E$kWOH?_swE;2Z`J3tpVTWB)n3c z0{_YZwAR^}$u*=fkH%3fJS0cFuGAVXS9vYl$orRr#``>sc4OhcMXbl(f&Kj<6iQVC z*<(al_-`oI4|yo=pE!Xzo=Rr7h7V>D<1P4EyXhz!n~01)B^>**m^(OUF0<>8Wfz0` zxyX=A(zz6f{O4!y!9#1tuC)5e<69nax|v z{-!Nq!c#fi=?y)!IX@PfuJh0syqtQ|6`4V20y8$6!7kmk<*t0x#-I3PSolrE@ab=a z1$9H1vzdrJyfd5~T;nfJ@3O3D|2ZkEVi1F89cM7H~g?{Y(1Iof}+4 zzn(9})5u8tefyNgZaB_0j9Q+Ymg8dPUh=W~z{S=_u^qvS z+3o%<+|8qs;A-Nac;6obPt2mS&zG&sfUS!QsB6L9P&pn;Q=QL z_C8L;`fm!EuB5tn&uAyK_KWZ{l;`!m;a!+*D7{C+~LrFiT$V%rw-cT(1z&5gJ&JRdq9&To?Oa;7d?|C(@Y z`82jZEs+@-s<1KD^JsKM0FF+N!~B&JD88G<>6r4q?wBZcHS#@|KJx(8v@D0i($zS; z^g4MjxyU(RTg7J9tYBj+s=2@eFDUtIB$}M~`aOZ)Ww8Fs8G6Su*?05U^Os?q{YnL( zIsx4;XQ0x^ndV*5Vz0(0vk^lku?~qZ;-I6ZU^09TuN?u~kE?`RdTiPCw<7jprw3dA zBFXE`F%K+%BSPq7Td2HQEgpJx1nZU(vA9%I=A8IdnB+DI<3}f>u1X7bEzVqZ_YC%+ zGJ&n1s=%JjT}4a#=7F=1h1bt6nzcEf^B=yHMYl$>vS}~4{V%FXAYOq)ZWY`VFOovT zbuKV`H5=E=Yu&*Ixa(KmQ|XK-C=3e3*}^oc-6X+$7RE7$v;fxVGLK92P{E>{Lpx;VyC1snM)GFNOWe0+l_bq(Dz)+Kb&L#K z`7?!{jai89-_bbN_l?MY7q@6iFjERy!^(^va{K1;b48hJ|FOfwx251 z$Qv^6H7V?2@)%b5WS>}PC(q;#h){CC8+8h2eBNccumWDk-?MXKo4N-Io!<&^FIt3E zDi)X}JSXle7{^wqrm!c=^_cx3eY#uhhpeVVRC}vpa`3=5b}DPy%Snu1&PF|3%_3Wxxt1&ZZp?&L==EL+nPL3SoZuPvFDa6x-&)Fi zpA~Wuw|~>`))=Jinvd_gk+giAJSz!F;I-ULHoVe<*PEJ<(oV+bACvI0s!O<|$b{W` zpTf@Nk7Vm>){7_f*&zH0?`NcW;-<-7?;Ype*`W>*oBPU+)vTN=EPp>7<4r`koj(Mb 
zQ{IXc&Mbb*Ikc^1_9p|`QJYLI z@N_@@RfvP-xVf0Ub}>l}R%WNBCbBaFr?aZ-=A6ge!B}=C898K2G=ra9pEZxK=#xxwS_)U*5Fdoj<;BKBpoH9HiPE6jf~3S&>Dz*2>u)iRgn zZkSGC)2=45YE2DxD+uIudKTpS<1t{DEOv?GxxgI@SW7?*lb`>STUffC3TuN<`(zD1 z9ezymk56)IWI~xcpL^s#zk3e^3_A|j_oxGvq26&CP$F2{#i z%D+1+v&|X5d99nq&k1P!?K*p<*N2Tr=kFxX?q+N~DZG7lEbe=x;B34;GG&`enO8_B5^!42p3mvqpGM*?i|nVd?zkoe$!*QLqp}@VUhsF zkXhLB-G`iuHCVu_BsT2AWTxIB$?2pSW6;!n3=pVbhtRQzDz5 zr^Mu5FCk@~-A#HOi%EO>D0pEOH%(#*TiLLdwT^tlRo^;9V|RyO`k8Pn)_d)MfY4b{a7%U{znnxR1#6kvpYLo z8?qmx!y40*S-Ipyc7tbkU8+Md+D?S5jc1=W}vQX>$|} z=h<%qa{QUW1{E^$j?}*7ySOQ{M|!h z;hQlmdQl2<V$Y^_hta|Qrbvjp4~~u^6ZYE zou5)u#+@}>#u|BccfIv4=X9-sW{eKQY~N7q;Mv`>;g7iPU2B++Y%nwH-@)~Fenou={Zu4!l#jvabOOU71m z57k513ZC657hU5VoGwxF-&OdPxB~ZhcGny6lB*lEmL2=Ogyqi6I3FaKm4dt;aT!5@ynF>x;B3d=UJD-Qc;=6DvmRW2yIjx{{Vhermbo zDt(*Z%h5&BD-TQpzX!EL6MHqTP~G?}>Y7+Y4{toDz1hYXd(aCF<6U^hrh?M1XXw#N zeuuVW7ajBJAkR7GFg_>1j8X^WILqR}fEs%KncpYr+DkUdJv2Je8ruAvS~_f?71c-g zg7#A$zjya)(NVIKkio*=_E`N)fFySd_-y+^-o~X=tCdcLI`uSrzB2ZTouT{M3wO4d zKvTPwOhy+|%$ZCoSLXNb_bK>6G$aO7+r%wzDVh zJmUY8sezx?jg)sGlRVstDdXo$ni+3`*ZkgHXQMNItm5aMUFvDAUOIhLE+tXa7m6Qd z!Oyb_ut>rI?t^47GmiiEcT>r1>VA4MwU10)ZQ#zI?dxW1EO6|hWR-pNli#~*`&dnR zCbGycaKNYY0>lKFW8#7iYVX@cg~92x_ud&wj8;MQaDMjhh!^IR8pHJKbDAGeM7qPX z=xgU?I+&`7AD-^m``!a{Cg~!_>^4o*%cV;xd9+0DKHYZJM}CSYT*KV3STqP(<1f;V z(V4_7w^6U*YudBO6f4yQkcTtEXDFdD`83(z<>%ks$|(5QH|qXl@jsHzI;_g-2|)!>Y{6D648%soL`6mH`qp>-{&p^%bI$wj zz4p4-{cL86cQtVt+_7`!kDjJ>j5-_^!EsVFbWJm2#MsEt>li~X8m{b9;u@NY8Ohwz3&v4t; zo;@auOXJ|nf6X-c(`7F{4h_S>tZYp2`H1ArE^IPrM}1vS>c}}XrsWP)%PeyBe>?D4 zM@44JJvl3m$m;FNaLabQKiY$BB9u5Yxd0swY(~nrB1C()VA5Mp4)`Ur%qyMw z+qDUkl_gUoPduY>MqnCt-ZsfJb5jxo1KMQy-X7ea3ZEFb)$3$6{!0 zHSBZSvOqb67A8TQBXx^z?=x^}9Run@%TsV!-&kXtcdmyWfL)iauTWXKFip&kM7^WMCACIny3uVS_{X(d+OZ;OS zL%x`J3In1d5fh&Ty-N=-zN&b zJ?6!ITU#>Y_D*DrcV~Gk4>#KVLf|TQTF5N&A06D7-TNEN*KNb^FXDnmmf&ct8n<8d z=I4Df)2idhoQ1D2J8CNeH-=+q*kR0kuEXjD{w&xM%Dhh2G}6D1pdX2-UJ`}4^0SC< zGUAu9K|FIRnEv&q{A1`9RMv=_b}0^}?{6S5)tu|oLU_7kAPe*jXu078N)011Y;!W6 
zetC=$M{GISUh*x}{HXjyi#M;A!*qWbd`dH+p7tJAe>ijASMepyy%@H<1vQ2iLrJ_l zS49DusKlkd9-`Gt*3N2IareH!EhHBeR$<7o-vh%^bvjJ*;ju&oj|4eT8!xeSNgAd; z4o9DUN73(>F4da+nK@k6vi?@AXuJb+%S3$F6h}AzJT852!^bCsm^&+&e?Kx|dVtIu zg|hQFOBP?fg&y4#Fg!OJP8k;vsbaz++hC3}3g(%6#&jET0ed5&ac)8a)?44kpX)98 zO}x9hp#h9|rANOz$M7O30_P)Bv1!|1WnPvYH;cB>$=Vxbj-uMAW4`gBe+Autr zxDR( zY&omoG0sm(#uJA~{H!{G@4*JV*e{S@W5rcnWKRF>H!!a>4rdx;uz%|n1ZuQpU3D;5 z_77s4i$=6xbQYT?M9Fiz1x};yqjJ18^)o^_ahyNZD|9%n=V6pDlT4MetvGk~74~~L zvgaY0@jfhW^$a!E^)Eq?cz0!cx1np+H(VX)#&}sPlqR^-vFW--_>Y{MbXhJF9XVnyq{YqXkJQ&yp;l zqEm3~VaPd00~y^tglCtVF?h>0%={25^INg-*mf0f)Xdn+E`+yU2hyR^kO2{=QF$*C zdsLHPw7w4deQdZwyt}y%{n#_F6&Hmc#1)rtthkmAdzD6%|90eq8U@>0d9$gPDzBf} zh4R|Xc%_+#JM}-XM9-c7$r|rj?8aW(zT;Z&?NEQV8C}zM(?Z zmDjKiPDk?qS>x9n!pIIfyyPUc?*35ry=2XK10_dj;}(=VMB#Yc8Q2Xl;(Ny+YKwO_ zdR1HYkEljrhgdidkHhtn>o})k&eYi<)L$FO_oW65D>wcUX}H3Qr54qq-iaq=4ludVQ6TlbdyuhUNS7Vpk~az3s*{}zA4 zom0dUn)1$7GHt%f{*a40@$Tk$l;XIWI(_t|)-6=9>Y#&s?+ut}u@wW0B!j8*5p)`; z%ME${v|Sxak4IMA<9iPqaue~aBnm%QpToIP;$(ycaf*0%&a1?`+jtocF)=t~8IPqW zYEb&!oYeTP;JdMENx5?1HSC6%(cC-jo(6-u_3+HIj-mzR<`7o?^ z&4TL6CM-yG<`ePmmehK(QnLk>P8H#!_*-i)7NB{b5^EQEu#1KGl0L2sv-*tLWjVMg z-ksv~9vpVmV2gYoF59S}!y|ifkYC`fW*Tm5M#!w+F)W!bd1`h6oZUH;r`}uAz~Byw z&x&`}EgGHXSE1!(V}>^fpXn9M;N>RVzUdKWF5v19+y^2~JdWk!EZeps9 z_h6oVE3VK&V+Kc8q3W+_>^z)+cOPzJ$P<}074Po8O8}oO(_^~BF$~p4CAtMF#6XgymoYDg?T%sukoPi86{>vEWld%-G1*X zLe5teI$rkVJ>_=XvE7+k=}jn9WZ|87cLR0G5j8=J{m%K)BSdn$Z0+d$y&glpC8J+m z1jg++4#zF6d8<_*&mRuqB25dA z4@BWdd?Id}-9zJb$x2xf%KvuyGiQn}{RfI)S`v<<`dg8{r~!9(I8dcPvID|kd^a7 zxL@C#^J}idetaCx`^O?_Q8m2hwB@Mx!Bn^fQFFWzd*+`(Zx6{iSiA+Z@Bp1Et+`;I z)Vf~&yxUKQrtyamF*F=j?&*lz{Tgl8IP#ZxcRyEpQ~84`CzkF;EAj54u^k%gzGKoh zH_nrFFxbMK_CJ0|y^x18@$Np9?85V4Rn9W;<{#qSE!J>ivuz`eJWEHEV>oWlJBT%@ zt!RAPk5=N{T^ek|%ei$(*Gs~znn+BYb{chi4XN@xkm-&gRBdC%dFQTTSwSpJU&f;I znro<>X-1DuA$)c`kiLTq`K96%YV#v8VSExACq4uVB`dBWl;d{zab0jL>fJhkn_A(h z_$vdo4UNe8+etE(#Eq`=;@WF1xx#rD4vKfDpOlB{vp+F*ggcMO{&jPl8`mHBhUI6s z;cnAr#C0fv+YmKY?e*sC!wO~vI&#VLS5P{;6;_MGG1liW`jqLg+i-t=%MNAsOl#(? 
zypLT@Tf`BK!pr_=@v_oL<}0MfbR(DxjoUJ_@(Nmi68|_a4w}(7Fnx+S>%_a8H87BN zJq-B$!3o5$mw!3oU!Y`5olnXNx$(p-bilW8hR`-#d zfUOFej&|Uv-7m3WcN#8NhvR+o5&T=F%YL{0d17)Xo!3}Np2A(IcTYsm*HPHq-0KWvb=CYQ@v3P$3-uF+z z;fg2HPqgER9t!T=;>&C$E#@!YhZF0=WZ%z1;N%aO_|lm>)Z1}Hq9+@oRjB4(gulhR z^B5zwuCw^al^*mrl{3l{7cTJsgcSifnB7Hs1~bdhV~Yj@>V2rQRKeHl?AfsNIr?R# zBF8TR(<_hROPwAA`v-7=YbfVLTQa)CZ4}N-fa2D5Ez6ZSZM z0Urya5u-?e)!|z>GQg6b#k(_@6Tqu>`b_ev!1`Vh*ghmxqCcJ?t*<>F%vP{S&6h*! zHMsm*8OHRLGyAh_oXh`+h}|w+B6a%62B~#tDKkk1AKFQ+TT;CPH+rgY!KC#*+%MB)hXuae^tXbp`F0%ELwdI1`l$kaoKw~oyf$vyk!5@3?0CnU2e+%SD5C_AW$sUV(Kbx<_=Z3qH#SQ@qt_-m9}oD2 zt*`R%hj@1*&hJE4L`%9IkTw3Pf(ti0aqz;o_%l)R)V_w{^UMR7_ez_uHv6&nr%;+b zvf=Yr4>4+65;QhPLZjCyTr)DHRzV=Y4Gdwy4l{=Sz6KxTI7F|A#jSQ%QC8cQ0}Vph zT`7pKKN_<9&1p>gC|PVbw_uc29Tr*J@b&pne){6a*axln_5MMGhlFEXaylxL-r!P= zBQ>we_u9dm`Ri4szT1sc_cr7B`R#Z-`v>wG+_+8F_%2u7*g^jXoMX3RretPL{ChXL z?^NZ7!QRZhs9@s(N5+J{LD8Iaoa`2kZqA3`qpHIwU4O=(kX*?xlH+mm0eY5i!CLJo z-0(SrKszIjY8}KL>LEP0vn~5HSEGJZEEIupSk^$W#{;l`69)TZ8K}Gd7S-FG*#D*YlIdQ2J)tFM zZQh9^XEr0uF&~%8e!(W&oqx*j*3;OHw=RCgS*eioB{S3EQ3-HK`WYX*nX_BLk|z#S zQ+kCz2W&-lYB-9YAI2n0UFt;p^ITXcCkc~%mvIlPs}teAB?>n!&fzc5HjG{r#Kn7q z*=Cn1w>MtK&D}9@{1gZKAvMz1Fz4zW(uZ;hWQSX=+1~90hJ2T-f}@fJyPzIDUfI&i zLqYEbU;bIB#g7ZhQCk=Wo7b6WXx#+edCrXcDLpWm4cgJA1s7=*capRvAKG8>(wR$j6P6JM*deYj*GM2WZ2%7LHHy?|%OG-Q7chyL!P zX!h6R6m6;H`h+ss%8H9u-NBEh1ZcQMi3M0!RMP7hfWPZ*@bNM1VN^gcSEvLtI?tGQKU87Hv}l}Txn zV|K0w&6Jes;82LR(vN>Sr3hseD%@P`$=}kie{ji}AsQdBY)KaGM@n|^(S11TtVM%e z@;r1@(B>aI4jKOhr+reOR~3Oi{~Slw@YdY@Ie>Ll!V7{exbx4OxZE!ulPAYWF56|C zkZIfzk-_Yf8^kvmZKT(94%#=OBzqzef9BmqRK68|&z5@Zlw^)=)#cFgBUo@eTxzm3 zylT~eqiZEYJ6l1WKpzJ8Q)hORWM=A14p6>$`!$kPRpH83!bv_ixzp*$Z#+AakI|Bu zx%NphTpqRHn}58xvPtr6S~zpY`FA+hJrj$B-DOPQj~!jK*(+4etita8oo&k=fse5& zFd1tZi6@>X@$U=+Zk-&+{cB}CpgF68Z@@Yt4vhw}IILcc?H${))#G6P7It?h!H9qA zpG94_D1`OiBKgJlu`-L**A)s{qL!$DzpZzu19qlzl|(Tk0D71RrK z;@k*fca|A&7j{=*a}b?Jwc@x+KUO>ndZ*E4xgjTB59EAd zca~kvSnGNXd#=Qy>`W}W>tDk$m@!k>UDT~W&fa9m?art1q(btQf|5}BwoZD_Hf&!P 
z%HWfJ((i6Xzo3J7Y${nex*7OmO(W{voOtk#^y7bek@~9aGG`ZVT$3!*-g&q*{3k{S zy3<2G+m7{aoEq>QYt**G<@IJ*eb|jnc4}N1FS*FV?o`J+GW_q?`1E}%eoqL;gL{WC zVX+Peb@As&VRzp%t$DTnKB@+9!Q0MJ=>FvlE^IWSR=Xe?3A=04y)89ttFhiZ76;eH z;hW_Rd<->bo7ExwGD9*pmKyNSZYN>dITABnlVN7|7(*u5@~E)8p!SkK^0ziGuGx>X z)nQN#$wbJ}cNqQEiOYrEH4pP*&$lhO<8?9G2)m0e%13qPZ?tc4r>8uVhUZ<`?fna`YR!`#Uzru3 zRo=y@g_0vw9fdQ`&f!O_WM*arF*!Dvow}Ja&hj#nX2(FehpdA>HxcM(!MSHc$e#gJ zn%bJPQrvQ|tnD8#$>N;FCHki6%1jMZ@E*Il2mbWRSshlXLUO&Pw7k<84K zKK#5|7*Lr#RqLO_?^-G}gxw98b_^G`>G5j&0FDxN=XuhSt~s~SE-eA}lZDUSt-{e1 zV>bQ_;?Iy^9_nPmEbEJSdO>DP3=^RJ$1O~8w&Y%6cjGooKVy(S4HGJ$GCBf_oKxYk z^cki)*)w9Yf|qT58EvY`26xF38WM)bIoasl{v+Oox$v&AyLDR9wPP z)<DHPskI)?7qWh6%lg2D7uUyX1Dp9PeC( z=|0gI`yv6~$KR2haVtI$b~mPV0M~WW zLRfU$oF?;XaPD^;{EK6-x<+z@vQ7DSNiZiZ58_9^Hq;0dc9$N7<);(Db@wn#!%f~ zrQ^odQZv0Pbf;bQFQ|IvsTS?TKe#b4;s;t^ z-Hv~S-8KB!g_y;vvi-HPgx$4$(w2V>yow3yV-aW|wfxg-7@c88#XlkJu``hI zW`;~3cM3t#kqA#tLQVBU_}{bP17UZoVk8&&t2T%KcK|WpgaK{Hz{sI*F=Dk7mk7H{ zJ?zD!sVzC+-cBfl-L1Kkhci8XVeV>o+DboTN~jxm|M3kzE4Lx^tK=q!m!K#~jb+!o zIb@&k=vIzQTlxywo3`Sju)FN|!x;Edhi~Tlb9rJYH9K1KQp@|;@huTI7Du6?@+`(T z8&Q2i5CeqW{d!``ygpYDcOwQ#SL4v<;|(lLHm7QO2wMuf)7CcNgS97QmO2vuMkS-{ z$75(7x8(t0cdym`=<+~|FRCR^YoF8=WtmV-k_?vK&P)_`cg@6$aZ6jUba*jN$=NZb zeF0u~Q{qlP52nf4GGw_c@2GykRNq{vTZiF{%^v(HSEu`QA8I8k*eTF~zdOCe*Li8s zs}IMap+^y@q(`^+vVRSe9NE9E7+HS@>E@EFs1c1lrRS0QvkmW_mj0QrJDYkFPWkH+ ze!0h>eM&rDYf4`?+JfcvAxyg*z*#bzn_XCmU&8JxBT~@p{uEPk?6~^>+1+wqo;$9| zQ;z$fzcfr--7HMG`2q9VxNy6$yKl2S`F*g8xZ^u8PR{&gd4+gAS((otNd}LxJF(@mN=zb>@CNZ?53i6kGN$ zevHEL$=K%@iR(8`;AW@+)r8$GjtODX0(1VEdIL5kvQE5-!N~k8_@LRATdoE3@;^ar zx@5%f)6XJtd=#o}x8UKp`!FA8O+}{6o{aTp=Yu-z-~BN9Ef0tD!L2B|@Ct2x9l7F= z%=8@brecyB?f))8_y_5?m&=;4^&55#bz_RG6&=U8)8WZav@OWP9$|NLjdo$=MOj;F zy=eMKSjBKBUQiRCb3g|C)x(i`;sB1gw&LwvKem4r%J985{Jciy)Mh6kF*_2)`%Xcr zry=(o3FPOl!myT^aZ30#xHd_iT4t=|GhRiWh8f@3htTeIAXQHo(sS);+_@Vm_aP*q zc2gbZ^tNG4jr`qpvX5l7;=D}_s#TRKturqqkp-c;(YO2<>X5G3rb zTPx}7*8M<7LwEiwYy8jxHx9}A4*$N}ktyu%Yx-_He67mw0Uoa1f?UTa+}(Nx%>#|75Oz1>qvY$ZXv>^` 
ztFfScEUpiYL&@RmSlQB?&T~Wf&%c4JDKp@vBTV+U<#lxhXAJdY z=mu?;U)_)Ygx%>Y3HR;%4guGl7$NNL<4P|&IJV?t?8GNw329UFanAlX?woVyXkiVj zU%S$9%G!tVBaxH84;Gs^bm;E<2xa-G|Q zYIIVsRUUTJc3t@K-okD5-&XTv<+=1d`0tR%C zM#{=6T%Kag$1mm1nV!L%v($tht1jZi%V=0XkH^>gT6Fke!M}dX-kcdg+gN>mZK%MZ zg%ODTl!6)h&+z$^aHlZ}p6}?(qkn1Ac-daKN@nJR$&#m5*$m%FE}SLou9LUaw2zf} zv)BLFU7z+cr&goPRxXmIDSxg#)rEeyo8h)F8x3Qn)}68!={=>Nq3~tRSn200*;Ci$ z8HRmJLA$vT7+hO{sP+0Bk}8?TN}*i)(}LmOYH{OjJbpfo#*IZ6v3iyXRfXNvz6s*p z`O?o=SB1*n(eON!0OQ}c@#Co_+jW$EUD#dC3O(AGACvP@1Qsi$p;gxxn19`#!5id! zw9AKQyfrxC_8#e%he5AE?qE^*j0RU%Rtvkk`LBn}zADk}K>>>7cZ)5ReE82QOu6dG zWAeMM&vvFwN)xKvXTd)$4Ad{j%1K)6c3%9iV0q?kq-XuP9vNSf(e|O-L9+8Wl#^RC zT_=!#3A;O^E_Hl;4aywi@o-}d4llfn*tw=u&j@B^WDqN+OJ-*5Iegq7g|&%^h_SjS zwZ4^TaG@+H_UDmFy0rM`2u|(}M~=Z(EML}u{>2Vd2)oNS@!<`9b@?7j;V0~F$~(!5 z>-rUyFJ0**@6)7ece=IzjU`L+aY){`Qm>u(VcL?Wi@i9nQJ(3WPAnMq4n?gpu|?S3 ztcv}xTdU1I{rz}HQ*vTE*z)c9M{xU{1e?K;m>+f$vkDA&bcOW(gx$5!Gv~dl*Ku=f z9DW7F!gg6TUe9gI*TU{Lx&|?5tjxdWp1~;hDA+9B0?*D5aOb$S+%qISYfpbR_0geD z%pv?35)K2;bSx};jj(?m>3v2anPcA6daugeyLRLDbD1^nxEVU14KQT!kyH4R7YX%=Nhq885Z8CuaILVrrG4s6BNH3u+2*xm8I8It|j2<`q( zG?m#A(+4ukRo#*|9Cjf=*xl@uJe;`j6B#4jxnK6L=&^2$+xrb(r?R-X&^j7Q@b~nfWFoO2!&~b!6lZ4%=&9LUHrT1awDDxR3qtJEm zSsXrL#JD~|thye|{YGtRcJvA!HOJsObtm^@R{G*!z58pk3<8Cr% zJv13F5*}kxj4iEX-!HcH<0Ug~cC+1&s*_>ZwJa0b9`A9{#+l6@rSIF$i;H%*;E|kS zOb~Xb{VgBQ)s>ji+Jo<;?g-xI%H+9UP`o%7W27$0xlxMDrRs9$h!1-TyUQHuz;#70 zv3+M6=3SLF{>u?ex~NO%JN_IZ>~6y-2DfL%V?^FfoF8k!cf#(pgx#41v}RZ3JP37&i(sluf$MfgMN!Gdv8>$)m4{9|*g9>>xR`dM2zndI66MqVc<30?r?k zS>!>MoN5{>8Gq6nw$bNI&k9uk8G(7jQZeDPg|+8i&W*#qDLCcy&`w;3A=NB znT_(Ik9b_-!U$n^UN5EAogqEy@@Mu?nxzY~8$ZD0V-~nX zX3!1y;nF@$1}*euM`3rl+wHiy%Tr_|rO0ec1Rf_=V&O-9R-6xD^0N>=O|jrqn_AHd zCCAh;2F<~jptxni0n>sBVRw5pBtPoRd4%XiW4b{i_TIgN+dZs!UfA8BPyT$Ot;fM* zj>1sb-HOR+@N;_!TMq|@3%gq}*N56C)%n|Zk35HA*y)jrh&P|%FyECu<$PtW@Sx?N zN-XSEfL?O`D(zK_$us3Hq&8k0^G(6p0nYT;{T>FDnYbbB?$WVxD6VPIL`Cvph28yg z%9ajB^-xPmMwnhCZp}IYGj#(FY!@hVF(G`NW6l)q8ce?thkq``;8VyI1l%!Yp|HE9 
z6N9+rx8&8Y!t5!=6>4)E9P_f3Fok+&Ks(VRyq)(~+C@2JLDc#rcr?N!oj}!walrt`hn|F+mSBp&UC|P>{2}aaV`3oxd1k=Xs;4k_ek8`7O@c?SM^fk8a)z+G{qy{| z=Y%#N+8sdN>oB-gX5dWiTLkAjap_Cx)ok@*^SGA0xM3%@3cGvjoR11|Er;j0vy1$0 zzl_}Y{M=WpX}=9mh27QHm%#su8cUnKIYZc;qTYedKN>LhpRH)0CVNQ3Vf(=D+Vh+$Km+!8g#s3 z&Y!~WhB^mw?9JBv?Q{YW-(-g2STZgzuE$l$T3I3NuH8#t4$s%3#=LUL=MFV5y_<5-*;)A5 z@;w;VsLoCiJ{%OKU{7u7)t!EU{z#LIk8t?zIf`3>dhF0DfXjv5eYCLR*=2XI_I(2M z+$00upbF=MjoIy15F1AXv#!vDpBgUWg}T&JmGS5~w-&ojTkx^4J2hcBJ1 zZI*f6V<|GD@Dyt=*>SnByNuz!tO(QO`g42HL)hJ72e}JrU^8mkx$wTQJBz-a)Ur|G z)tDWqm%eV{=R!QmRA#ppo*XCaE-2oGxm`a3CE3_7?5>a5UPQLix%;UXIH_$(B!;pybNm!tUngc+mEj5_i}Z!a@4+^QRY~__zvp@APE7 z^y~k<`2Wt$2aNpxtgWapR95c8PFHyjcKh-dC7ZaP9m7UHfrfXA+-nno^#hM%<;d1t z_$7c7gx#G9w4ip6o0!u#9^q4BpsFE#-A<@KXGxABJ^> zxp;r;3-ph>(nL5(#d~)K9Qcjphw`yR*qzn$V%XNVV5b}Rb!d=2wq|MTuzrFwIlmm2H8?8ZD{cOAvM)3x}H>XqVRNUf{aLGBF_ z@2=1AJUkS3mpW}1EL*B_uy}VT?kc$5U(VN?gx#5EphVbRq5VO4j%md|kIS8$!tU;g zclS!|A?i9Nq2)2T>%#D~cuIz}xDm)#fgzmO*^J$su3?UNcg<&I9@+337I!qGvwsL{ z#Jfw`V8}rZr=cS3ZgEHw7JjHh$!fVvRM=gg6Mi(G)`~vj-Hi}-_d-7dUh5js*TacV z?6^tM2$fCbqL-m{7jV0b)L){_#wN!_}UH$p9NO-n*cSaBIBV>^Dkh(-6>-!m` zZ@9HqLR%F8V_&c2X_CN2gW~dkcek0FVgWUTm&)nCY z`8b{X8}nbuT%kOZXU@8^zj$}<`*QJC*j?qOQaE%~=MFC)&KGufIov^J%^KwXhcv{C zcc;ibf?F}V{4MP6UtxC%;@$Ziy^Fi^6VZ4z3T_SOP$k}7aApvdh25?0Vk+MGWz3ru zgHeCPqieuTJn^^SYhiaw#k;eg+?sBaj-%{C1hkw|a9zB+9e>zyiLg715?_}2Yw@&r zcVl9NlT6Q&eW(euPdoE3Sqsw3Jo)f~3j5D0lI&GEe;E~`;e*^6ywyYg&32@^D>aKg z$r_r2q|h+A^Pmi`$7yhfcz64S-HqF0Pmj9i7*(B${gx3}GV>UA=IJrFgVd=3Qg?`V zw=Cl}PNyWmXG%17J*dJ4@$O=O1#z^nyAPCphQ&o(7VqwxQ3A?(-9l?uOFFlb?`2Z} z2Mo~XvzQ9}IVu93TvMSe-rZ|wdsa^tx7fy)>BgGWbKQ&0!tPpa&lcbDBYH%*@V&6R z!J3{l+o8;*-wJU<*7yqX?ryYEp=nP~ew97odS@3t5brMBI~zl0grV2zy(n9w$w=|; zUJsGpXpJ2Wi=X1d*%T;kh`>fvqU4Z1?e_+-yRf^y;@!EetHqbI@z|jz_aEQBhy&u? 
zT@ZGcepmW6p^}4dUxo4F-Q9hefSr@?z{=c;y?RT3P`tY=>T$%0qtFm`R~e8dF3=04 zw{VcYvw{;N<=Oe7&T8@QF1UoD`dJQc{`(o7`pRs$?7bSpWsTpY#HgqOjF!E5^S2_b z3U0ys?>$)~dw0|_nNxFXg3{Ma_~y$qIjbB)(`6P}ygLIgaTc3x#hx z@d=!~DEHk-{^7cvA^aiU-I7@~nEfjbBX`C?>GoxG%QfXwVRwg?1<}#B4GVqG!Bf1u zmS+?3dE-5N(X{5!O`#l_;7tovgtY{k3Veb<5gcPV)Pqc;cMR->hO zcaFmD4*P7wvQu9%Q1^e{U3q~!eJ=h&mRml42)pxGx)Y~|v}F7iFK(BbYuPR*eiHAl zs44@F4Pn@9eE`P}Xp0x@M`vMo$MnUg?fVFyt|UQwb|f}6pTyaF1}saEdydA3NLIKR zlSf{MdxtpuH6j*s9#q3byt_xj?)J8kJFJ|HIKJB%xVMhN!eY7auJ{4MK3cP1Whk|T z-6^%!;kVF3awZK&_=I#>Ya}1L#F1-+-6@87bK*hq=*I0vg0Q<&Yq#UR`VYA?;(y-V zq$W3Z+Vlg5E^o&!VRxzG-ECZ|%D`Z6PLofw~h2S#k;Hg`zl=5#bSk_%ox7B2IDL< zRtUR`ESCFSObz+Z=u;Rb-koo164Guw#OeDs3~ZM7O}x9mKWocf5(i))?C#p840I9i z?*DVfvxMEPIpoDN$t}72_D+}xyL)^u4+nbv!p=4S^X{xe+<2_(H#}Rm4GVk>`qJDk%z^*+qYpWCT4pTdF&Ve=Ocz=shidy;P2Wgxw9=mx*_&?@{uXGs}hDl^T07WpN9dig#Bc zXGbj-;AsyfKKB2gclT1Vb{dqwz{@)qe+#>d74Pod0d@M$@Zn`So1XP|po4gK7IV|E zPuSh95l7KhGIy25yK5R6%HI8~c)RWnY|IkTtQjpcRp&A3cN?xfBcA8XV2=2I@2=-1 z)VakVGc_L3I&v41cz0e;WKQi;0HZ(4nLn=*9faMjjgq^ay`Q2n*N*3f-BG-|+6qlR zvXeSa&g}XPS?Dj`-DP7JRtmejHp`PLgH(iB@4yH-^ZOPS;^|an8rOU9lk^0t_PbE0 z@*`4TWuxm~a<^t>8T!|1aD$RBXGl+>euzCgh|8i;qd-z=0( zqb&LS#4SwiB6pgKcc)T(0Tya9!yvs3TVZ!s#k=d@=K|uxqLDl;0d-Ed(NVm+3SoC0 zhRB?Cz09dyKZZBr-I*q)B6!?$9Q)Uv%l@C;iFem0M}z*W<(^excR!qR;4R*r;XM}~ z6L$CFga_>dlw~HX5Ejy_`O~}zhr(6ZDaw=mh233xFL%7G{(yjzEcE?P*q?ZJh0U5g z9OKJx(hHil%#OXpyE`%}1t*2w?f6+KGr@9Z5_UIU*xiC@7M!=^CcZ8Zo+{p5QNblZ z%an(O-IeYQ;$HFY)(kn1)lK3bcTPlB*Sjzk?=ECwDAzsk=Z4F=w9+_=l=cXYt z=_Ni7cc6Kif|JF&JMy18Th^CixAe#iX5=DByt}38uG}L%v`+@ovo?@fN%8L93cKr` zSd5N4TX1xs7xRVPeRFnZ_m1!Jdqt+q!G%Fbyt`_BZCqu;|fZ(+tOKjht|UGY{k3FnsOG| z!&;41BOz!=Z-e-%^?(}{569>e*yD99ht;sG7yxfvAZ^@jR^iD?% zle$vnEmZ!=z&>GjN#flVxVPeoZGL>(Aa^_Ov7xVccPnQmVL?tLz8nz0;!i`)5bth| zu)85k%y@I-H9UP6i!0*Yt=w}JW3 zk!Pp-JEYujVyUpZ+v44M*h^oj{Z8Bwme6uWJ`Rg_XISOVfx;RBUb)hC=vP>aclTJ> zU9)c~vUSzj&A^943l)4?=D-zE4LE5eb$qeh?JVBiixIkPU*OO6!aCmEmtLQEcjMC& z@nBCBQX|h{(zrJ4CElH`u)FQcq@T9*GHN1YP%7S?-qjlD|2F4IVRtF51BLI2cX#YK 
zoP^!&{FID>`g$x8@9rOAcfpr^SvW;UwE)k>yUSlLYlyJBN$##JH2y4ig62TeSA3JIJxKJ_pjM#|S8P8`!Z zlfS?wl{ENjN8p8ccdh5^@uYnK%Y{)bZOzN7!A_ zViWFLb`gayqOnQ5yX@z+c;0NmDoeJ?|JdC%$efyZcfqcnd@g@p zCjVWRnr5iX&qj}Nau59Uy@-~-ue6=azm1h1-7h<~b9jb0`FB4FyIU{b-K>rJbW96i zwfz6x`fkA|;@wSal)mnhX#6RkquU%4W(&Iu5O&vko-t?3=iS#!?5o2Ga&P$^m%RavmP(-PQ1HC!tT24^x-x6o)d2FL4RR)-FL`*hT3Pe zm*3-|u)A&I-8mjp;@-OjI3&ND-QFT3ig$PPnkO^mcXiBiW^<#~A|?CxgCanz->=6KyeUX$m_M9qRe4{I>QJ|60uWp+lM zzy9-0nIY`1jj+3SQ`@j*)H!@DkAiwqB6`@}gGa5EINLIdT;$K13A$|A_Xr$?-EETR ze(0(ORPA)&Tw!-xjeS@r@5ho3rFgFqhT^7N?D|9I)Z{(#miNi?iaSF>enW3jK5okU z_P6g&d^Bsxz$IS%t5IecZ5hn~E@Sp)pIK~rkscD8gn^9ZZt zJ^v)^?qK9e^w?n__gKjN^|BuH(J_~Nkn7kyCJq5Xa_{HLY81?G%jHeMtZ@nA_|Zm8 zll5heTNL&#lX13vHT`5A>geH5&A)W05_t&Ih25#jdNp+aYkXVlC^_8<-dXNV z!?&vZR zn`9l2Yi%ZTzgO{ddn__v#bT1I_ug~O*sgO3=T*u%<{v|jK6nbI!tM$uCCS&OmNcBNh#Ck{mg?GyA^Ga>;(=Znq1wh233A%fr4a zKXGrAJNL-`)pLx@;+1^EqZ8ZETlP1#E+sG?p~e^G-rR6l!Gp37?yq|V`;%MIb4fT> z2OWl$?3XP@`ZGw_-Lz@e^jUNtF|w}~j*3F&u(Jq1C3idb4PvP5zh4a6GU1@?#~)(Q zt1u27WuLB|ZqA3o?%wndTTrdd5I8TXKY?zFIWig41e3*rh&z z?fqI)_g5v}?Tf&i!6_JZ_6f8X*zujzYo`J`DO4(6?XU3 zhvCBR-bmfqVcv7>+?ootfCwBsa|}nMKD{aIF5X3Wc7!GUgQTA!b*#gNX#C&>jHxl^ z8hx49m3lW!M{@KIiHnyXjaeNMuu$q^?ID)DE9`E;tN`j-%gmcw1uTT!bsUk3_FtbN z{U3XdmpXfAOJB~f)8NI6Whm(rhQU&g=k5N8kUhfWq)u0UA^X%cWu}xBqMOw5Cu?@# z%wHT(PMtfHwa}(piUPnXX@(?hj&8ia6`wl}r)tiumAz#M*&&F%Ie)pr`{l*oMq+=n4A)kN zW8cR(q$55@O5j&^p5n7vq=qa!8UBymX@m_0t+e5DHR1?n~ z2QiE6E}i$^p1wa&>g_Cwd{$I#c9Hh|no;v?H$uqnc3132WTc9W;eEO{+1)O>0JqP1 z$NOFylz8ucv#0=l-)YOa5N}B*yKCb8eAJT%Xt6T^=YqqKGWY^E@E*T@FXx&=0;EHZ zg&2Oji7ejt)z(GfLzf$Hs56&2Ed%5p=K@_na<{j!6iJ`LaO@7}r*`*|Wz$A>mj{YV zleZM#)sZ)xGc5HF!H|?xY)yZIy|?XT3g;2WI(SIo234ul%EwHyyZM}3lr8%Or#H@$ z#b^AP3TGLq@e6B0vvHN|u6$TNd=IILIp-if$nFe|+sPHLH#jvr74@8#40S7nil(j% zHSm$!=K`gebCu?z23Q=4$8g^6jzCD}87yy}rG~ia{x;<(%u*nnuig$egC`&t~V z62ptkTg~}j(pNVr5M}A7 znul!82fJ`i_;{Uys10+KOPn8`8sa2>Z)`%#_n8PMyKBAnFndqcnKSVebF#Z@nzqvI z+;jd;lJWT)?2EKFS@P;6X|ILo!tCc9-?9 
zmsD)ilx)sp8|ToqW4{xtM{-ZClcRj)-1cLCH!-lLM<{$B9&oPf!uf8)PDNRu;wJsc z?(Ro9$}!G?jSlQYi{adXusVwMdYZgbd&%4B+`V*fC(WVHaN08ozML!X`|k|a_A_Aj zvae{8-F4^u`PaX<@xme&JuXI|RQD>1IHxWryYnZz`;}=T&9BQbnDgwzF|n|YxPyI# zR`lTnvbWP$LJsIl;>y#AY!ix8my&R#p%D&w?If4%uHllW=*`lQl>g1qgMwi>B@<~$ zUvWawiQOjLY2E2c9;_fGa)KUS19mGhe=EyVk&P0DWpR zBC$cc0=>u=Lc;y!=T1KvoNO%pzg>dv&2WUwiNmq|b-1>VyU1jB7G!rNiOd`wEQS}^ zozI|Lw*~xpYH)l2d)WvlBA!w4_UC8Af@!mI#KJO$pv1@twanbd?=v}5w+*_o>vbVMPYc284=L~@j|v~f-p{i^@+;9p z4!Ej`>(_jAB)bd$u?x+uf5K#)^Z(~tl^yuZnEx9eM)0gl{?(lv%t%dD<~1=}UB|hw zx4q<$mvuBxgRcqMo&8CePh=L3Tuq1UuICo|Ae0{fl7MGqcOfRFc(<0Fret>$$?o$0 zvXJ98Jl|epCgXAxrkV0Tljj{IyR#y@E7{ai)Z3P#4ga?$K|C)v-AA_#ZR9=K-Bj{H zkLfz%;dv6bO+(OVn1+_*h=p$4o#UVF=?`}qq@&9B`~hs@pMBwAdNL;cM!mnw|L2me z|D$8VmGcb6Y{Zb=g_B<{u~(DSIG*GAchh#fom?X4H2Ri;c2h&p^r#Sit8~SbJT#T; z?qW)78Gf%GyNAT%2D$0?=8KpYVkC1q`pM@fJP(qu{xs)jWgdm$o1@`oSB=r+u;%O8 z13#<17%erF5|m&*d2P9CA`Y~DgeBA3$|SP85#+kN`fJPMl_zlbI-g@5xu-_{>-fuF z&VL}MAK@+|UMb50a^gq)&P_g;gX1}Wko($2%=n#LdC^HGyr5&^a29;@nLj3XHt(S> zM?5?wi0p1yu&s1%@e*$Lli_nZ1pn+V#;IsMxx?Rr8QI8vJN8_(wWKMOxT-C zNQ`5KpX_c%q`wrCi^rK%V8EP66!)gH#=izp4#kLOxDBhd1QxWez)# zIq(U*``E|@vO9bJrc1oEr2Z<;x)Gc?&&fcQ@+Vxo=pZxsELe7!cl*mqQbNA}H=h+F zO}WGHg|4_%S5f4%S7Fcu(>;P?<&@0n#$CF{iW$Hxod!lM6@Z#DE9~qeHDwbv+m+N_f<}i-5u9w zcCVAZbU1Sg&WgjUu5Fa<|afB;%+$K zy|+fXisKeKCPMdM9^cJYzjLuHP+9(bWG^V+-R>*6r)K{VFTb+ene1-=g5&6&uEj2F zejnU|Yg zvdv`0A%D>&yBo^=q2BJ7;G4|v>!mmx47rCm?irmTyIUXQBb^-dWJgmG2J%cZyH5%f zUcZEGy{&8~ySvG~rSY|DGUnYuJmZ;2$!8blaK9;C-&xx6%(N>%yn@#^adoGK^!SH9n%wsCg!_7$qv^K}3qwtE0>j%A&yO2+Iw?6V9;*-^+qS}MMnIMVRw3E9FUeU#wg5P9!r8|mX_eNLt zF7c70v4K+Av9*l&eGgGhaX7ak94pGopz@6zm3xtTWOwTxnn`-k>j=0RiO$@g-1@5; zb;*{ZoE9MK&|Zdc&vL?Q=0^>~@FFS^eN-O9hWnVq$?i1B?)u)<63r{e@sN9)Gft%A zPTB{w80a9iWOtF=@61@DEE|U9A%%CxRh{?XesAWF+q=qo-Yt`sJIRmVP58up(Im3F zQTB)Nv`}3F=Fl<0yJS={3q&N)RwiF>bpEo$*9x|L)<2@nmkyMx@1?T~dA zYcfL7J(4~(pQl*NJ=zw$llNHRC5@*vILlaP z=r5XeCi0}u75uk70$SWx4s*VPXIreq!y!;+5BC-0NBUA!c^W}vcQ4|TpgXk@ow)y8 
zM0Pjvxu=ZE(2y~!kD#I}pV3a4Xw~{Frq?^l4YIov+^2T(Ruq*v+*9MMrpAhn3GQ7_ zk8qPfvbzQE9c10APcS}^fusKgF4YIrERmNgD^fK0d2*(KS znGfq#N57`EWRl(G)%(a!?xQQIo`MzGU3E5ns>v^~fP3r1lKG7P$5YBSFq84%5N2>j zHfU}Z94352SsGn&oS~iJ9=x4_f_SLxp&yTXH3@kL;J*A?KX=*08C;^hgN*6;0dKii z-$-`%+vo(&7;4K|EAB9GhIc={trYEggdXD)G29^xL%F9P(#cS^4{k3@$?m)t&`S_g zjaB>Un0Ob783(RozOK2Pw!~O}}owtq0h?(^mJcV~{vb(ZzKC+NG z0-wK%U`uwVd@2PqtLO-0-rzCWU6(=+dPmgcK;MI~hs}4~rIfSN4 z-jnq$_mJXf_>A2w?gLxm{wkonKa6dfh z%yeTOD8IuW^t<9B{mB~a-m#aV`*-9sKh%ya;)L%Z?A1`0t;`p7Caait*j7@5Un0aJ z1^HwdMT3h`F;-8^_WH=FwSjVmc_r1>_fVfq_u!Fm^#1P>wlLR}OGdIH!=JxL`e-** zKxJDbwlN2Fu=*BQXd%5%@^fQOsw4AKhDGNv_Hig?e&7z?*T?9{TvfUszjN2TBxt)`$tK77w6&`8Bl%w9y~mwKwil`HL|-|9!_$Z zd9R_&eT^Wy+k1_DWcWjBNbl-dxZ5~Hp5A$oiU)3V=YbzPk93Tsr zZ@a{tTk?}L=uUR`;^Ph^I6OmZ=HU*F=a~s!QaD&sN|~4YG9eglvv;E6(r3(_#LP6= zoEP(T)(z}Q?D2o>ZY%S5RSy+qqBD6V|NDwrjxz7uXV^@qe~9ePcJ5JRbk~%9e|d=q z|M|Ut+sKqQ&!E)216j=d#WVjmU_1TrWOu%b^vE(FIQvO06yL_G%SH!J2QLztg@ zSmh=g__+?*Mc2-TkFfU4fMyDJAWV*9%S0{FyY40W{9cT-vXP>9k9nUqfbt&zHHV?K4SI`?8e zXCb}ccUUw!$yolL6fSee-}eu47Uf_)*&Tn;cxJ02gIBnVBY%$?ckD&2>w64TNXIs^ zyOE{rby%w{gP5OfP!E!qfo)~X`G-hQN`&v|Fmz(xwoRU)1g>o_eHI1CVHHbhQ(T2k z6 z+Y3=QJcK)ysrXd>8kCDk6rW?4S9*y47gZUMmXB=iC~WGUjr$QlVUXo4>-ij<#vF35 z_rG9bvKw3ZoV-zTfZjCb^ejB&0-vK_H0@=R+FKMc*KF$$f`_Y4Vs#>SCYXCp;SPn) zKW$`l;(ZKkk$}_8N0)9W#kj(j;`oC8413O{H7sOI{tcYj6~+F(C~RDP6L(iwNMpAE zDLB(!@&>h(xg{64ejkR{a}&^M!2|BywvnqZm>Xbz`x|rI#@7oli0p1x-!u$Xdxss& zd(Yzgr?HOgu3AM(mN#yO8mbxrRXp@q3Dz zd_CbI{}gea?{6pVuD-%#=F#ty-Gv7-lW|B_W-+hcgYVDl(_71;g?yhn#$y?C?J>-^ zkG)7QGTGhdYChw&%;jp%b!_+;iPU{$cOli7xY$yrMFxlq*qcUuXM5mxXbz@yel%tU&o!G z#^xN{)m9KIeOKATbH@Yn~U+^B|Ygv&p;i01Kn0x%Y;F7NbVVjiS!Yy9(5Vb*Nr9lu%D!@^Jjj`Oj77C z*w8By$@63IJgo+=XR%9`yHYD3(bwZipRM{im~oe?JADWqSDqkxiH+#?#!pb7U*R{|U1OA+{EAW%UGrQR^E{Y7jb~jqMX@{MD*Jg}d_iAB zj>A{Ha?6A%&yxo?9Dy>u4(*?LN*}Vjbz9m=4ZRQRVz`S;cK7J=X*AdC%V+MGjpDA^ z>R>DO^4x*@yjW}uiNHwuBpho^q-_g-akBT9Q2Hi*?YV;D-4UoqEV`Yzjj8lfSkOSM^%*lk85F>~3vYTXFjR2*<+{ 
z;m{%sL)b~(SJ_ZrbEngYyPez9EoHpQE!fvaV=lcd0Ryh%NWGaN7n;U+$q+TFd;nKnY&vBfUQ8%Djvs+}|34$@Io7nDGj8)##Yuy|#*8ndkJ& zEQ&t}PqMr0MZ4fb|BTQ>qt3H#;wBgAGNKvf^wjuq=e6&R{n!?*B8n&IisQX{8@)EW zX1qgC1bc|S@f^Rg07~@VfAC$iw*f73-G1z|W+o`5-BAHs^>r3HTi<^FtcxpP`uzB?A6;02$9 z^fblYe+RXF_TtPrQc|3|Xb)GBtsD09*}EO;?Q*c^dNVH5`{ctp)NDg%`Ca)Pr|5+m zrO3UR7YAWor6!>txr@v>)>L|=rhk5k$N_Z4?VxY;cM&R`^&~UOM|Nxv6f^zSa)dsr zTzaWYc7)@rJ@?dHjAanLRg<^_U%kgnT0g0P-l0gO(QlQ#;1+h?w3KgTcP|~=OWP|g zWHx%#29(A{)0JfUk+D-zz9V=!)OEmSY_E=_i~`?#;@oa9`W9qgeqRkxh8QjW}T+VG<_Hq+*BhIWt_Mx70-K`(@q9#{S z#?iCp&$;imNJlx`?+fg+cH%bY!b)w9Vu69Cq{>?$Pk@RNTxrp zh*_M7i^mY|#{ zd&Zz@W+eXTRp4bOGuh0H&`x^({@YJ zJ3WanoE~9;XCf`ObY^xdLD&jIsUy4dCcD$?Y$>y>tMQ25#my#B7-U!p-ugxP8NF7_ zcuk5h5>YEdU5{`i(%(3m9>=~rT1z?EokC9^$)M-4Q+**OlHJvRO2MekuW@^#o%BCP zZ&!kcoN!i?rOo+pC%+1yKk`$%pBS~?S*-XR+)BS>pV7Y&J8U;RZvJm)=l~QAROQ1j zcbQYi{FbM^1V_GwqkbB0n6U%I=_FJp>&V;SnRDE;ZTobu;B(mEx;x0ty*`Y+ql zgBgCSz1;C-ZoIn%{r5LfM_=aB@+dS}+{F8i7BY!G%~e(H<@LsvQeaieJY*R9bxA-F z{hPNpa`){)pg3J%m(*AtQEht?|B&51GfIQk>bH2}YcDVOXS?>DxkoKkF<5i}r& z1sXHB^OR2XYDOUR;-?MKIbDkHK*+R6SoDp-)($oN{dUh(IlS>6a}|HGyC8aaJH&j&!x@?Az&(LgR!2}kU+;N( zd;R$=nwsB^uJlF>EKR~)W`s)?oyMYl`tqfNuMG4Klo)z|trG6wUVJPxr$%7jr*ihM znMe)UoekMtLRVAit$r1q=oP+c8VmJ*ZnM{pUJC9%ocNEsZi5UYJopUG)0t^SKd~1* z#qXWl$w;!hS{pC9t)(f~OpoH$aCXA&-O28{FPKE%@e)46o7LRJakrwR{Mq~ey~vs| z`%q@CB(wXvi3^>XyXjB1`uj7wxa@=p_dBMaIf})hnzDkv<*_5!PkGZuULARgj`T4v z59U0j+gZ#%&9m-d=EmO#h(G)QW}FPDv?m8-+2x_&x)hiUqyDevJ>+r`sHwR zia_OSX3OW@#RXI5Z@9Ozk>2Rr9rVSr_!K7p2*D>w#(MgtW7TXWmh5hIgr{V^R+oBu zsC~%py1dFn*@`B#r>FV}-@Q9W@EN~JLE>Wepp@@sJFPs-@1QL0KD&u=bC3kmdp*_g zBdR~ALzjCw=cgZsF8$ao=*eD8c6auE%a#@wmOehl`Z78ZF>o4X1|o) z?GDp8H~SflpZg-=|F!}R`^`k}u)j=LK}V9Wv2apoC!0SP!6`KadP7nW`sXEfG_YrtXC9?*9tzPj=W?q0v^qOy> z-+UF>U8*k6@xveDO=$u|s?%42s6Jkx{t+sS?rq z$^Oai`WyMlEhi)C=yefk+TqB~kH_634e0T)wQQ&3VJ|)HuDZJN(x(sxy+SaD9(S|A zH~4(mPVV!pI(M`OJ5W?PXUm5V&$4s2W~21aPqd;J{(pY>$M2kF)cRl8Qk9J}JPY5` z=MKXfRT&%TA(zPRe$X=?koX2^8L8;)7lJW2xQpCCSKK#mnNN0SV&6v8-0$Nkz4Zg= 
zufKi26n)+@OZU4yKkoo}QEM(aeQx0J2A<&!qEX&_6ASlQ$W^jC6SBLFty;=F`tsY+ zy|Fqq0gm?`Ana}%>GCyD)amG0@kv{ztt_BxH5l6T@7FJWhq(XjmyPSu+v9oJ4+HG@@FZ|ph+S3o;@dx79u>*v6n4$Flho-7Y-7OD^;C>?5572k) zE8O46PSfonm=Rfo67~pauuotx*`05P)^b9l9v9xlA!RN7@a1K=_1#DYP4W{*x=r$K zn~6oY>(IFo3C~JCAe=vr`AVF{#5S6VBY!N5A4OADa>TBKVv0l0?!%-{S~Jg9OVgTF+)pT#nVwy zR*v5bSI%lCwaLYStxDq0-jf5I#!5Oy>J$LLt<$DWknZ=6|2bI#u@NL&}# zi0|1a=s0Qz`W&U7zUeH^uy3W1?xqD~cjnVtN#mg!?3x{e^L-dD_(pgY{UQ8i#=TDV%jhoo2ICATxxyLR0%Ojs)fD6<`)FE`-K|Q>L;JnT z;@Z(&ZhZ+7A3FzG6z~B-8`9xC7zBm9y;e@m$8vo6{-iTl)&J9(MAKv&+{<>B$|hCZBp8 zMA;|yrm+`Di~T_Vjd7MeoM(hh;92+5Z~V(1q1MbbOlx%j{mWHEk3B=5?+3}&;r3$w z^DWvAqvu8|1cz!1u--#Q;&yw>j#uoR+1o}YPkjKtnF;XO6$Zb_Pg6eMm_sZ;-!P$mn(fVnOGc*|C<=VR?u6l?Mgm4;k=?OhDU(C>74UEw|W7*%{{Ud{VJ;WC%`17vfPb z`r(~@MCnAJEMni(6ZTHotffcDF&zKyxQOc$jl`DyR0lu#%iLw=GGcQj-m$;x$HZtX zyI6%JO-o55yE7%bt6<+%b!-VPj|@Zo_XKoiAJ%mCVpVEz?mEm{WSh2B7oUJD*`23a zI=T;dkK!Bl5=~ZMz0_SgwpI~k|NSr`yNhEF*G~C^%~xHd8(G8MH%`){^LNa8n}x|_ zcPstrQ`1tHNHb5-B&%pTXe(#7yu@y^6bv{Rf*V7Madf<%d?LFWPIlL(+FJTs-ox;u zIMf{F{^OQQ7&gb4a|C*X(*4C|nVD3tuR#44=0NRYFz`0B|4PgQklnr4X)oj0Q&xN8 z9O52@!uC@l4*z(JQudj>CZp+p)k`vGYso>~Cx=F)t zB{^|27q`iPhOj^F?m0SX*Sd-_8Igs%liXp?TASUO$RWF1Udh~@lZH66k4=G$$@O+S z={)E;`l}_QD|_1>TAjx2rTQ|<&Q}sT(+B#*O78x=gR0Z9SlBxPiYv=8cZrEid+R4n zJ(*=)Y%13`T!jXmlPx~P;KSQmbokavZj#+)vsdopT%7DEhAldXs)L)@-cyv`-Tsf=X%EbWMeXdC_T-6OF1 zLM+dtck%M2m0S|$?&uWVJWpQ=TAW6XPAIlDC&7cgfgkJJ$!Gq(cFJY$&Pzk0*)O<+ z?C#(J?q}#VVVk3qbmr$WV}+}D6)H$s%^sxkbIUKu#U@o{QN86RllZxQ$#jqnYd%8F zoil5)yI*FYIV7gGkT+y^l|=#4_|sCl zl;1*Tn-~lYiG)7;?%uC46NgNHIUT}1@~Ou1o_%+!hr%Hl+(mY}hnaV+WjEPfCVL!* zjntFdy_hfEAA)T5-JObiiR|O_BJ=mK&B#+88LG=g_Cn4eyYp?zLgb+Dn8Lohsr)^a zmAS}Z&p*I|9Hf%nd3D~8277k!taKM;{vNZ~cV{h}XEZaD(ZKzVODABpURyf->n*`# zcmD>omDtk{@k)W+k7L5{-_8>9Df;2b?#dPhNVAfqY%i$7hcVGe3y8u@_T4$HHJ3YN zck>3F=63Fhhv+pjAJ));;Qjte?_hju!OiSXP8v927BD-5QxQ$$hy^lls3COw^2II}8 zoNKg{2C};>?qJxcTS#T@4Q$AYg8kbl7_YyH=v5Xnr+a|>;SNX5fRvE%NC4%wZKqmGzfEx>i|hU{kFU4Yg*D37t1ShBl{yY7;7Q$<=C9zZnN 
zU1Y{?q}2Y#-|V}K<@+mQgtHXy`GH=ici|4%-FEi6ewwBxswX|94c~u<*>~6V(km=I zk%Gl5LXg^#&deja(uX~M!A2KP%wk~xSoCbGL(8s^e- z*LD2;ArkL$qj4|18ZDPvN+a3btiRhcQ(-7}_s^r5J+^jJ*maok2)c1?b3*dG1h8 zc9PO5-|%667EHKP)baTtOxw!2?%$r0oJ@}^`|kQjzQFv9WQ?ie?up_lOlRMnIs1JV zk=9V+I#{yrZZdm-AC0|?D>sa#>k)Rqlij^lGm|!%*D$_kBz`Q2L1<51L{;`xsov?sl;6u58OE zjQ;E(=G@gf7s)Q9NF~`|k_#Q42V2eLS=U2RhOqB0iRZ<*JC1bde?_rNCT5Y{ZP|1L zU$ZsDuhCO9$nO5I@9xjPjmVEm!p!zOYhOE!2lw@*ly3J|c7al}jTt)kJ1An`T}&wZ zn}=MX7mWKEWOu>rySt!dDk0fduqQhLegBHZhr-*?V;^)V+1;(#zS7r{&s5JdsO`>9 z(`iX)RA@x{&~|e7f9$T=Q*x^`q*d7wD3aYhWZ&KVvM=ag=qQ(YPS1YoDjw{++qi2l zrtutKb9WyK1}n*c*=`cfbAD$dc7;6pghOvLP)l~#m;KjkN;IYIa=JLl?mDvXZU=j^ zeM8uHmmZ3h>1VO-lY#VQpLYMp0rD`m74HMJ$V#Qp*)Jo>Wa=(k7k48uopsEKbIS_Ae0SNuL_oUH6%u z()}2FWSr%p`YA6TT zr4XMIAhoH??kn8Fja$)3xR0rXYQ=NU9t-f z?0w%B7p*QckVnCvc33Xx!>%jc?(SdrZ+es3)U zvm3BtFaKQn;c&o3Iz5d<&D2kBD+Ng1ZgXk-tP-EtcNZ_w7&5tv{U;U@Om^3weRqTI z7|OV}C3rkP45P1d?#;ft11;Oi#Xo`aX`Z*dF4mSmb_M82cDK4B4XYl%!`xhZ8O%A- z+E{mKIaEcStlbacoM{&O?vkpSp_1z&E}UoBv~U)kYu~Zdbr*J!-PyiAh?ck1B!PW* zZ}xM)@HThR-_sA@F9k_SAy}$V%)W0uSr_dizGQbR*?0GI{XOmv$3Z_SoZtCN@N+Yk z-(+{!^Zlh|4zv9aD)1zq4p)U3bX{@_gR3oNabAGflihW@+(O(9&cm7P&g24T){7qF zU{hPkCcB&Pj6KCWwZweMaZDn+d;KdNacw^$YMFyXklhu%bCYr(W~&wQu#xk@j@$R3 zHbg=CjB%A*&JSDt&GSca6Lvq!WG_H43jaHd_Mg?oBg9j7ligJ**h=`x=O_(GMq*P4 zI$Syhx2|OKdcNXEcDJ8>cc;hSg>hpn?A;;|(W)Gc?M@H^n_qlelGvmQk7-~jh zM|liXLuzrbyp?ok=An+xOxF_z5*%9~C#?vmYUeA$c2{fZKz&8!CJzMc_|a)Et! zzcO~hXC&SEHb*hZkavFe-3=nUGqGddWb-pPxFw;8?5-~U40n^510=h%A-l_C-(6L= z+ZbrbbNrt|c=DaWyYcs!MjEd9$@4M4U zauwhA3L+NFR&ySIVR|lNuPe!gV{Y6FVK2=kW^UMbw|-g%Y@>p)^fI6E9$KQxzPp#5 zxKGvHhWmw2Q0cS-s?75E%|3^$nJuK8?CvYGKHJ%Mx5$lsciq`(Ix7-u4J-JYG?P}* z{_;K5PmU)T%jOScckH`6yfh9@7wVW_u$Bz6yIS_$^$gQvPh>IPGlTT)UNQ=G$gaa} zrGcE|rKhJ%?xijl*mt*p>~8AGER;O?hA$VKM4Oz%`Gt#&O!Q z%iB@xxc|U8({G*!FTY2CUphQ)&=0@t1Sa&=7GdArVzRp*6X^uEe}wbyiC8XlX7(z< zv{i=eNoX$znbG=JETzh#8ZjG~Sumwn(zp^4f0;`(+1uXcW)X3<>cx%lJWCCnmZ)G

vv&ZW=}ZYkYs+RI?FJD+YAVy}A> zNu^OZaU}}dtZ!luEaVm0UCGV%VzQy7%(P%u?+o|Ux+b7m_W|Z?YQro?pmZsrM`*N; zs9B%HbrZ5XlQf={-(rBDy|lbX7VynoCTgh4 z@`c@vmzX7;y46-(&0nIdDjACB=@85-MsU2ISkjrfiye?{c3X@7;W`YSN6&U;IP{g+ zpOaxMzRa+8B)c2l-AtA#R$$uHNG$Ic181p08}{Axy%Hb_WOu<6TFA!H=dh7|cm2F~ z;Ly}3=-Qth55idt`|dRDwb<)$3=R>@R4mBA<)7@kyUw$&DxEC{*^gGLBuWc%q0eW< zAbV!izAH#wI(Pob?iznPO23G&I4~^}p=5Uh+8n{Y`5JQJil=;K2Dov5JMm-R-Kvr# zY`0+#{nFDImZvXe9ew427ad8*tYmJ?9ZZXhh4HiqG<+>bn>{A7Lcw1)`T0v_XZqol zufnl}?qKs+92|BV|FG|Fh<>2F*yt;V1{lbMt!JQ3cK4!v64Vo)VWDd~Nt@2!6Z`I- zYG}$QqoY_!b{Cwx6Bg{dD~xlL5I)0isJe;!E=95UO%Eua@sV-+aMn^u;`_NtOR~Fu zfsW!a;4`e9c48^nUC;AJF(q76mW=Td6SBLCN*ggd_!LWvcEB%$@0ea^G4QN`^k?6l zD%oAw`Bvh$q81lQVz8clcPl?!h1Y&lNhQ0>yW=PM9Zckfb~)NQ@*Mv*7Uvh<#WZtk z={}I}2Q6Ru9H=ip3r?Z%dkC&|PR7Wq&tafpD>umQmW8ux>ZQ6EEIo`yhhVIDlZm5i z*eyH4Nj~x2J7+kb@f#J`k+KIhd^Z>BP8RXb|0@(tt_jVGu7#k{n$TNg+6}fzIf(}+r{3`%6Evlkj7aOyIa_IH|V0aEL!X>_GEY3I&GP0 zd5CTo5}>st4C9(hFzAt?ydk?&Cc8_#$==Vut1uui8mgnBFsrE&1M8PFPT_r~u zKS=@?;|JN|MJT8Uxd*-E^ z+soY+0pd}^o{XM1uxx!4?AUiVMyU#pc^2|F-5R6zwU>SDyR++c0X?>b!6rQcZ|^@q z>w9e^k?hWDJ2NdGv}N|P0wnyPdodvm7nZ-n?@gSeKMj&SMegz}n7O;M{WwB)r}bnv zVtO`X+;$gv$Iq>YFa7WVKhTeTcP2l#!*J(8Y)q#|sK!J7J;rl|f}PC%_Z2>`Pl0=I z2v)@uVc$1hc}8~kCWalsfjsM~(2M*w4l(P(;ZsqD?4L%`Om-J{#$R67m`TE4*Kz-P zB(|{cZi3-0nD4ffKV)}5I<^-Z_T8PEeje&%cYnt*lcD_>U(dFc0Q(@`VZ3Edjh48U z9>*m*SH72|W9#t`s2R-X;17DpP2J__3}vbAk%uL`JC5$L2UYC5o7|E2W!^1+E}^sg zYZJDx?`{~`9sJqba9W)m7VNl>;|$HoR=(K0V1^_ad&%x%MxMeVeSLXBc6XRgnW=rP zrI>wp8TxUsX%T_JXD*{$g=bx|J8!bP7uBY+Uan!2LnNHpch|+C78iSR8{f*`4Y44E*`>3HL4N!lX+_ zbH1CH^-_|h75mVacmDkc_o9k@cavYa%2dt-E+27}&PTs+7iA}Nl; zDqGw6OTM1JaCuu)x?RDftr0Mq5sOQI-NEBXD>0yh=*m#~)Yx~IbLuqI$nHXuc%Giq z2$%Kkq$kcI>Dh}O@_9BgshE9tBX>ZD zj;8+XyG!xs`}8SYo^&|PnbJzS9jHOKSuuFrFA~;AIM2{0L#F#lm+VgIvat*pa2YG# zv9pGKcW1`dA$EhcEFru5UFRd4*mvjmu^2*j7qd4RHd!w)i+y(mDa^$8_mqY;>XLW& z5WaIp_GncWelP!qi94O7hBLGd?7Q<)QIOSNa^ORDXOx-;_grP^+sR#yd8-(Cw;<*gtTWa4DcjETIoO4n!bKJmA954F8rgTZHww{#jx{uj+XXYZy`HY{zzPsr4Ke44_HfECD 
zUE7n7i|o65v(|&Z_aND~$xfaYF?YwlJMDi%P!d;&D}8h&z{N+hjl3t0zaZle`K8wJoLX0`_FABA4A|C}B}0 zWW-_E{*&*(8xPSWZKa(i=j}tigW6ZS&UxMvCV^NIolm29P z6P7S{x26IoHb(*ul~1Ql*o?=X5ZZxGM>fP-Q?*{<`hokqMQuq$|KIKOPJ?c=PDoR zX1vV4yXQZ?;$>DQeVf7PU3D1S*mvi4&{NKB=1if6`Qv`iv6OvxAIR=h+R~@CTwlcA zSDeW18XsAS^S3(~eJU1f`b1#PnsNjzGZC$Ke)2Eb-N=QeGGgsjyn7IVl^+()~sC>J{aAG?bhM4#GIMJaG~lbvLDR_wcrKKU8dlXhY(*nLWdTA3HcF55?#AXP_5mAR(!oHBI!^|kPL8H4eS z%ndHRiqh4l@`>!O2ie`>*(RdCxf~O^(IZzHi(1{g2!CxQ?f>F@uq}7B=jcnE-f8@x zYjhm@?ldPo$GrRP#AhqBp8NP5WZzxM`NOa$yL)yp6FP=XxXZq~!DM%S%Uva)KtTpo z|6ku-my5YrqpmDYHE!(1>LBs#yNh4(5pP^FaG&h%ndNazVc%U7+1--%oEux%NbBd1 zapYSfW;|o2ud62F&V{-TD2nl${rEp;zk|tPYJt1pDp|uQOvG80Twbcbg^~ zOC`|jrP_Y{87MMk^+LCxG8oaFEAM%VqQbySg%Rqj&% zhV%9sd-3n^9vyyh?nrjm^U4WmvhU7il(+b)({<`chgRW3$e#o}CcC?|s{}Xm4dvK6 zy3)w*dMjGW$D>uq9vzLUKz3!UsKm2%<}&T8znrpXxARCNvD|eLPutP?yg45B%=?15 z-%7GOb+Wq%(Ur%{1s4ws;XFST1vg)#b(o!eB)i+q9C17L-91Un$6sW3j?5XCCj7(> z=8iY}j*yR%~7T|x63lvJm}m-*)B8&2YH z=AAn{q!+n5P#!P`tq^q|MtTX@ULS^O+e)FuymZ{l_F_(UH$&BeS>GGz%3Spf=Bsnq zch`H3g?u5q>rQs}hdJz9%wvB{4}y^fe)EyFCNb z&|2>uW{1AbKpz$4`2@2-TTblh^qUI2HCGwOy<-bC$T3Zjd>I1*MH5dhd2A~ z#!U%FpM_=kbJ<7+Gw*(rx%WIZb2*oB9X9Xjm|!k`Df{j&E@SR4DnRxzH{Zbg{1fKr z-N^24Pfdg|^YzKh*_-pdZ(-prowc=Pt?>!`JQs{u=JL1o{eW*S4ziZ)?jUpf5zOzG zhvgx^nC?a8J*d!EkYMKgBYEy9*x@AA%>N&u2Ve}(C91CuVf1!&@u4SRd@}v=^ad;p zC6`I#Jfk`UD^*Tm0Q~~*$?i1C?i80>OB4MAN!{pG{1T2{6E7o?eu7tIcXQ|~a8Wg5 zAL}&;JqDxbGw|45gC6r*$r-Y{m-HQE(t9w6eRnYjxkE5%2X5Saf^_;3vU-xygn5ba zJ9;2z9K*%`f>FS}yCcD$Q1JgqI_t13)9z~niU^X@2qN9xsB7Cj=9rjccXwec9U=_^ zih+Ql2r7z&C@PBGfrWt`fS})czu$jz%rP_01NU>?``YK;=lWqMUSxMk^ew1GD9KOV z95iP=m@tc3_a2I}fqsT9tQR}ovzH2b8$#)CC?&fK+Hx2*d&utScet>Wdw29bI1GP- zCy_}o^$mj?eGuoEcNaIxTUL7htEbq|FT&7_I!PMPd(y0Nyz(OY3jcGoy938l^7FsIT=%vfjdZE}}m zRV}64dG=W7ztE%yW8CE*$UMQEC)Vjp>B;bA-d*#|eVENUe!}DZC>Ww7OX<~EM0WSO zwY_|zZzGc4jlg9gaCIugoHA80p_jvq?5>EOj*Hf>uwYvvM$_Msmw6gv>GROP&OFj; zfANkrmD}`v2)!Rhu2J|rgx_ zb?Fxw)Lct~nRj>hc_@}GNk&@0Td31VvX$%()7@p@VHLSUUr9FET{QhAqEUz3)ef?T 
z_p5&NoA@&CPT8NiT)cm2(SOp19+YwPp$sFtE9`A2o72DGJbfvZWOp;}Fw^$Anmkpe zD~9ZD%6SWk`caMkp$Rxo@5+spXVHuvmU*3h#E|UHHl>Z&HqkLbU(3ZiQIMh6;Q8E0 zzLVXppX@8YKkLbyK&W z7)FoGE3&)d>u%B%+d}H24&oTuoj<)ZMgtpgt-Yh@@vLaK-bsR(cc*+O8_UV=;_l>P zDg8A*$LJ>~yF0qdR_;#t1ozN1tga8ikFCXc(AY|X>A^`RyGx-Lr}WY@ER2iCaQbqV z1)WDM{W;lr%(y4JGs-lPp|v;AK+n#REzzh8s>1MR#-dFRPtso>8U0F^8H#0i@jp)w zeLbHop5nGkJMp^8JyQC7n&|a0es~Ofy+R@TQ{kCYi*Ju?#E1O|BX2i(x=>k8(+~9G zA^WzMnQIr@h-N?k&m;7cKB3j>jnH1Tn@%m}XpGH=5xqn8Bi!Uy1$V1+Y^0rSEzc;< zBGXfJqzn5d1{$J3e^Kda`bgODJPq#Z%u%>{h5(vxFkcYVq3%y%@;k!`ZLfR+{TJ&hZH6?LE~|3Uj(w&o=G}$g{R26`EN=EQ?$HBf ze5oGpj=OMAF$6C^9)#-M7P5=psH3^OU){BqoDZyZ>6P-PUn;Cw3DR6OB`DTQuF*gB zh#smgs~<6|I}WP!Qyp=+h!*r!?O)|9fn;|>c9WSruEcSAt@&Q3tvlvG&*445Eq%!ZWeZ7TzCA*tLe^x3zTCNL@;>T{zV*O3WTAQ!eCFL<59eKSth1!Be|VYxu0@-F@ovL8HL|-cn+xDT&zCoS zU(?9$hX17#fgZ5;^nrD$;|%%blhEl-CPiOZML*vE%-V|jmiq)G};4t&< zmOb{BR%CaVGY!Sy@fG}3VcyP_So$65%%rF61lgT7*(T-lY#~lsDHjp(_qpzf=8px4q&Z$k0L|y83 z=LrMdj|+Dob+C5$*f7Iu#q0R0Q%^19=t)`Q7f5Cc6Xn?I`7GxQ=`8wm+UTi zQ6^?(|HL8s?f!7@?qPznwEw6e7Yz4d5c}~%X67KGN=e=xb&;lSOW zZm}?>$FEVp5_3Bn$zig){$zJG^!-_Uya?l)+&^9xhbQF^kxMUNOZox-&=WWzR8s~f zm0$qb-Ge8|*ulKJmQmJXn8hBNo4d@VZ}2a@gRW$EdrxGcu%-@O=p{5DyZidaNv0(H zMJ2t3kI7N~ygq=&ugyebjH_HCyPHhU;gPb>aPm$^b`^crD~d4SpH|`)=qZ+Dce5r~ z$WY5_=(!{?UpO2y`<_GR)vaYDy^8L^{<5cI8}T>1g>J!|MK+4YIC>cWp^wqx4QKEI zm|IOxW7yvdXrQ<82nMty951>l?$0^diOXwklmI3NrKjT|Do zyG%diWqKl&>+^A*?Cy@*ZrpSFjW_H6FYiv(%Sk5w+k^-7O|BukTSounXI*76|LZD- zWOw!SQ=X)!(y>(<9{!KrxgEp9spvGiTmoeakVmohvL)s;VF zcNe|5TSq@;oazmXI?wa{N;Hh=&z#W7L>Bt_ODnRwPHT0gjbRxy$nL_s$K$NdGbqx- z+4w9#M$yZ;XB6388+wEcLh;Hd4UHQ<;+UT;-A93v{>xQn(C>L-b{^)F-I@KDjj*{* zNNmrX*8k1mt#Fh>+Q0FGo=~saZ8*ZbyWZX{q)jUOx@31(=@orQzv$F=?CZ`5g&Do0 zpMy2TnI6)wdzmE>O)gON7+>fqz11}WaR%k+mY^qV=`rn2b~me=v9wXVj(qKCbf@pM z-i`dVeH+puR9K6CTW>_P0|zZgP)t2Lj=!6zLg{%Re2UeSg3r7J_>z#LUy;C9@@CBM)b8^Mc2tuxbsgeD!P!HdvO+->@M)9x8%}i+h^n% zlw79I%sUb5XS~Ga{+6r4igWS;+l^|K;6Xw-aBoJC%GoCg{^$ zNUyFk&x%X-`%v|pv&b3$mv9uE!BGIZYq%S9*c1rbVJ(Lj~UMGvM#aPhR`br`E|( 
zCMa+|<7_1M(=Qw}>K+>H&7_>{t_|5;6+Og08%r@{Y#0jtlIX~Ljqfg2QcZUEnEv8# zEmXz0^%2&;AvltoftK_fw~nVDo@e-O`j5Rc>4$G%UNz5n!-V}fW~L;i^d&d&9q`r9 zp8m}5XrNE|4%wX@{mR3lRAtUs54koxQ2Nln-1Pu=xsN5{JK0?y`k7tnX*Q-aGvbrK zRG&7LsKs|N_AK{gT14U7_bd37XDI&kJ*$x2y`cX&xMc-Q?C4DXv>o%8Jb+r8wsMH< z?w+c*X#3M6M31yK+1=i*^r>F2K^Q&LS`mS=Fx*{E)Q}A$+atMu z+?M;0^i~H3D@bm{Uexg2JV5IJCeUv^?Wc>};k$e3T<)8weZ^jSuxrTflIPGdaX?M# z$nNer2g*)*v%4|xPU(08?vma4(5vl5zxE<}wqy4(8?&N~{Goqat3C$(bEDAyM$Bb)i^xZ@dz)Ocejb`ZeF;TY^C>m^v4tU%9`fc@D%h> zd5?|sgFDj`{{DxXL|x*}?Zbm`A-lU1unWD<*W<8}ql{q9beX>K2_=7UivICmtf?kM z|yKUj+fEuzQ0Q!Q>3+DJ#TyI!N*WXC>b8QL$O z`{LVRKo9(Wl}2RV{6BU#o}Tz6%NjYqyc;HuIS-|ukBRijZ>Cp%7Hj3UH*I8oR4w+W zq++aJD5lXv?^vxaSq+|2RKZzjOU~di?=JWzGpB!rW6kR_eEXs+C&}&xklii3Wh~)c ztI(d_`75XVoW6p^v{a*IZz&U;cWqRH-N)8D_P?;j+GI7tnEZ-40b z|LNTTrIoudnC$L))`NhRJ6Upb?H1%st50dV%%KLD))mH}2MWOY@-m)yonmeyWG8IuJCJr$zdMF9_CWyg`I{M*o@(ij?HT<^AZv zJHON6eYnEByO8*|xHxY*B1g5JGG z6!SS6$?l4ocQ?{jTjJ+4)6|HL3Fdhip1X%@LBdxu1bQF@?D$YRoVB$sCi8*6hs$?E~1WS;d@_4Cb9|ig%GEWOqT# zK{+<}8wN1%ZavwZ!RkWHWL`=Vb5okw3o@HyDKW*&yJN1(HL|;P%)46=!1w8EZ&^cj zSIQig4Cb-CoX*+7{+uB^dKG;;`tyhw$0ZmL49~ zV`O*f%z~gsq3Rm_i0%bPyYck!xz<6~!jLJfA(xC|3m~#`; z&Qn&ihxaMeLYlT!!!sxWmzj(6{_a_%_GvA5$?kqIH>b_4Hu9J`Ixf3n(6=rM^~Kka z)7n_tv3Ho*&sWx-)Dy3ua@-snfxgV;$=UcAyXTQBWdw*Lb9)vPX-Jmi3A7-)t2~_CEp31fbAk@BpV69mcT-L`Avq=+)5-3N zEb`ES`9kvCRnp1sjxcwq|F@4YW!~Lovb(&;#Td?fqG`-2dduExpX_$>XzVk5VUE#P zvb$@==dpXFE@#YqWC+>aiuv>+Z@qzIubFqZKN`iStI$KoMCM!b`}E33<{Z+MB<3fL zV~&z3^OPR%e~JsE+R1R{EVUxL+n+`k{p@3iwhqON>Qn?9f28BdR+7o?Ch5Cz_fc8S zF~3QZ>@M0O8xhT#_|G`~|C}cq=H2aF^&8%S>~oOat!SGM|vF zp&N6$CI{5x)!Qt*BD?$B;SjcJs>p5=cS+hqpJ<-7D6M^mhk7XpJroLspc1TS9@t*y zf`yQEytv+0p6EV8pZGXjWq#P4@Qe61PhSp1`btrnpQJA|lH1H1i`p24Bzxw5KD~{r z%BFJiIBSwtKC(MThrZM^oQnxV!p{WuJYM3SwWVzKV>Tx9&CW9C?1{=z{7nqu+~7`x zZ2yY4%)8T7A}_9Vkq`BBOfWC4gzT>KEB36*6nLk0mKS7qOPH@#|MMpbGcwVgxogIE z3b2|vY;K2m29e!)-nNpcUi1htuPseG40hJ1m~F!h5Ib)0Cq ziTlA7P+D#v`JcGo)Pvr!d4^KG;tK9Qjl|1uoX=o>T;y+4DIvR4C%dx?))wuDrOfJK 
z{%~C)^t@hUUvn#2Pj>g1xpXg?Pv<`V2%e7Tj_!gC#NYakWz4Y?vb%h17xB2KD4pd0 z*q#0m`qbVi$_p142_d^X&wp-r;deS#xvxidcYMhaEa87It*eKOCc8UTN7s(tYwYEp zdyDMO@_Q*hMQF?LG;g`X|GxzvIj?*FE-uu>Vp$E{_xv28YYZi-ub*Tw&+p9)14&s| zf#F^0OfKJ!%{mX@#r(f4{#+v3dCRGpTC%X!DKcix0`uorJ*5U8o>|GfEzBuop5QzF zoPU&Zm)$J{rpy~`)c*x%{yrv<-8En8EI6tl513DQjK8^ZI5CopM&>R9%4s!S7KzzoaYy)=kwzECJfgr&R|f6jx5#jk$Zfu zw$!x|rE|AYYZi;l@F-~V`CG8gNCs#5$EDP$g_3-j(Y^CQqND-JdukC4sW#)W*I z-|u4WGE`G?nCp0e`Hpk>+^yQ_6g zMarEIoU^tO%W~$EEOiqX=H0pO$cHA`-O7ISsU`o$W9Hp$=AA!V%Sq4X*N^$568;oRc|Om5 z5{{zKGCVESm45GiB$DTNelrs(*j0rV8PUl591Z_1H=wZAL{|0im#~vQ;?P%DLXMq> zF4^6kh4Bbq^$Z2fNxjYYhku5rcw4KB$%SHER-waaP#Tn5f5M${wo*fOmvYZla`-;F zs-A~FWOoCZ%ewJt6Rt4tE`;wd#bJ(ecV`0*GRL(y-*10M9K@5EEyVJKn-r7XUEzCh z{pt5eKAeJ`WOpT9_@3nZQe~2tn5VJUoyb~u>SJW_y*hXb{T_?UQFU2Q6bJiCXTE&ft6B z(b!XlsJ4=D=HG55yW6!Y9rFf!L6D1`WRTrO`MAms))5||2arg1=fOPPavcRZ&Aht} ztUG2Tu>KfRheflpkj1*Bvi1-Lh0^W)ue;bL^ZUxYySS}y>3~SaZnC=_DknK-rzw}7 zv)-8)Amf*`m9E_%!kBq?(|^*%I+@wdxAl4Ma?YA{)6`~0;*fk5ZM!oAggL@D_TAw- zz*PM1bC;6r?wq5J_%ZLUDnAUHCo==&&P&*>u#{TXYxQ9sVqL2${iYORJlWly{GHet zK_~hzI~hQBw=jZPNMTAcT_*>PtabI~vexxdl*6nO7n0q*zr)=$<|PN&WkQ$rq+7^g zJlaQx8FQ64F6HdXdMg<`}KBGlpt?WBfHzmoaSJ=`?$wC_DBS0 z7=q}6f5h1|T|eo=de`T#f$T}+-^+@G1M{DIGVgB8WHXs-8X!J&%x^W+mRXL>673d- ziWy0WYxxE{nI}D-b@tvyI@GVVlsRV(V=&oWd)DLsT>F8#5@vg_PVdcp>JH4i+nl}+ z3;EgJJ>8F-p-Pg@`hFzYU5JjoC_Vg!In29G;XRphoXp~e|(<2VE`?{4R?wh~EpXG?ZBi}~LFjX4Pqvb$l+lCiQQXBoY$xjV~eW}3T< zKBOY8tP3!P?Cy$d7Cdz7(747yLV3S3^yX({-d&u}Ui{(xYtM)S=)(N*wY-=4klhXL zWhbf0UvQ`}9pL?K)cvCd7 zWOtvL#~%G&LjvPZz;HA39V1epz4$#YtJ#PN@3lGC+(ad&g#<<%#2T`@bt~wY__qN@ z9UY|&&x)7KgP+px53+7%Bbn?j@P01)f6Zk8@6*a;cmJ%km5*aS!D(9>ev{pO2`xtX zAI{J+f4&>po$8Zz(opscm9g9_iwMWLvAnA^6Ovu|(U2=Du!w?<>r zxGJ=+HkM?vyI1TBY<;0CgRYdJ;Zr!;KHGsq%)6WA)=nbH?&803Cio_EKkprbIoVyn zfm9gg*J8pG8=1&{L~}1Uc{^WObQJPYNOm`beT(S$Ms)kd8kcAMkxPye%DlUh<-5_J z>@IEs_w$Y^i#q!t7s>86?Y9xJti|(5skp;_NlEwPm}$g$5VE@orvs#leU++IW;+Ex-wFmb& z!I$*lrm9J2=G|>2yE9hYi5U)GF?fZYRI>lKxYkAPyET)Bh8#>_f6$73!iq=*IXKQ) 
z!pZJd4sejo%)9e_l8J9*cSE-oz^qP10@!zSBD;IgWF_|x)j-E58SlyNR#%>6ze`J| zYSU>xoLrJ@T1rOL4oq9DE|Hjan zS!pUB-#Ayp{^_j4+G6i{8tusLc9+vJ!Mr;>v=oI=%!(bxeUo*na*_Sm*E6^`=a&JG z>EDsi#a@1}ZyVRmMO-wM#BJk#+-6_575lsA4zbqN;2uir_Od?AUTW9}-j|Yr8nU|> z`y=RSpenoAFTOB^nLCzNvVS%E84gKsVqe)ewG^pA+A?pex3nj_8^yf4bsg_vsn&L+ zS41MN%@ynxLpe`&XF_&&JJ~?GKdr!6_OoZFb0_1#eaIOzc|>+s&c64UbS>#Q;S_G0 zg+cRr5;idJ&bio%yJ3NH=RC6oCbpDP_R&X^-Bm8h#OVD$(Oktrw7Dy@Ki*k7e^8Kr z^!H&j`|;JYbLi+&63t>48A5iKQ)VYI%)9G7aVLI|-F3fKh%sJj@+IFxhLGJAcd?We z{a#|2Z6cz`1NJO9gRygTWO$>uD3aYdGw;sb_73#A#-h_4?wc4_!lSE^^ojM8T`9g| zvRz+Jy}JmL8xc6ZoHO*7AJX;RR_2i1*^u3>+{WiSz65zE`CWdNj756yur``}gY53E ztGle|#%E~TA-vZJ!Ml^3@BC1Qh;j#MM^1A5HS59ce=#YKeO+>tMCRRn|IWO-v97X< z>`w8wtz2c^otbAkOmBo>*y77l0IXO;#|8p=|OLjlY zNAlMC%Xjjgow~R1V@(XqOrr6|^g3FR3l)&vsgvE+Zeu^A@d8xI?gozDf$bk3!z9gI zYRT@#k~6)Y&y2QP$B{Fcv%>W$_}lLToTl1H^%>qFw!6tK>lV`gXFejy?rb!6j8IB{^7)zOwizaF**qpoBZy%01@YIjg2&yb0Nz=P{H| zR~K7ywE(ia$mQ+Cc@{h;EQ{k}pC__=Wt{i2Sosiv~?L<#-iyJUL7fl|_+-e*C zHF;hm+1=nOADOXQSFY-o!HNH_YtMLW(tm~>TiVI6=K->Se6VB$otZ}bGxh1WFiAu8 zmXELru$Ad#cUykC%EIQ#qBWDXF4^6RN!bWq*o3_aeuvrsFR0Ak=;FyG?$52A7gy~9Vp|^ z{-bd@43qT4U3|&F{p29|>h`8IPDJ)@*akFoztg@5AWOvVV>6kdHBrnPL8`%5o;(^-^wmdI~= znZ4yF&x%aAw`W~pa~RQ!T1xp4 zj=kYN+C0Zgk;3_ibpc5zNPmqbZdPJ9o&9poQ79>^N++!&C?6Jrr}-K9+4TpeCfLhS zvb$x?T%`R@MKP?~htE9Y)06h&u(^`d^mmcxd~6w6-i!SW#6YN>ju)K7RK6S6bx(%Y zts1DbqGN*WZVu;P%3rIBNysl3DdM}^V2+(+xA=;fkLeh;k8?H5yL*_YChtCYNC?@TeyxQpX5L-D;sj(h zg<-z!S!mtior&{424r_fF47M_`ZkJx#b8-p6i(Jv!u+ricXa$@6ti|`ael~J_aer{ za<{sYXMEBl=AN3%JF>f&Fy=g&Yf7*8Cor5f&83kkuy4g|=W1*DkL>O#=a!PoTS$ld z2l44IIe&*;2)tO2Mq@`A$C@cR(@AC*|G`M+-5Hi|L$l~yr2R*KO^U0mW6gCT!&Wjc ze!|AnX^1Af8*5n%hl*CxX9=B|WOwgcTF8yQ&++{WC95|xK*;W9MeL?~vk@~7A3W6*o4E?a(k z$|$nC!4~bL%F%jl%jl?9xyJFD$4$8H#lLZ>PytcgYe=kT_u-o%c4 z=7J1m9lzH{esf+grt^73Y!1iVOgcE;JVRH`^>zOhAU(Hu%Hi5pvSfZS5_yL(P3F%x z_!BC(@Qf$BTgW-VpPQS@nKQh5klkIc$;PSyf9PUxl4}0mj(RvsbB_ibPZmiBIlm+li4?oBr4zeOWXB)hxDyu0?gTC$_wOCAjm5Q|=IW!R$!$X1Vovu-3TE?mOImUJ(0p0#Nz 
z&xo;FeVl1z_~>s?IPX(}7Y?y4^_3yJfwE4H1+!lW?fu_xkrmsc3iIoc6q zcXQ`^h}BV5S*BNrfxNSiZrX|aP2Uh_#@Ph2yW-jGaduOZNtgDc6Yu;vh5Jx8Q&CRW z^1jTTfY~8?nRws_wsFpP4B1`IgTv5%-co{xwa z?X_j{0?u-g-5nTaCNr6Lx4?HhG;$-+@#JMR*D{m~>}AZ-@RQst24dauGP-Vx#IKp0 z;plZA?%U1e3)!6`*_{jX?nV}!LY6z{oYRspbMYHEa{jsEf9&oV=U)<9irW0cxWPH; zOM#gv!%rNqrtg^SuAKAKT`d(wW#m5eJ{roq2aP zWOt2g3o-hSszk>z+nK$f9kVRu%#oL98k&fhqhTmw-rb1yI->K2yVYcOy8oF_5 zPK$-zfG8LqzY0xVBk8+_Ikb87XO!v7#-5k3v^E0WIcIJ^MMCx zhggW`=4z}PnSc~W?&xw(e_6lQk}%vy7L(mgo6$zP#WFK8GX^W`qp-H*8m8;9)@AST zZC_u>JHcJA;pKQbhW!ld9hkE9F*+^aca-d|?`X0Z?h(Y=oxuFXp*VS-dsfx&@zKjh zg30bq9deVUqgu%MP6uJe-llQoE<8KeK=+BG6tSOioBIp%N}A93CLUz}Ch%FuEA7R40yMNT_+Id!tZa(TVXrHGWOFoXY{u$WvklRM~(%dFdAEeX;U<1!vQa` zBJ1dSjk%xNk5CpDhf785)kIyynFabXBFa}R$nKuZH@0#pnB&eSAA&`bDDq z#tK|nVIYG)`${{qyQ^~yW#-Z=D18!1-*qhB)!&7G6Z;to?DNEXOV2gh^5SkOM$Zpp zKOzxted#G~VI>zQ@ZA~cAz!&CH)PBaY#_V4vM2+a?|nzrGWMf~q5N=b+R zV|PhIIj8nkQ8e9LWC7XTEbiwWJ@g%0#$~{G5@#TmAHg*4@g;Wi5JR%N_P;DePvz%Rf@_eZLv=0ZZ+B6r4OcZ1^$-z5^Y7q`P#{{ilPG83K70dm~jo0$|^GO*<-WU8^B!M(+j88w(*&3?v~K=Iz| zF2A_nXmhdv+sN()7iQvv@h`059^`zoyQ;;`((MR$;IHxb#oyc1%Q=|aiuvRBT%;x0 z-Cgcgo}T{|C+v3ODB0a{i=*hq{mW-JJmi5l??8r@vhLXnW*8-K50~?em1p3Zr6V0$ zb0>l9?%vNfGO_eFVoW&~84<;O*GhPAFp?UwyOM3>lH>K|3im^G^CFmw9EZo=kC^q_ zRvz$q)+W2l9HJ?)Qo?g46hjSCFe>96R8Mf@XhqTHG6e6_2@lHJYcKI~Ml&#>BX|^Ddi91le6@YcDBjuOZh~A4kei z=Fz#QV)cU$P>8aTA?1Oxeu^4&wIg7UEIj zCL77_MsojrXUTiy9!i1l%1~VCegYqh>6nBSe$9-fDDfK3*GA!0ehhLGZ^3aT_wHi+#h&cWKCrd?cl#{DESZfmBLO)% z)hJA4zAfMT0Y;q9P-!KpT1C(&yDMD7T6f47oN~33b7Xh(yj>-Y`}+x74`2h?-OiSK zaYk1`esNZ4DeDf?cnA49vW`CLEWBl1vg7k1bPp$g9!kcZ6ey+t^9F2uijnV&^zIP1v)ZehFT8^`(6wXFkd9b}1T3Zv0g=b&Z1Q zl2|Oxy@Q<#OywZi-N>ij^4gyHJikvvi|nq))I={E=vO?n7)VQ1KyxRSQ3K$!m#$vDQuw^!gsW{Bw2HIa|3;9 zR`)S^`gW8>M#6a9W%PYwAlYPhIaaK9n+(J^`7-8bMk1lxc6=?n2j!_|@|^5$>I`qm z*4HM-FU9e$VMv_CT37uIEJj&L7VGSNzv)oF+EU_A9mel|^r_WmAn@i7^gm@UEev>8 zRI}EdrYJ2__hC6d+oJ0I*fm^9%ICTW$nM;98?-h1yMtcuaPux9>De>$PQ1mAAZl&e|c;R6^D6Vi~<3!`%7Yjk=1P)rRt! 
z?5@7dm)uuh*6g|jGu220wTZ*K>IdjDqOCNL-3|D{d0ho9={o8pLZ5}gYeh1Kc76** zA8S#FBR8DNeV%+3>1(DpYK@xbsdg19TU3w^pjn`hZKCulFbO7y_ zHly$JXz ze-_8oTFZB`J7coDvk7ga_Qx$$(br;mKMFlYU&Eo7^k5|>;d#fQ1+fN{PQz%+TrQq|j_kg;M^e4MZtK@kW#W}UmgXl(f zSFvgr8izMvJPlbEJS+CDWnI|o4?0(6;}qH5vxm8K12>l$CtT&>i$Hm`+*Z^^eS+i?ZbXA@m9J;isgD%o?>66 zE?T#bAKw_J==Hzo zDDll3F>UE?tR=haH7Os;CzPcv{X>alcLw`xB&cmI-v5`1i9JJczSnUKHPMg@di2AS z-T8kv7u}>MII@dg53)O(UKfyPrzh=<>G~wQ+nZr5>mFW5pP*qfq{({66wZ%`wI^#O;(eCXB|ZwvOD)?J2BMdD+*TH$w&7827hpoXD-cTN?i^H zu|F6ewHN23739TuXIVpb=h)vt;@14a--ns#Z~4-fv&hD6W&h9z*i*e79o-{w)wTk?=@~mqc6V!-pL9y+ z?Cu@by2??gtfYfCj?T<$rm~mpZUwz%;fJ*4m+NUv+!ThHmlDx2{}s+XvXq{q0_EgT z)}(7y?bH||5pVMWo_a*?NG zcaEX<^f3Lv+N2D085M$APDc=6M8^a@ZR^PHqS{$;KJYcl?2@=A8pb|LDOQis7Hngm zr4_#u^uFB*xQAAnoX@x%iI(P9FpGY;3GMx4>2Es6k__bD!wL+hN3MG3cFa3?A1}|E zNnao4=yu?KcB+91?$&(F3z6aVD> z#E6y-V!?j<@*U37>z#t!YE3UP+1>nkIS9V3Bwoi{#Fy;upYwLIr|~OXC+x)7m=HMM zD8xTLYEr`;ofc$w?wu_qr`Jm)Stnu`J$%m=^R6^cN6ebMWlOogtfj9n(((?9Iy2{K zZWMl*R^nSXBY94C*O1Iz*BE^XdUFwNxI@%&WgPlld&s?)wsM#3t{-=ewr|lC^X;s4 z$?nptlQG!n9e%~o9h=2@T^D!R)J28+oUC=p?wn3%A^vk69$s`1CvuYYubgCU)L+!+ z?1A=a-izrm{P44x^d86jU9!8BdRuu(-=U6sI<}MD^$9M5(;%|<4xX|~i)Z{;3pry_ zjUtBx=B9m7dTTCf#+j5GxhZ`j-;DQ{{sQCx~rEo&D9XKs^c&uyUX~Ug69K2Aa$CJ=$+-e zGuBO>ShkQI-|`VecDGn-H%h#IWBw*bapE}`;?Au5eoe6WCmUDE?keZz;i;jr{G&+M z=7T`7aI%%}!5`tGl7?4gcey^tP%x9{E`63yAJbj6l$`DJQ?&Jq$H~%gEcGr!cqH?h z@A*iPC$l}pM3ySu!2L7PSaFT~&fy08VJ5Q4pYyucePsJeT?y7MLoe>^UF#K(zQ)hs zxUHQWB)bba&7GEEWOw?<(B*&Z&NL0>p&t<$Xe+T~ck_O@%1R|=nLI5ImSlJLretH` z(k2+dNi6u;l9xKl)t0|8{O2yDk=nVkRl zdK_mr(zQ=^=T3IlBf^|~>@h0)?LZ-YrM=9`QIo7E&&lp=$nL&&G8T=7Yv`*PjaQ*D zaP+%{dbE)PWOs#=xNkD6wJdTyhdmMwspPDb1k^LO|}-myF!On;yk_xAy;`u z_ejT?i=T0}rJc0=Mj!PMS6TDCnf!Qp0Dn(yL+=ZFu;RpDocZb`ntUdY9cQ+4Ssk+X zWMLE8on6EsTpg|=S^@52nh_{X>p7>U@eY=i$#_L}S5;Vo_9>dO=e(E9B)jXB(N=1+ zAL8PSI6UR9YJ2rd_^?}FJhFUcNw}YAcQTT{Kd++kzbM2EjD=>OJN$g6QcQMt;5*OK zART!)_zXPA?l$=)!fL@wJQ`#v9Xrvv!d+Vbc50GPP>4_A?8h%-t*i14PB-mj4%uDF 
z0qz-}R+4|G=in22pCvB)aQ?4?%+7L_&&JI1tGAam8-BuYLMGOe-JQ2TjEzfL%Ed}| z@gcjLxz9>+@4kWJsU*Z(hhb#!Df~U8B^~MgjdtfVdC*KIZ@mxIXrA#iB4Mw18LM** zWW17}?Dh7Ol^qOaMg3*eltyBM*>>blxQG7q4d;^Gtqt}Tt=`)7ij~s)MW0#wBy7og zjXHNLnK3<3s@pP)zL}~_Q9pv&WOu3s8F<tdkxOKE@UfR^UB2UxC1;q)?p{^!jNh&*6UTc`R~#J*T-Z!JmAKZ#THNssQCjCXfyutA;YAlcn?dZ%x`RFVC& z3(%bG&iZ>MHf{Tb#iJd>ob2xA5NGMRiWwmEShqU14a@ZoU`W?yl3V8@@qBk5on;q!Vu4*#S*f;s)({lnN- z4fSFkuBLd`oIptu?{+~coV|UIxi76HF`q7N`oedYwUAYJ4&o5m-9+qy%N6!B+BnKo z)=a}PoMh#ZKln!fxG&jVc5E(oOlmIvsm#=5%{6wXt(2VqgjJ`~pivuw;q;jsUTGzM zOFhN9DL_W5ScqD$=jc}&kHn?n@N0P<_usS@%N^`llie-7Y9a=`ZeoE?4D#qvS68Y+ z@DXEiRq>ZqI=-@rzV-K37w}yr0?m%@!2PpNkX>gk3g@{CN_MwVSwkx9j^k1HQ1n@r zie0^GkzZ^h$yWl!lCFq-yO~kigE@9&cW0tFr>4~iyF35K?jFB$l*oCFu&>yS#bkG> zCixh&Nm=qcvi@hS+~vBB?B808;)GNz^bf_N$H(yJmAcGoq}$?BfSfRIC(X>C;-Bku zUy$8d*OnpqhpyPsYwt&P_vgB?Eb35&3#+10uO94bu%3+|WFRD{~<1uu0&xIa|;5^Zo@gJjjYWOJ7m00`nE9?|AQ#E*CX(HyTr(6spQ~`Nj>1spO?Y_T zg?gr`G?3jHk=?!hrY$`;pT?0y-nkPKak$4Tyw0|ion&`Q=6Q(c5mkxPDMank5Ul;X z6J?6u@wu(N1d-iE&2kavE=uy?!hUq%o!_x|A5PC!l#d^s#h&c0JfHcXdw;Mmn}I^I zyY-I`L+M3JS=+)xYUZ+^-?$p=MHg;vSm& zY)8AiNE|$U8JD#UWeW2@F01>=iwZg>0xrXAVJ|otok{4roVD&oD^Xj)JIGUaN!ig-yyqOoYO=ff4w*RGu$TUDz$|`iwV~ky&?OJvA1+2lD-U@+zwIjN}#B zoi5p3@;QAm?{*1`KXC7^JNvrRA7bg|wvtbFx9Gl?++q&Q>l)7LkllIZC*x?|TihLM zEmO$uwE8edYnh4+y>SS(?2)Cd&%(8hb?CR-L4LD`mdN~>t&M--{c#VLlHI-7bpVFU zsoCF&yHR9!+2)+Fw5JPeSvu!NLy+NCgej)2WPzopJTPGf<5mlqy1p9MhbLf;Q#fWl zIg7#l>6jS7y}K}fIXbP4?2WvIkc=4QG(@5K>1(hzFqW>&+X*1Mdwfh!R{vX$52GV6 zow+=gVUO`(p}EwM-OUVQ&zku?o2*aZ(IW0GTu8yy*Y7c&IX{QV?hYR0Z0d*>qK1Qb z^>rJjT;GMN@&?XeILZn3GZL5^w5GTT55u!@nCwo)E)P!37n=9NRr)=pe`AoXtb6|v zM*Y)po$M~>MKRp`)alpboEq7kRaQG0H2fJ3OpM2&J>i&AdLDzv=t|*vvMaK?R&z|` zY48nry=2CHZZvL}SD{#swJu$29m(#_M?Avq8Ir!eLof-;m>@guViu+~jw z-qW!~zY*=T8_jCS_73J_|5s(YliZ{e+1~2PSYdN*$9OeuQM;LRkraXF%>~0oft`;b@gSpefysXB9Md(F#H(E0tuf}}F zzs%LL`^5g?d{+rFY%ZIfbCE@s5U_C%cQ*bqE73{J$r^HN$Q(TCG4@p!rjgxk>~aVT znCErO)Lk6*1d3^nwe(o}4mvt1xI}j6HlYOlr)kQVTrWAfDnNE$WyZbwBgpnRTqurU 
ze(^;_F4C7~(Y_Lv>PL4N^X?W@qTBi?40MXc_18S(RZXRk?CyaoGr1#lz20 z8Ib!s_N*@{$j$Z6a+d7wroDrheEP}VtxUWmyF2!<027(R=6l#(nv>o6-LR5}ooevq zPZEBS-OYA9g(+(|e`QZUJagUZADc;=+WYufydA0kMB>nv3dFB6ke_6C>$}reFxyam zF1&(0k0Vj^GnTtjcTxSvRCbfyE!g2LQ&wrqkz1w6m=}gyzY}pX@HGyqSc&F;?C%A5 zh+;3!XN)=mTe7=vOER$F(RVm7$8I*+-3UvbcefP9Kk)z9o%sm*)ZQyffIDXx$nJC} zGczgoJI0NnAD--P$*Ln5$^5*y?jCY|EPZM}ETyx?YZ(4WMBn9M2yQ4vevGz!CcFFa z$6wmMHx=s}ck%sIEcU;Pgxb0*Fj#LWm&oo$k=+fR%FL$~74!y2VsZuD$wm*b@{5_Q zA-j9s)?0o})e=|bQ~0GChL_58?aZmcs~1*MyM_IRJ^cAPwv=JV3(%hIZfj8{9+~~Z zH5Wc>WOwO{oTYDpf_%KfEF}Kk>aXTtszx(rSGmYr1-j&zXLx(|S7h1jgk@R?vaFB7 zo4JR#ZZZ>w>~5dFr5t|r0-KnRSXM(GP;~~acI$|uu8%xpj-vgKHgfjFZTPpLqcxIy zcfpnTv61tVyZl6l?9OPMzRcWm5k|QY7?~Z1?f!I3JZmeR)^L|5i!5f4rmX2uf_XWi z2xQKq@9uX9F0qzUvb(X`bZWO!ky93jFpcc4n*#mtn`ZEnRWOt6Yig05K**n=?0NI_Xrv;zq=jhN7 zkLlyXk(_r9V+vc#C$hT}^XQpvWM6mxO`IMPgGC*q5xMa?Og0%y>pDNlw)T|;|LTch z(gn1&h(LVE4on>V1gcj#i%fPm&dN(}1kj7H;y5(O?rwRe;=d;!FfZChHk9+uznJIQ zdu1t$;`v5)XFiB~##z5HnR%bvcn;cXI7#TEMhsTV#z?X|zp6a!7@#a|+PTTA+nm#8 zeyI94W+7FkqJr%1YsfLQ&QzCLvb&o%nI+M;ot)V86dhIL$?C%~Dy9tE&gsfAvbz;l z{?cB7UgXp&grr5o_ggf^MBKpK^~^#dyE7uYThUWjzAy)MXlgjjmd3+y<1^eVY$uz^ z?k1-*|G1sHv^!mli!E4-4&{5s_!IX1XDfkZcQfy}N=Zd?30BF2BH5kx{%rb>o6vIt z`%ip-Eg0e`-x3;NT(}Fzziq>ku?JBxw}r?_HyLz1P+l|l^=$EboX$_d<&~j$*y{xD z9M@ou99`37cNfN+OU1ay*xYUh+E0tXm{sLicuh~n5Al_@WOwrwjm2}vHN<_0g8!iy z^h&*jN2}V%YqGnk1AL^ve`{%X<19{Cgrjz50+!@gqf4@dsGHLLVdyDKlv~LG^&(`P z4nbKkGfjtofrY!Bye7LFa%+39?=>TSv-D$Skixc_^;-c>?Gg)_Bi*w*!;r}D) ztfR6_+pmovNJ^)4celDv$L=0G#_qAZ-m#^-R6zEUa(;*7xts znziNup6kBu^VsLHzv26|Y`zzw3mDTdx_?hZFoa=SF3dl zzpu2FWsQDPJ26Bo7m{mt;t-kophy^y~BNrBcL)P6?4=4AK-M#jfR%CYyYsu02Jjd*?H24N{Z{c<&X0VUB%~*Po$?n>(vk*sX z_8YOc`9@4M!X{pT!9yeIZV)8uWOungcn&6Cz_={Ve7o~GaQ+sGrd!Bhix5eg;V)JC zI$~mf94cgY)Ba6EXPxKRIoeL1k=+%xc*~;;+{rky4>@FaN$;}Ya`Q8?k2{MQ+1;Cm z?$T>&D+x<0z*>H`KTisgK2lNM(TQ(Qc30n)o{f6WcAjNo;_@)u^xKb47t};~m5+28 z5K7K%D}zm+a#tV~J9b8(XVD3?wCKqEtN!d+Wo|XfT%M`igkefNmiWZrpK*0aeqt

ho7yhg6#L+1xK>Gn`4Xk zo~R_JOg!boH#%RsIZ1W=2k!D`z`7y~!41skJXe=;RbN?5c6aKyjkv#}V`39?`G%2* z-gpWLntD>vB|y^Bf<-sEoecbV4XfFcKJZQqYQ|r}+-D|IM0U57oeW!F8_2NIYN(Oj zef*J##y0mkOJcWkGSB#hd_R58os8Hcc(EZIzhk%K(yG@e)8aX}pDduxOU$EG;bI;hlHF~5u!r}O*0SL!dm6~@RxWcC+hOlu zyD^==4`zm94Y?yBjv;49wVDf0OL4XaGBbwwp=X z?H|R@I6?Zb;GTYIZ1YeH^r21M*kBcEv_rX_i zNmJCW9-`kg9G;~+@Sx%i+COrTW1L6a_4bk#!5^V+~aIC5ubN3 z_Y-sui~J3xnCx!Vo*>c7G?A367tvvN3`VHLA ze5y%7oAr&j`^!e|dod&TnD@3+b(ub+0>wG>XDDalFQ1RlU+pAqe}{_KYY%zuro_F* z-RMMirxw2pmGKHms=;zT)idorom6dl$ty<4@)@qP?Y>YsYY`x3=i$vAdY;&f0{`e$XAbKT1G1pJ)WT*1(`6yPX>XC7A4P)ed7R zzFLc%)-edU9FMHjn;3n?Tr8S{rL>H$xDp+yb~}Om4H59UkP6*>PtoIntwfCqmD5An z>$6%-G)|Pmne5K7Ll!jWH=}PiXK^IE`>V5ugsLlw&Dug-=3KY!p8{0uZzUacJmehN zUHw*P(FpmBVJTTiC%fzJSq^tIHOaK~k&a|{T2^-Qd+{@TcSu7TyO^T0k7L|;9nlQ) zm!V{LYL4_G2i`)aW&&zzqp`}i7QS7KgvZQAmCR3(;~N1&oU4Svs_!}$pN?*0pvtEaqW^mtWiXSolDgTfH7awpoC zeLPZo6=lR>_MWtEFYit{$&qjTJ&$M4E7_gF z_5JK`p*wi5j~wX`D*0kduNK?VZMj=r7Xg#yCt-S;`+1>0@+1T3r4URP~?7VI-y`Q*AM8qFN z739N}?5_T85uScil4<{V%2=|y;BSudz4P#y>~86TV%#uQmPv~A zu-)PNm5ZZjuYL>aqV|~m!^eH)$xu2o^$wxZARLXB=_rbRimN{U&cQXZySu+i;1;SPy)(V|+@<4hxPt_BZsJ{GJ6@38 zb^FZS#Z6jrksX2;^Fw5Jgq6Ikx{r>1l91Ou3R%|GIK0C^Vmk#&!lNLPB2(`EU4ofb zEIVK0a3=H`_H=3|9vg$@^P~XzHd9Z$-A<#PZ}jCI>BR2w7&Y^3WH8y?#$a}1hiXXh zSmxBo?iTdSz^>Z&a8!2^H?lj0!Je|IK}kkDF2ZxNyZz_#G5a|A@h3NF&A-XnhuH0W z@*Ae)X5&2B-J6)bI5o13n1p&uV`ivq`j0s^wO8m=yA7MpgyZ$Wqp(SDEB{sbi4xge zW`?B%W#2`&8T89ui()6zc^t|&l)Gei47Ca_E!Y`Il$mYM;+A+U6{e$nLCKc}UOgt;FzC0lxB# zFU}}Lo}Hq!ALJpWWOt2z&ax8Ch_cSYSF*dRx^lQAsma(0KJtAS`(A5or6%(!CYGnd zBPs$9hMquOwT>*W@E2FIyN5^2Wyj2$aIcKVJ9e^;`BRH-<;J2uE=Y{X?*8*PlCHnc zL)ktWLCp!+{NEikTUkmo+1-dX{xaEDTUHbuqwgaeLA}`ReD?(u+Sxz@ajAx$@RW-nj8^`jw5`o!5wl@)WfH6MAl|9zc3uyLp@jls!37m-n6!tRV9X+?Hd=VnM|dJg}_a%O!c5z0I7;b>bc z*++JFCye`~?U>JadIUCPciYBp$8){cc=FVq9lfDa{n3k=A{9ASUy4O!cZHpDV0rmF zqRd_8U*4J8WxC0x(%*=(%f~RXyIx6q5IMEA3`yr*i+8Rs>5g*k#5*iLnvShxcLSUc zLg}K0*e+wf;Ae=OQ?`*VT^}L$STYtZi^Lj@Gwc`9lZRw?@5eHOcfpKz>3X>M$Km<- zSfnV^F~yF=Tf^>VC3KNlQ-IAEqla 
z9K&bsz~;VhaG=UTYRK+J_GjK~pR!2T64X54Jw84cYy5t~zwZCDyEzT6vVHbXa?o7( z-P?+O7A5dWrhg4yGUpN-?Rh2$nL&=JA^5>xNq{ySBlSaC&ZGjxStQO z`!4Ug-`N48aTfEw>PxappcoqjbFaZv+|92bd}S=sY~rx*(^V+$Gn33w!7?{LK*qP$ zm(0JlX4)IFY@t~-ke9sA%_aS_KH|vVll2f>>9*LHM6d1H? zL}9&+e4j1KwvpAYvk2EG?npr|v1|<}1mv4n>IJ_sM!7 z{aj-eWI>R-eCPMgda8>Id-fIKM%i$&X5T_*?qu9(Bcm5G!w?rLGrjC(lm1Jn{=E%d zUxcIo+GCu%GKczuJE%k0&(+mZepTGTVZ%fO8Ac=W#d-YDAk(S~l)_m-a;wf*mKxSU z!;TK}f_Mxxxru|>?9+T5ECbH@%j3s7@^kwM&P(VsD@etr-cNC&+*UFMhRUkBKC*eI znv}QQkNJz}lX;(sGUaA;bt97_yHlCyAzlL&W#E}Y=~4Fu zvuw%vjKq3+<@@u$f4C0^n^k3X8z0#)hx?X8?d0ZzXNc;VhIV};aHsEaoHEyu){Fe* zyFQuk5DOWfb_<@80RNI`_F7&*yq>Y>x&=u$U3wSPNU z;}Ig82m6cTC2fhXsl;DocX6d@FkAf`z2oep{W8v(9(c>c4XX0btbJJ5DGYNxb|Rb^G0AfG1}&Lkp10=a;R=Zfwp{l)P9Eu_L<+xzDbvNuN-g!WqwoFr3}E zA0Kqp<-cSfIny;%UM%L^f6o()?wShIgAuT|If-U3UCDYuf5zotIXKQ-I_k43MUs~nPu=mB?Oyv0&x&f8QYAc_9 zAH}}9aP&N}4RK{Jf!X#_y^Vbgy=V=X+D0NT?PVV}_vRL4G*lh{o`V5i5T(_mxm@Jb{F0E z_s^i~K<3n{0%Tr?U>P#oOeX(x6|Qe$5nfE6+KVe#U}PqA?}OV=+X1;sLZ{~ zV&FQRnTC0os{IRNoZMtC@45!5{ERU_(8-TZ{Fhsiy{`nDTB}IJ3NO(iyX&~nLBtu+R0d)@vsNmIlk@HOw&V&=bX(r;+@3B&xJiu<7I@1a!9%W3B(Y z%lgWne>J4l*#o$In=ITq19cPLL+y#9SiNPYXCD3V%B|&@%N}~rx59E^KBf%$gH89` z#E`7v`8^j|+mETZ#%xq-&@bO@FFM<|k>Q5k@-dG)@jL8g#==*)@n;(*6^FBr?k z1IUmbl)=7>s^*r-FTCK%knO{kNQR$1u7@vhdHtW=vhe{wA_H4;$Wf zPqmW$L51{saBrbIeQIx7$@NeV>B4`1WrVZrIp2)n(OKLL=Wg5Da;Qz<-W}*+AiI0} z+*W3(JwxLYzVk1NK*_J;sLa-pqnUKrwFJw%FXpoP;!QZ+j>qZO(TK>b#s3yFpV5zf zL5+d3Wr~sHj;>*jDH``LBw%^y9aw*2KBHTRRN4EB{#0#p-(%=Qc9(Co4Sx-Jfr(G; z#33wHJo4xfGEtSD7t82n`!*8z?IcxxOL3nk=^ZTV=Ilf8gZ>Lg?pUbrMY$z zKlbZNHQC*g{lR=sFc-UnbY`|CM~IEVi;Ro--!c=?%MFq*>jI_MSa#G*KL_3NC_Kzh z#LQ9mutqQHDv5?cETz19mvi`3U)t&_UmNK8x@I$y{F-RR!{b<3lQzu!O}yu zoh0Se<6GZ26oT{5oXa@3%v3VT?s{1U@;$;p4z6Ukvr`m2B9gFl%>!gzWCrgrIlGIW zybRJ3oAHOSdT2Oyblrj4Q%z`ybC8hp%;qm*r|E5F`H@|MWn_0i|C2adGuW6NiIdBcF)i;QW*xAWytg6Jkn1aREy)RU4x$IyUCXp| z*!_MB{mG6}`k0xa>z;D`U~Bo?tQhxe`Obel4`W~dLfC9K$>#IRZLq6oEN($kMI0_j=;NCOOnR>$=B_?#}Bp=tugcn z>G5YiAqofAR->iCK-vxsl=rpt_-UC+QQRfeKZ-$ESsctNui@&vb}}`dj@O<6vM^9j 
z!pl!#qf;a@=A=O3>SJ8oZX->Wd_S`Al`Tpd()6JMo@93k@fld#{sSG$PSTDX#M{?X z4kRnd;*=r`C%f~t*@gYa3gT<+E+@(E)+W#o@B0l$rex!Q88bxw@%l4NrNZy8Xcf;{g=NLLPww2ZQ{bVB9UC+tvKnuN#CBBL1_JJAqx##iEOG62- z2$X=OK{7?tL@HwHup>AI4a?#YboB*t?G&p_!>@)#@pZ zuCXP{WkxR6M+QGvli=?A;X`&er8<+|;!kk>!82Zso`o%uIZ|?_`Uy(I_sUB0nszQav#b|RHc8jw;W#*Dx>FJ-YNFo8QK@QS@PMjYi9q3+VgaNM0B)!(hShl&Y}|Nw|QlBig20{^F&u`U%W3L>zwB=Axy0c1ggWFk8;jlCAQ^w0ZvR&FKkqn?hTl;bZkdSQs`RP#x0Fj{cl9s) zr1EE5xfFT~|9iqa&1&wZPk)IoLH05_neN~@-m-Rg8!=NZ!+Ns2sf!)qzcLQem$(^TK;=cF@thUna8^0a; znXlof?I05lkSAYfCq#sb+@D*Di{G~5Wqb}^OAFq2a+NVWE4)^-Bh&gfY8&#PbYUyf zzwE)V^wzTZtfyRkLWkc{N6Gbl2aEOTSoeosa^3wgX-7yg|WZ?#tD4b{Xs?t!FDs zkH^LEBD)*@A{TSl|AgufSBd8t|GLUm4&3{J9UF7;knApcZ3#YADogVeFFAaH^Y}am zvH04AKjU_wr7!P;frsH@sU=4(*d;-BXZ6HN-j+VVrmQ4%XcvV`{ka1kY9O}^0>zN* z&LM{HBiAnDPk+v7+Q*^ul6v&}U?x^mf+Z&+KvWy_2hIk-x8=c}_94G&b^*CfiN>KHv1kov)_+2!(!Kv%9_)q}+1ul_MOEudvX&SADT-7+1=vZ z{?c-kog2<4@OgCvzSgGV;q|9jS8prVN6{oTZoHXa2J+d>_TURL^o)nyX2M z8-2^{S_-qU6PqE=;ANi%r>V?`L>}k9J^S}!{pFE*h`cef5ch$%5TKEOeK(@%nycmB zy0PpC3KAc(yPunlFJC>_;qGL z)LtOElzn$(ce#hSe>_%I7PaPXbqIH>SF_vs#uxljcah6vcgJ?P%g|>E^5pw2tl3Xj zci3($JFh5fj(CWc5}jlxokZvQN2var3GetYY4v^Th!Lq@|T=oUuz)!^E^NJYqt&1ohW+J^}nB__f6rKMWO5#I0 zCaURSSdfSVH|}E4X-mmlNDsmxKXHjCn~pq+%8GDIdA1EBg+9SBd-+awH_Xmkwt;DId@%iXVc9*HL2QObK$;JtuqWhjZ9bX(} zR?>SUbjaY(llv;^2hiqk4O!Za`3zm|ZjZK+|203tXvY+c=p6}loW`yVdh*XAdI-qw zbX?lWH-~HV5XE7>Q!G|HUB<}argHsBkbDcI`+l(jdvDGnDgIqp)f8gh4 zSLw@h&_39WF4te^9gqj}`mHz-R*Xj)%Cb_)OVn?NN*`xO*)j4hM(U^IJ=vYg&_mcV zS5tgyeWmro5V^XDdv`Src;Lrb#JNcHwXQ;NyuR#d2#_2fKEFK7#K+_+MjYq)elHed zysyIhZ!_r{!0aK}oyrV-d7@T@3FVP^jAUH3ehAZ4YneoLH{pP9F4S7O#99IG>Bkw&)vbu1#jIV%FT{6plOKG3ja%y~FBE>T6NN75 zw|ssXm2#e>BBQd{X?iSFRE9aoOUovV`pCY3+2PREJ%acxT2kHLPu3RFO%hHYUp5`o zy^;_*APRR?s}Y&OGrl`LLS%R2yi6rV`!Zf=#KJi>4&!kR4g=ds@_+2RBfGO4r6+Hl z*moDid%RByBKthX`!zO_OLnK^?JJ*pXh_#l2k_!t82;Bg10CMKM>hp0cCK(2w1=le z-B*(2nj$10=Un$fKE^lxfzM|*$>fYJ@05#NKlTkxG1<`44TE#xUOed2Mr_)9%guCh z{{Pua+_RV1aeEt_tHNP-{U|$3IM1k~V|8ALD5P7;vj6Vl)U-r2-;YAn*K^ohU?>d* 
zfzl%)Ncwp4yQN7!{!a|1j*G{TK{sIC(_Ai{XC9X9&a{`V%dOp+ zek-|{OP?CqouZq!zL;LjfZ}!j-tQmp7&;kVfQooPVMB=j8JJX;9a+^nv|)O zl zW|(MYVfb8TPo9^PRjA2`i9RxWD09~5ZRPjsr}%Ut6|c$e-poINJ?C{~&oO^l_<`Nd zhs|YXFLr<&jfbj147+t|p1EKe=d9-2PVP^A#e$(O z(w;1#s0YvZWeTD>e;2a&Y|b$)LWkj;XS8_8(4YM87c*no@&OBPW+02~?oVt5e&wjk znRh;7<-%Rl7xZTLZ-m;(6sRghVp8u@$iAy9rCtH#xxupYbUW$be;s>2#bL$~_KQ8c zi17za#P@KJ3|$;3gW4I&-}dLQB!=v+ArZ%N?_rJwcQUrJ%V8_~*wx#L=HnyCP6&tA z!0o6udJU&f_M%+M9`~1Cl6*ylJ7T5i^=m6Wbk0Gm=I@xT&n`GJpjQcQl2G&;Ra*I2 zLJxUU${s9P)>{6}@RUo>IcM7CC{0J-p><(8RNjVRLYIRWaY941m;1`8@4Q#Ewvk6R zk1+jwGWsu%#5k8T2z#x^nPz~bPhhu9jhVc#t4D)x98_k-Vs6uAd^un$hc$!6fb33J z$3Q}Z&%#0ma~Olj?)uH%ftdr}z_HpvTFCCY z_w$nMy~+~#R|&R}-OWnmT{q|_mR)p}e4g>H4_sx}^q<&LmW%OZcfYMl;FPK?H^oc% z;v+w9Ina;)23I!hz?P73{Q7YS1MX-_;BQ|UOm-J#ZY}2DA3*0$5>B>6;&9uu^mOP; zpElfE&<~cab*7SKas`7{#Nx4S9J+s{N2uIP97l6EEiXVeE9%Rz!DrC2p5FX&z6*SO z$gU91!O8A6ZSa*5w=`tK^n*>u*XyW#I%SHDD*X}{_QzE8ph{AxDY8+^7C{M}m#*p24UNn(py-QH2 zp<|*k4&H69BdWNaEX}0{mF(`~M_swN;1m?e?n-yF1H`fsf!A%Mvjg3TKYYaOn7XX5 zsDL`z-PU6nFs=E3rb$jx*%B(cYW&-pt0a#?im-<4E_U!P>>IBj`N8h;n%}o8liBn1 z_$vk(WMcu@T|l=oOup4dCM@!n@3HI_^Pmq@`z73m&<{^`H)q{3B&cbN$xnKOhJ;9H z7fZfJ+(8F}M07BsJM#5;oYOLr(HD7NBD*uFHJ0DHb$DYFg9*ENuWEM_lXJ~w582&@ zYJX9Bq$4${ClEk(_pC4#Bl|x^*9u#CJ}^|)|LY@3nQBt6u^&2%!m#oq=NW3v$Z%)w zg6!`8KlCm1SCn%n3!%w(ey3vv*!y2AseJA(zYW?;x1G+CocWo%HCfm}cIVf)4~1XI z?o^p^o)an`2ivhn;~7qNPs0_myVe7bBgaxl>dEf5=<`lD$U^od-9lx@1oSNB{&eF7 zj508mQnI@oogf)p%{~xg=9AY&qs^)WOb)mWyHpFQ^$3vzgZ!my9rqKfD-q)vfyrfQ z`0&5ysEN0e)ns=A?|aLeb*d6ReIGo@?lQf0BEI(*EdSsv!^rM-l)KB{?%b^&RDcq) zyJ9aoCbAW!F3v;Rklo$A%iZ~ZKA|`%6A#Jmc5m5_NBZirEX9Xz_E7%*Y-K>f6YS_r zj}Y12X#10>_tBLZFa71@CGKyHHJ6MvH+avY$8>NE&OfZfa1-v`k=+@S-6@ghnHCn|_ZcO*9KxOqvb!C+ zPTbG?0LRH0=y;m<_yGFUyfkEpy{}{&^S&5oBbQe^=ICF$3<3-kTZoor$4oSxR0(vhWo>xK%Tb`mhCgm=&rts=dYPjD~ZLq z*H_?fVkV!-?owX_h)E&4ka|_2wmuRCR?K*8Ye2gR){;ebH}9seEZVHezQsdWLUy;R zWe4z7?;H^DtVU-F?nkOilD^VQ8qToydcK3K+|`8B#yikw1n0&%hfzOCOLlts$??6+LZ7yhdCm{; 
zbXpSrctl~zxwH5=&Oiov2FlgfL83UvRMri-jAeoRTr=r_`d*Lg`t9T@*mpITzT?)zY$#}kLA~c*1Ud2?H1d{1 zWOoC$+e`S|SNQO28x%|E6PR`st5?z^RKm>WGP3%cmeNZ39-eJT#PP%2H>o&>uo;Fj zJT6c&lY=B}wu!7Ca1lMX#-NR7Jog!IAWYL-;>qr!+6J(rNmnL>oy6n&>;Tc={_)2~ z{BpGwyY`_Hebz^Y4pOK0pM7U!chy5O5s>o{YfGItcjWBzyoYE%R+N5)yJ13h=X94d z>pBH_6y+{iWOpmuxrjxZuXvcU6Y7Cs82P9S#@*=a-Q&$?BYUJy+lgDq3rucG!yU4_ za_>qkiPe^WT>T}C?C#HV3px7qHoOWGP&ObMT?=cld%lr${1_Uu2PiN25x^n!O$h|w^PR2N9PoCLIs`4|KH>TnS+1&$$6G+e1ku6#Dfs);I`)tk~ z;+v?v8ISu-(NM~%Mfat~qS!x3?mZ5a%1K5NI-&-tJ)$wJE&;B%gEP$*a*^!rqn*Ea zPu7-*r^k@4N_J+l4Q9h%;NdennMii$l1q<}k*X}aP=*Y$yT^6hiE;P}>-Ka^aL&|z zfxC=IR}jT>yD*dNu6N~bjMP;U!>1myM}hmv(N1!B<3}v8$;6bMVOZ)~fsSPaUQS-{r=6@Xzm5bgc7VjiV0!jNbY5X1 zyU6Z(lijTtZ798`oP*)MC`>Czgw42n?0K=29_vD6&MrTB+pn$sZE+Lm3~xf^%FO$)8 zbR-=2pN8@|J(;zheL1s&B~GQC{N7oQ;y!VBD6#0Z>oPX1pkv~DkR)3KO6@=cnXrr< zLyl268JUE@bq^4FnOS7AyED#yq7bMhCSwlc%@AghyYE2h*(MA~aF7}2+0(R;d9$0G z>t>apJK0^z^jvm(vIC^TRaWyH%u{!hUFAQKuAB#r+nlkTD8|Mi>_T$$k_fW9h0V;n zsWYEZvjgs4;rJGF2y^#q%7O>%u&56ai}u!ZyEHJr#=GvFNc>2tg8B_+k>3W064~8J zbu(F+bOma0v8enUi$!Hu@olM@q>$YmFAtEqF8Wd!dj@$sBQa$~GCT?&;?F^A`TaIT zzU27I26IjMkaZAemAOkZBOU6Bba_l+FA3RQ$Tc#9iq=wMT#Q3xcgCmkFzDScRL^mf z3_ib}40IK<`7QWgX%1RG(~;D#6cr0q`L?Cxj(BQUwB zC5!*@lSHz+Ed#A&$cX#6s*{9S6X}&)UyTco45X6mZXDU2fu^arMDuz4Fb4Pc#bN)+ zYuK=Wb6v8#svdNs__Nb=?X6x3dSjM3>f(!q*7Ok{Uyt+-qLwgN|vb7q~G zf$8!AFMOS(BiWso}4X5%jV$x(dcnUnYYhBcG2@!O1i z?D1aM#kY}yf4n7>>~4H-dzqH<5`l@^Fy~%4lnsu-=4o3Q`oK@VPY98`iI%b;@Gic3 zC!)v4DD+r(9v-g^#s2{3x=Vs&r@D!>kF3M$pcr&o5s$mq*}pT>T-x2`jOqqG40gIQ z^X3V>D~rIE*{P^+{S=-5XDc)Ngvy5)ADQq}P5$n(AGQB+@B4fvBG-Jvrk_rdM0R&` ztA}_mQe>yyZvNc1!acG8W$jyu@)hn@liej>ah7gPpOJ346Q(`cg|ucL^7oS6efE~s zWOw)Ga?g3vb6nWOoj8fWmxfBrX<-(51U)%qcj5Cbq|W3vCXHmyI4l}7XIy~cM~ZXNUKlEs3O@3)T2=Hj_Tft(@-5|^@H|du<|$`sB)f}!;4Z@l^WI)ifJlC}g5QOR z8LKGUS9wSk*`2v2GkCi{;pL-D^jjSU*zSkTB{jLe(MR@@-JOhO*3bJX^8c5L4uugY zE<1tyTFiad`O9juyI!lAMgDUG#$oZubdAA~DRt~CG?rszcLytZR{SxPo@wWqyN|+k zt3>?LxQllKEG1+t-}#@DZ?&|Qy+Oxtr;)puYq!B^_DjqQp>vAtZsu%n2`^|PgA~gU 
zN_JP@D;pZEz9DFXi#YLbu8SA%@!krOAiFSt>`rT05ggVkiKD%z93#7{>Fgw3W;2Vt zCj)7RnLVkiz{1z+5~}Ab*KI?^y3|I7wrWJ}x)hAFU`{Q9jtMJ0F_Hk;mdgD8#&%-z z`Wm!P$HA^X2K7@fLI0bHTqV0p97BJ`7X#T|P>s=ZqM-375fL8up$98*OyPZSrk{Ll z)RJ}!j$qT4a5_V_<5M=fEOi_tk?ihAy_a~0sff*-QpA(p`6uPzewP*mb#|2rJS#q} zvwu6$MB}|6s`uHzntcNtSD$~3q>;iMzIS$hPQxnw2a-PwTnbwfQ(6iQ(3`;+GQOWz@V=M96^8oKNlHhC} z1;@c>(a_#NA`O`b`V%BGqfF)DW#&!#ah}m34!4)H+xe53%$w-%E+LP#pa-x{SI+%B2?esdi5F9_pr8?pnr&n`*@GFjSWaB1Wfwy<3aYu=ka-hc zkIhB>bFdT^`it2S9hv5E0wH8~$1bL#{Pt6*T(=eT(cJay%{%FGHPOnZPmSzuey1!{ zq%`AC4`*pdb~mb*hxAZkK6gqXF7U29$)pg41+64OkGqCsclMi|WsL4;Y|hQXkkMhV z?o$pOOEuZR>>P-IU(9h>Ptp;`IPM>l-E}pzkPUrrp++qM z7jDuc;Zh6Rzm26saF9fOr^|JdkwjgoLDA@FEKE(nsw21Iblrj-Od)d3k8CqvTlNpF zM4Dj){<)b3O|2KuD&xKh+1<{A+&>4Ljl>{_O(0Fa{CJgX}ZW`vb%`w?lSrb zy?0-CA)NF0M-jW}Fjti9qaN~3vAy`6a1zHWAGvFviBz(?e?RVrMSFFLJ>ny5(v+kQ zwldC=yE|6g;UT+QJcaX&#kx|i%q}FdyBk*Kq8C6%b;o$LTNQ%^P8YFcgbANnLGmM& zUAt=x#rr<*x@V)XabY6n+_{SnXDp@dBJN}yWFLEMTe%i?6jo$+Jzs1?P?uL&8E!9r zIh^m=dP{zXHga^@Ui@wwhWBT)F~a;iDr;S2JK5dA*KU%t;Sbzm^D&O>4r+Vws7Xn( zCwfZLJHC5Yy-qM842ejk!Du~?L>W<@{rjRvb(o#W|C!a z6(*IOSKp6CliyXm9A+kUWOwr}28j1GeMwTO!vFR~VtuD%B-ztRltym_+1>IA-Ut7o zGgI>rqO{5GY}3*Dz+3$7=P3I3`Of~*Qxbo+mK(i`@$$-6e4LYq-r2u!^)EO1%IBBK z8dv%Hvjr#b<)9PU-7wQq1bC{*%$;6RMRu1k)Ir*`Yr>58+o3%t9F_)0uqsST@&_=B zOm??qE3?R%>=x{q1h;|AFlbiOOJE?Idj!hjhe6CFGmETwneV#H;HAak+h5mkYfw8e zT^B4%CI(2?5qeT+e;TU-+4JO=LSM{dtp1-38BeI_d-;lTcMTal`~Vh`-9`1yfJ5_p z6tr@Z`JdRS-JRXeca`KvHRrlycae4Z82tPXrhg%0;EZk5Np?FQ`iA1DYz!j1dsnm< z_5Iq2O9%SC$nFx?*h|IZmwad3h8nUv+&YS;thS54f}6 zat?p*X5U?5pp=G_Z@HVu1=Wj)8y|zSr6S+_D7c&OgoLRf7%hQ7WSVVTWU`;0U{64|2)=8?AnN!Q~5UazA(%hF> zWS$kS0R<>EY$d<*+{K0Lu0+8_N|%1Y(Vjc8jO@;`=RU;GR+YLd%ny*=z0a|eR(qaf zP<0ye9oYerU5UH{+A_Q+-NC*gqF!Vn5#w*8C?bLTfYB&?SA*MSMlxNE{U5&EU2vu= zPNNpPjz(j+M*{ZDz6G8}GO;Z^<79V!{yH*p(Q$kk8UZ`sG^mz5!`{E_Bxq(Rck6uQ zho+j`Rw_p{+1=KISv+q(<5irqXmTbkpLrilA&0zOfJUD2l{tk-2vU@&As(`i?9Q%( zvt$J}Nu+1=5FPV)Q92aLU*0iE5P3nx^d zDNkK0-uuWEXWna`vG1;TBL*K&K|R@BR=-nNb6-~$d2_D0J6JxPY{z^4b+Yt0Bpi*w 
z=vVB!J8U9VWOsRsxKn6q$lmL7SQE|O&qs6|7Tkk|m6h}h<32svU8-tZ&R~x~jqL98 zpzX*r<6XDeUP8(4{9cgdT%up*Vku|FTM^tX2OqzFM}-03`N@EaL=aaYsx;$!>E#Qbp3Y+`VD=9gmVt!QpeBM*Gq~@ zl;s{uFo*2!L~<@Bwf~93m;OJydvVWIDklGgPiZb9$?l46OJKQ8S(-X=Cxh%R@}`6Q zS@nippPUzmhNJbLLkPX6DeeAnm!>L2zM5IfsLv0u?2e?&q{kN)N#`l6u9I}_R6 z#S5m~rMd#6<*{(JkHei7dW0(2?>9PFzUH#iRY6}g2AqK<+1<-qD$p}`FOL1cXj!_~NSc*MLzeTt)~J!AKCk*BnYZY^F1=!fS%y(tz1NlKxu+1IMu*;>6#i@*aq5PR#F5=)xA=(L5p{{*SAnz=-kFYP zp!LNM=s(3t9+2I=YvU<5=O{_x7a-ZL~|0cP}k_TU5q?e5& zWOoWZ%P{;-8=1bCjGgRmvAexo)p&`)gScDuG8_>bk8!_QTi%h~)eH`il)o$`t>g}l z>m}kZ<7kY3cOLQDM$+mMJ$N(eE2uG+O>OIN$vOsAMe*2Xbrah8^prIP%aODGV&A}= zTG9zDNsd5aQ7T*qKSj*}Tk#$gDqH9HNN&2CyjR_i;)QgBG-o10s~IgG&a#W_u6(?Q zbnL4ra=Z|#eCN+QUVy^&t)%INyS(H%`8Ly83b%ho$4gmg?jHutXZx_@n<{h6KJt4u zJ9h@ziTh1DGrOi?Q$Kdn4LOdD);h9tvA-CS-DwWA5cT+57}y~JWA;X~@9zR^jf`c0 zJ2?Z{-IgjNNzlIl<^Q6wa&-dqLT=-H8Z#Z9A@X#fzf8QKEnUx4;(-Tyqs!B9e#3M0 zO0<(>WOoDZc}vUxROR55eYh)Oxa_kN>-&E}-ADS4$?oRtbC)r$t;B3V0U7mHwD+ZB zBA3szcn@h(X)kN;IElrWPl!#(ME8whhzZ}1D~9SaH`PbHy0Vjdfvv2`rDLK~Dl!j6 zAlB(5O8s=j`xR$3WOrjno6GMNH{kSdJW7VJC*w&S%*;$=^~NCicW}$(tbAoo^SX`_5QYU^c#B+YQv#^hFRppFX26x9wD;3uid?+@5DCp z`}|&5lij&5%|>?2H&pI(5lh}Htd%&cQB{zpXZcu7c2{0hgfC~649rHfE#)59#r8vSaq&;48w6lub4JgG*vb)`9a_D-;mB_~i2wR( z%4f2>31oNfyRAjB??di!u*ay79`q+?@NtN~JS4mOr+u(InQkV3jl7ENSFupr8;cR| zuHdPunXGkVf8_H3>AXu{Lc3St`PE4Dv`I!lMgvMGT1yMrUDyqJB{yoyrX`2)&L$iy zf9}9H_qR|q;a&G;sC?G;lG(QOVH6aj>DpFwGs{CC<6pSr;wCeBj}K3BmAhMhAl)+; zf05m3A1c8DRTU{-u=pLTuASpRbxNNus_sH&)cOAyjDO$4L+fT-m-36Sok}~@T z7&4U|Af8dEyKoj4|1l6HFZx=@?z+!1m6idQ(K#R%hO^^vPD4YqNRrBk=zd>O$cR!#C=yB` zBwPK?_woA!(5#9hShp5&Ne`G!7_|4UjVVs~5C*Fs}zEP@%{v*VI=H-L&sjuWk8im!w z?s6LKP~(*dUM(pDIbwImI!?ja2STWtVvDK7?yPjEmqGLglxG&gB4T$|Z_a`2WGR$C z>xh&0Qn%POQ=H}23rT%+4k{#2CK(6*X^QA-m8iNM&E%D*CZ_q!#0OE+<$?v-W9|EcObF35oTjGU1Z%nXW_#Mpc zqK^Bm|LdfyBcCsML<(K8{nu#B9n?eHTTfuY<23mGF&TE0+yl8tP1+q&|01!wuPZdL zdF~xZo|H`eCh72e+%tL~(HTF9wDrX9rmv(9?AHPqluUpHI=L_y&TVcA?5^&gF21QG)v{a~tW8US_b2azXtWmcPI}<6UG7-sua56mG=lay>fk+{L4K4M 
zz}^~=w~e};OI%QDx&opO`AbV_H)(eiVp98{yN-Gv&yY?{-Vx^vN#K2lDkvd#7p`3l zHo?E3=Z_UGq2DJrMttkrFsxZldqZM(zCeCgS21*tx5vL9NSAY+x`zMz48sG*!0l}u zD0W-|w>-*-@pndVVs{NjZ>~3hBDlSWEfSU!0ART=K2FqVT>J=lLH$XEBvAZlCIsA5vdQ^$s2?rm80r3HN zx5fg6DaX#J)gIqn5JM^TvyegTE~0^UGK2p>Y@-e4(EXJ+(;BTqeuLZDVsQO_2;%%I z;mlSE9BOpHYGQXyhUU2M9(8<|9)&r(<3V=zRS;^H$GIygV}jV-2VX-xGPeUJDrUm6 zRY?%G{}vqWrtZOclxcF;1LwYY=24J&KD`???xNAZ=vsHI{D>^-3gpkQ10cpN^hagn15H=CJTf6H#!W)+wk;n63j8qfGWl3kUGl%i;3Nt5xX1u zNxh%00&pdESGtR|JDc7^pO+c79HBn&4UTxEgxJ7H6@1aBb8LDM{1N*J`}bL)EB(wV z*wY+uFGRiAr(hMayAwOkfc`E~oNR87xy0_)j-y`kbzfk1*>RA&8VBQ=tLaTIgEp#^ zIY#U*{j4z_{P&vpR~FbCkZ&RRI@lYlU^}rpJz{qg_ENV(5AAiY9fp~0lu@|;0X+Pr zg@HFc@ZBPMUw)&G`;)g|O>h!q31z}0rw%xah8UMcXMB(g%Dk3G*^Sg$c_1FHA2|xG zCHJk2*l;thltp21~StLE<^Rmx zfw=5MaNkH;WJgsrD5re}vAYk4bg=WuLy!D64Y{G~dsV!2 z^#)kJp^lB-On5#2CG4~|!bJ}}sT;ryTIYa^ zh}|tLGRFl2A3$YM9%%T+!+h^6@Yh%#cNx0iA!2s}uV{B(+6e*2GC^IR@^$CYZk?xw z<22my_b_#(CurmRre;W;PJR=jUmIgu;nFu9v|K|SAjIw#JXaz8NIhr~yZiIx2u#`8 z1>cp8QCq|dhgUdbScNQ_|GESX@8jU|i(|0K;4|Pw3(O>TcQJ#y2PTW5&4O|WB6e3i zr3A)57>3;QHdsM-v%Zuy@>74ocl%TdKKJ$ErH8LDIcELUCn(n)T{1;SxEWt z#O~DoT?HvG1>APr1ve7AQyDTqm3=Q@T}=i&I7NHiag7k~qmE;v+>w=eU|xb2sy5yy z%|!}CjU0yDq(^Y~qb_z4yUWmXL%+MEFZx9t4j&Rgr#TD8mUqMOkTKpOb|*yaP9$9h zhkdHy`N=p)kj*C_*jI4fMZKTI?*4ox51Kj6NbO~yNc%ybv!|f!5M@lPw#Az?Q}y^z zr@AlgGu{@$W@2}$XV1YaVbaLvIO6*R;waxuaSgv0T5V{K7fyh?4FU-AQ$!zKSH!8F z_*vWln*yIf-3yv^y_2b*>kj<&(?IUazBk4`oL0P@7{GRGG{;n8p4 zKgAMViQPH+kWNjEGBa15rd<9*)E`E@pJl>Wr)r1I#O|8*Sz@p}b$}EVz*%BOmX|PcaUwA16gZ{uca{%TCIeG#O{7clh0LEAD{U=g=JFd@cJp` z1KZw%-uasNjM!cOPj_6qR|8G%-+_XFWa!OF2V2$-_a5t`goh^z6T5Rip@=E7IQXEJ z0B-F$@K&i8;?JAnCt`Q`m*}13FNMv+=U^eRy9c`pp|9gR6vOgY`|MyB64C3$eR4ZWMYoeT9~wd=N>GgM~vEL4hZO2Wp)Vh~3GLF~MY= zH*iWn8!kUepuA?N_3jU``~MW8Ez=1 zj9OzyJm)2eKLV@ZDzQ7~+eI+d=oiS|x55%)cRTxRuxclDjU`bB2(h~tvgOeJNffWG zqRcB|cT(RdYbWV5M2QYX~I zqJt{fN$k#y*xe~}UA$$`1|bo&_q3wC9s6c@7@&>ch}~tnkw!jT4Y&8-gnAE}w-#iA zk8mfboHxW~Vt3C+yI{+9d3@Gz1>A_;tr5+GC;Sg^ex*4+CwAAk-vPySCGg6_3UDQM 
zcTwvEM6db-2jZ-8Hl2ef-E6SZ`yU*3JP9GKr1eTU3(bn+G&fMVEGEh9W_FYZcj|P z=!_LhsYhD624oe9-I*PO{wtrL&eZ~e*qukOJ!OxHp{>tZxK8ZOZ_`P@l7FD|pACMc z`>QqD8pVG7rrzOV*iP(DNv{%Y93?Qa$N>)%yEB<%j-xa`K;);RP(WME7&|ddw2OOG|3Hq~=AW`uaI2NhlXdicc^O7>uZAgnOMfs-Eq_fUF45<^_ zAbyT6<`BE9UO^oo^Hp%2aXpl}(LTd13ryy`2ASQ)=%Ysa40~tPoFI!!=U#&0TeR2p zJq`*#KZCsJC`>1ImpqX^Te~POzi|du6TAEQpagvSN8tDm$~dJRn_Qh0e!2J)LK2D~ zli1zGiYjQFDT&TJM_fhhPA`VM)t&F5{ZTFi-;9Ta&uigfkplJ+yVD?c$8v~k?|2S^ zK=R$aOoI8tjbKtveRpNW86?6_Rd<@ak^ss^0UGf)KTsl!1 zhbLZxmvyAiaLk5PyWYTxK+>rZyGt*mowcP5j-0#*^2F|vcjm*eJMA)?Mq!uu|9al- z(BiTPu9;c}x^z~Y@Swe}rZ763vPDf|cZ)|X@!IC^5I>1>Y>3@`nR*`fhf3l7hmJTZ ziuSrirf5~#12H#pV8N&a*mRPELp6$MImHzNoT*pfv_8IC)DENL(_v&=GH4CbnRh`0 zrDe!RL+tLCg(l`p-h=e3$#B!2v}~K6!jiH2*rwo#6JuO)wzCr2hmoh5*xhHB9OyXr z4qW~-#VBHT-71uWCo7FY!WY1U*xiLI1=O)P1mn^z(S&x=D&I)MkV(F~C#CR~&iIKX zryrxV6c0?TnH!0ybM)^j>?mDaW&|v%%_+66*<25M9@8dm4tI@=3#O} z@YN_8&WL3|UcxglaHfn2Ur&4@=8CKI6;Y>105=Ax!xwX5Zci_S8<=4Xu{#yMBes2z z#K@KBAcEMP|kJGq#; z0)N4y34W2C?hQCrJaFN)Z^RaSR6D@|t1=MLu?9S*U<$GNmfS)Ih!3eRt z`BN@~bAv3t+u@8F#O~CEjd6}n7iiu*0-{lr6XDPRUIQv9d7L^umygDRn>xtpwSuqX zVTcS#h5dug5PVgea%x8783lI?l2gMJr<*WcED2`crfkZVPV(~_A)nY?yrc^*`Xq;C zx>x9?jHe#hJaAn25k&9Mj7sdza2mbc%f->!y8?jNokq?HSb%>iNBIBQ-O1ZA4b%!5<`DGn+Ts#`R ziQPRJR>5o28b}|P2r8G4z@p!;pk>eqJ-?GqY&XsEZL*lO@-mnbyL*y;3{=wxU`&n$ zIuW}QDzit$L@~LZM0mbl$*xi2@DnU~q zf&X(a4$Xoqgo;qAzP$G%4N$d@&!}A674`jgH{YfA% z+(;e1>WHC~r$+3q;*J)k$v%KnwM1-T?{`t8cRLgP;)>T{erK7TW)zMPySpzzI_nLhc)|4y z_!GO^vfvb)TqT4`V{GvO{k}P^w8H(*en5v}5r`Yd!M&;HVEPM54B6(0JBi)>wl>4> zlJ8;L%v|v5ql~|U0$3uihz8DE z;Jn*m<57Jqv-dl0?L@cYzVwUIegaFAlXeBe-gWEx=-2s{(2a& z{W07MPJ@=YDR8}~2|9GOP<=n$%a!i9rdAzgb#KF_-X!?BAOp?>JcnNiw9g=Rr%LSZ z^?3!%AF72rcPTgPMlM)z@8Q%YGu%$>ZeXG#E?6#!9=EFCxhmzbM$-I{{u5*`TA?+~ z74L;@afyTw^52v|IY^-F`;ctFTzT_;pXrBlz;`MN4k_s+bL_JUYM`QmAU6c=OgRdtKL+D5foa%c3 z#`@ZnaZBAT#O^ZBQSYbCEim>^f@z+a&=%bRlXn>60b+L_7r0>JQQGS|U4=v8@u1v! 
z6#TwZF7hOE>>_rz`HTbJ97|ecmr9sO?C$5yV#s;&8|1>R@jlIi88c|sZTttz_MU_& zVt1#U&ccuPV)(Vw9*c?Hv9T7|_xuyIxgCR=|FOG)%Mju#hn?llC`IfptJDa==M{XI zc?6uw$a~P!0JrCCz~FvFHQO^Mw#TqUiZj0D!jIA9F1 zyZ52A7s>wsovL{d>`$KivMX?6tvs%Fa>3zB(oobH;-FC{)CE#joI?_rKcJkCm1=mA z*xiLb4?Gm4jbYQ9LBTZ@R)!pg*+Ol=)6~T$#O|z!-HBzXpzHE_P$PDCPb~{x-Rpwa zQ;kts!3*7II-`4#EE)(d!CGQ>2BVI{fsoJO)oX#tpGbQWVo&=uQIs|*hXus$7Vank z-m(#Jcx8jX==~+$ZiT1EQRd$Z>UJh}mpY>gjIAYco0cO^A$GSc+YFo6_JP&!T!2c_ zFoe{?RK5Z>R=VJ;?VdQb%>dK?J%^568F0Ba2}}fyFe*eH%Zc5EXOfpSNDKG*+=oB= zQ-C?9!9^9yt5wv)IAV9A#O}5qP{uENuF=1r0KXNo!S8oBDB7FgRbqE18k}&;3>o|_ zUk$s6-ThsV4{~R|0`ENap%J^=ddm*=J4Mj?L>Y|GtgHTtcGfLII3d**cM`jMtYw8T z@*N`53!#kI-S2njVAw|rl`7~AB6eqAM{j7J$u%Z=`9m zL`B+HoYtpayINt&Q#uV-Xx6Ax3b#=zx;a(i7&HZ`nCk%N!C%$k`fvf(VR-`?#mZF+*khu2HMg49lo?r6DO0|RH>f&CMcVRCaiBusn;azFKH#`MH#Ca$<{xgt(^DF7d0 zcaD0waBpcZ<;j|2C$T$|B1c@SB8AUxoC8r}cYY5GA;*q%YCJ0>D2k6Z+G1di5Ng(+ zfaoL(#O~S?Mqzi%R~WCK4_Fij(;cfJe~%3IJ#fN>E?y`jYJw3> zl%L(11qpAdU#0UJq+C@-bqzNZB6g?zR~ILgKY}iqG)PWQfuPg(p&>>KTZr8a?xg#9 z0qH20HiAJ_5^Oz_0f$$=fTcYKsJe%I<0oCvc&Y+gYu3W=Q}N*HK(kFoANXH4!++>-DTT%iWh~0e}TMqf|VmLd2 zv_v21?Rd=scjIUH_VE~061$UlbqP$4(>^l58GFR&?>98YnfYCy^zI0JCw6!KYCTln zRly!&cl*}Uc_6Bb&AF|hHRUi&AD;?KE1Kb7j5bdFP5Dp6?#yS~6%ug}OH6DHG=kJeW=Xs>_Fp^mV6KdoTUOD6HhqNE`Fv7}hFM$nZlFls=jvc=N=i5~=ZGbY5^+uzwqz*1k zZh@5KRG9oT6|!p{!kQ=@JVESk+Ih;_8LNtF@eR9Vpx7M;++9Q2`3B~AqOl+3P924QVt2aps4J^Q9$i+t z;DcOG^qXOb9ey2fSw0iy1d=}E&@IZ+RYU#x?&wAA?ue8&8XkNAKAkDBr}{8_Xl#S` zTgi7v>~8!dH+=1=f_G0}2UTKs)!|tX*zp>+7El)wu{%>;XRQ4vgL?0);SopK_t}&q zH2DjZyN$w~#O|8y?Xf9U6iZUhfCjOag>Y{E1YNX6GWC3K@zdM z?>$u@dqfg1t#rg2*@;9p{QBE;@?c1dFr?;?m0ySvduUfcbH03)ODF0s2q2ka0w ziD11(8QiB?chuohIElizveg!k61)3&-xAln83NQPgd4=}cze%7!bM_tKS(3Cm9$*J zrnq8J57_Lz^PQ!{c2fE^FVt2eyecYnl4twUOLqtL{Oy6`DP7G<_9b$Kp zx*qsdL=*oVx(gxcw9lB94pQHr!kZQPsAWh#)@7~;a!UB@9|!LzlFoW-4pc)A6t6Hv zxh>QwFzkq$4N|x^`#ek`b~h+p2(q^*GqZvEo9JtIdqL-*uP|OOE`@CRY;~e#uzQ6F z_Q%>`DzUpjY05<|8l*ned{B>#15wM1Algbf%LgcH$Cp?|k_rB?d<$kV*}%#YK%=G( 
za+K-K|A79_U(y*LsfY7^K8865(xB5e1*QZy!Ixi}_?g&UkiZ>X|ElATwA=9LPZG44 zWWXBL=ioZW0Ogl@;*57LsPsdD?neP6y(Z4TCl@+5zlT6?GnC8n!lmmSvG4@-e*UeZ zy)vEAzD3Y4^%EZLx59DsGxyY%=6G8n>McJ75yb8whJH5ph|)Y|k5RwKOFDKGZVLPY zV@qkDLF~?{wHo{eWKdtt8QqB8NtPRvKkqfH=4ZhOvAg)g*FoBpbZTSWFfWVz$Gdg$ zRd*Y#t2+!ak5b@8$OB0IqlG_+-FXnZ`~6i7y9;kY_=Y4f6UhW2w+@&(j(R_d-JM_O zf+nxz@y~{1RLNKwr!^7o3MPS%=+aCSjP_|>Z1%9y@0Hql6-3im*zwa{m$;jdC zqt58{)f2mK8R46zSMd4J5m=X#2s^_`i|nk5?ZocVW{gJngE}-p!w|4Pm3;Fp z5d53`YQ*lkzfn&{5BWDP-+*!5iNM}xLUF)L(6BYa%f#*m-#KIYEjip%UISx^-DzFU zgTc2S;p;nd>?U?M#mxZ?*NCI{v$JrT*xibO6EHdXFWers#w0r9jjvmiFYXTKV~*j-)cF<4;o8BSJP;8kLGIqCK| zXQCL!%`Jzi#O`FLmw-p>Fg&_Itd;KORtalNO#B6vw#AT4>~5P@6>x9JZz4wd@a4oR znn;6rt`CBEM`3L-<&6r}!oU~>oSjdscfTiY9yGv!T`%C(r3|=JO1q?qjj(8{I({T} zXHD$xMI7xj8t#L0UJA4crGeMsM-Vlji%To1mrU0Ux7;Ru(f4Z*+@AogEm@RT)(wY# z7^9jKWq&+zLY2cZs5+?{LW$iK$>+m~?C$A+9g3NW;)%yJ>(b0rTX_mj z#0%l;HRP?Pnd-=7D;$~n13F(9LLITYuF7-JDk_D^xsKFNMgHb*rkD`b3%{%=GgBl1 z`fmwf#%x8@)pNzxDKyiG8KCZpXE5n`I^5zVLzl-LxHX%)K8fAkp5uWE2Q{&|;T}x> zkqq1Jqyg{L6WH@s4~K}|sSvwcd`Sr<4eQ`q6!|?KX2X)Mx1jXY1WyyYd*kDT%eRt; zBJTp6-4F-06AB_onjK!DS$NA{ zOWY|v1TF;yAhS3Q9{XK@Z^qJ?Y43#diQS26n_}VQcW~bz2WG8HfP_p2U2BxEE{*oO z(w_K5SsxuIK7~Pvbn?U{!%W9}P_a-GBRoCOhuB@-ZVfzt=MH4}C&QAX>G14&JA^%@ zdx6-UgEQ@{ixe@$mjfVnC;dDJ1Zuq?bI}y-cT-kQwIe1ikiwBa=RlI!o!H(&c-r+H zzAIRvD7`_fb8K<#DeN_R57QA7@x!Kw!$_>r^23&ifW2`CHj_E)H0n-9Un?#BMS z2*V(QF#_^o@~HpI(*)}@-ax-@Htc*%-O?MbL1Lsbj+JynBVufCHpbs`AnPG7;<>MMTqF}TnWkXj%1hKm0?);LhG6&O&`a!2tmG231jyolKF%0T>~8;j zV>El;1+$H_;3=`YN0aNpHBN=R{FIL#P2Or#>PXXV1B309BV>~bYR=7&yjUCez4O3s zSMp?RQbWVuo6zc>1fu?#ASO=TLl+Fu=`3}Xd$?fFR(Wi?b_I%;5Tg;#gPJ`bz%0-l zmtWkOBH&{F^hBL(OZW~mBv9kmoDt5r7#O^9)Q7544 z2RQiQD0qZY=eN#Pn2{upYi7CN;!;l>x8D#iWOcx=3Dhw-I|<&Z+yapkYS>HcZZ5Gq zQ)|k45^IJzQmLSucNlD@v;oZ1MZG=rJzegG$#YeRIoHDzSIWY6&w`KhUqkjD>VqM6 z=VIqfI&WFrFzXV;5xXn%I}YFfe1>qbQCLIlE^LB5o_HdP4cDntjM&}khb1s(U<980 zq~2=UvDI zuWI6=)gH)iqdUw%11X>p`VEt*>mmbYrjSm>g*+KENuNRNj<2hTe~t@4pV-~8ak*gM 
z-wPiN&F~7bJB3*4dFqqIGt16_9kIKyBZXif_yK_nt#A#o1f>bKxF%c(F9e-}N5t+{ z8=nD-CDh3{Y=?t?NO!Y|_PPVqh15cfqKxvRa;o9(X&F5F)d`)6-L-Wak4VsC3bhY z*aAJO2H;E4F^C?ZUC(sNc)Tf#Em6+MetP2G5hF~}=z@(6N1&6~-DsBvQ2C;Q^~CO! ziQV-z=+Mr)6~rB=4=ywnlzudWr9c~(DU8OS^2A(a)X>}hCae@qg1$SMkoBk&cFG%J z2eG>n2^S3eAcw_TSKusAR$O=q{tJ7{PC)Mq(lA(7fZTC$RQt~XFA%${Y%|BvTR+14*gUWr6Avol zSExTs4!eaYL!H>&ZA~M5_OTPjKgxudfAntGx(U~YRWV4KGSrnv#O@k)Ib-TWSxj4Y z8BTtUgN{tnh#VP!qFf8~CwBL?)E>3s#jv-k914lunf*8k_6LSx>>(Sp|ISArVah`4 z@D{vpLZm&54^uW#BDd(m3TD{8nN7>>=B_+1g-QH%xcXZt?ro<1*{qwKuTLoB6oZ(`qH-?Y zwhNL%H={z@O6syrfoz{Y93I)u&Z#b9YfIO1F6SvH^WzSDtUd>CJhy@CFEm)x&KQ;v zIF&u>crEDfHNl%Ed^A5l77Oq+y^Ia@U3pZlE+JAYj;?vPlCI-E#)DV`A=1M!}ve_$lFcWbpX093z|IRPQ3-#MD zIG1vV!!kK`e-#@Q8qV%-e$LgdI0t(dt;f`(o&Mjm=f6U`9M=K!jb`%ar`yqbUhF^Rg_D?gGsb@}R zU+-~($tx_daXTL?DtMILtHN`h>BQVe_-t;33sY*T_YOWuc`n6#>W87uq&p`CM=B<= z;ILRW>Wv<|E~W%GYkbkjFPgG<$!lygmCNv7!1C5avX_=(%&v78<+3bAk$qdyF8>o$ zsUPQFJYLNN1Dja2S1Y%%{R#}31>=Rp4LD!yGCXFZz8=iN@g88MstT3hrtcFx_1Gf4?+=)t$)^ zOrsozX_xsZoJYD0n939hW@H=jLqb7cA}ta*|b08^ImQRRsl+P%9YxPH)wZR?3) z=2Ntp-xp)3Uos0H?ux=MqvbKOcM<2+;LqO9k6^;%h1k-=v>UfvfqBkburc)=oI6v( z9lE!cjV#&7%0A!W%*NNl*5@JkMG%Ay^j5XeddZ3Qg|o?et60UMG){T~&8yS46V95r`OG*pj*61&0<6XA1s)C4cgb>^ZYFSubj0g_ z`1ofkB5`4wIGWn>x%Yuf+3i1DS!mg3?);}*=$f+{ZRUibB|L`Gs>|G0{stx~ z9L(0+Ug6$$w}Ogd82Y_iOo@A0GYb@on>D(AL#qDzK{wpq6IZS7X zoD&+qRTPMQ;<2k?d{(4m!5&mz<7w`kjJa~L*x{ywNvn0ar|10G;RjJ{w}S$kvT6Ys zyjh4Q$0E?LQW*a(ILKw@EN2;&TUbXyFSj|a7$*C!#f3LF;+Wq}FrHxdN?r(4bzjfo z&sT9dx-VcLJ{+%guA=P346uVA+|n1@SjXPQEF(9ZyEjJ)bGPn5;nQ>R+-G+vPFG`Y zS}{zfX&S5Z_$BBkKT36XERKmAkJS-;Ug&2VCi1f|ZY5*0HnW0#uecGda?p1V!YLM^*m&XwEQz|!-RTKs^A@gS8*EFt zl5uZf{?KN+w^!gn#YB)a8{x{uBiIf50Je8%IrpEL9Of;G!V|i)FU|F=iMtk&g>fkhj#F=SUd#o&?r3>3N*jgPO?(GSPL`nW&km?eU4j-fw&CT> zL6~tRn>+s?kga+d##SwO%ALx-2q3!w4L1klj&TC;ZEWEr`J33_+BK}^NItiE-UqOm zwiUl;EyJ)md*R{^QKqRI$!u>eU~73Zx#;(bxYaHiH%8Aydn;AS)YoN((_`62$4RVF zr%G_`>?k~an2)Llym7H>hj&K2GkZ&Cd@RqIt!w)2Ejo&aQxf(<*3Jx$B-r^Hq?N$&?= 
z|3dD;{WYw>e-pc{+050+a(oTeCM&#TX>vRkt(Y6M`PN(={W2x3?my2+0I+B%x3C;EN&=4 zP^e~uYwq%~GSn0AuGjLuU+=~a4DwmtkUhKoY$DInZY&z}__%4GDJrHv6_{jCVbMop zSWAy43%X(nxBO@0RQo8ryM!`Tj;-YUP6x1KecRddga5di$4OxQa|N1|Z^o0c-7vNP zG`ITdI+j@%%3^Qb;<7?-!rkallxSOrBh$`+w8Lv|*XGUa&CHc-sZTQJPcz-QtJ^VH zb`e6vYB05xWnI!yjMp%m)f~0qhW#`#Fg=F44yIyPU?*?vkO|vl&u89q$Fg6CW(jTw z+vCvzK88?k%#!2Mwc#H}vx0|wcK)~x;~O5}y*B-i_L8wUS;Ppvv;GR?7f)v{-=f)v zF>0(V-HWo%=A!L{9jFp7iO0up;hO(0X5nlbdv|Pz)82OkrYQv?!*J{)A9(V{^PFGC zdbUa}gxRcR+`en~VL)pWuIycd_O(T@dvYJQvv~_kuw2e`lJ|4w3L@zAI09WxFU0JI zIWX><0@Ln}Vl%D$nD`}auJS7}GpksI zALEo=5FR|L9V_L{T=V!$%-fRHPb%YmlbM7;E9kR@>!IodB`!^2COhsC%}n%eBg;jx^!;uwowtnb*twPQ3O;hPMv<3RV-4-HHetGcD{Opwm0Q0!nAy(T zz+!$>b1y`nKubXwj_wOYFT-4Lobi==p}mcH3@l-4nNghmXK}ppA`-Ja=Hri%anM_- z!t_r?v$HWX*q;VTZt#Es4wc5@%taGW#xavO?x_{)|Q==)9_&1JO6rl8;wTkE_Tokmp)D4V7JEu+LT<+Z0{7^l@`p>n=Luze=$W^&27Cd@+{G zZ^wHlet=~_I_G|96-&7l&X$LCaQh}y!GyjbjNB7~HzKZ6KF>WavuY!QV{2LChZCI1 z+#Z+_xdn%+mtzI(_3kPNGYpGhb0+(<%Bc&vi)qvqSRRE^hO;niiUAyz&|#B@VwnBV zWVT?>ErE1`Io=Q9nR#c2>@FSUN`+PP&ZW5O{R~hTKM-$`m#q|MlpmJQF&55D=M{6QWTT>*M z_S+JLtN1u=@ffUeKk40j!-XBE&n8{$$U?fccwxSP<(v5U>x?;u+T0YlJe$le{*GZf zRys^@OdoV5W?|&TD6F_7kM|1}aFLGw>||5~({vSPJ3;4huJjVAG!=T&ZKj;0Y6}&$ac&cvlzQJ zY=X=tDb^RUgI)5U%ih(va`&axvBE9}m())qJ@7YPx3w`F{2a^n*^XyxB4Y(Yi)^v# zB_F+kb`JSgV+ww{v+eKsY}`CMW;=TYufca5GHpKXlBm;mbf3V!WGa(e7Q+O4HCXB{ z2iToE2cJLLft~%tb7uu{U5Sg>v4HLDR@`q+&h#*p-dKrG%r@iM-7n$&sI%PSupqX< zD3pmDZQ%awxdY2}H=;my9cAs5!qorXa&1Oi*xQ{eSl;h=t`0}Q(JBIGb^9Z~ekq(e zD#uPNjbh_&&tfvqO*rNAS~&ed4EFr>L7B_Vyql$F%*c<=E;u6dR(2P>qU?!hzxcRX znYyO(x7VK0^J0y4e3tUliUmJO=Vjmo^gkPm&q@vOinb)zyJ`kgtcYgQ+fQ+}nFO@bYmWDp!Uf&VB;-PF8c46E?8n;9%D1 zR?BtXe+XWcn^4|r4K9w%hl_JRa-LhZvWv5qF&o|y<@ML&y4sB!!ZZ?mo?dziy#iRRwyqwDk9(yz`KNoTy|V<+-_ zNC<8_@mTsEKI0`>u;yQvd8dpf<9_>CR8-eRXE|-oQpu0q>x*J)T8b=DcMjO6EyOvm zB5;9=2(mHzxihNEnSRF>=9}Kf&0b#wHILR{@2E`}r}O}%H5sQS7Q(Vlu4h~8&T~(x zXYqzpIQEzZVlH{@H0KO)AJ1=NIVww7VBZ#Q19gZikJ*8Hhv$;oa}1oC9;0pCIv-A+CB9i*5KHWkDR|74vPFNDH6&%6hWw4QjO-R_>VmnU8^_pJ{yJE4b`2 
zmc4SK&#qz0mdbbVu6>z;Gjd|Eb+aa}`fANpm(6B#RHE1lKUwzdK_Dm$1yHxxcHEux z7ea=TxT-NLS^Lh-Y+v+iuK0HuC|qBMQ3;_qn!5>Kw%_9ZR)(_WP3zc2?=lXZx}od% zX51{d63eWT;STQ~_xR;@R$mvu?oC?7C3(u?fO!#xU;u6n3(| zUEmx@9$rsAo}Drl8ey6PMsX);3fv{07BE zN4Vr!flO98oTbiw##zN&fJ?dSv9BW-zb)e6Y2yQqo4Scbb**7uWrbYuWXfgvy@fK? zmmzN24;pbItm8)nTegJu8Dv!3C#;Ak|3=}`M>CNhqe&gWy3EHWmW}P2#Ht=&6x2SW z9)&}E)SbY?t=dAo9R|*~f7pXL%)bPWR>dD;6**^GJ66xG4KOvIqM2EW^DEwxVd^2dJEw&*{%w z!&(wI(cADL=klc%5?2PJzupGi(038kq@QwLEn!TtJCIe$<#07q2VwEJZRl6B1l1Qt z!Tt3TtnW`G%kQ1X=G;L}gd@hxqVepg87LVi4!u7OSY}x)Ya0HKnV(M;bdyKd_y!;S z9jG@hbg6gNLN~VhC!g&ObYLr&y7FjD#WOy9Ojt~L4$oQzavyw{()Ac-xKfMB1ew8* z$ZWi`Dhk(7C;YeR%Q&1D z#+}=o`urOZq7jPylj#k%z8wB|yyAQ(Y-W=mu4MOCq;k8x{=nWX+wqV8BHVp72n1tg z*!zAu<5$jM)4$nqS3)&#c}NT%44R7PG<$f@_$F+dCZFZYjbp8niv-Il+fV8PA4_CC zaK!DvRja+D+2$8~wn5334c(09nTL+YOFgl8;05&(z4{?|b8Z@|FoRdb+y950rr0~*U7}poNnBD2w#vUB{!BvK2fa2X%D98@S%%B&Al3hdUc zDAs=0kIhoj<5sBZ;6%w-G|!z(*@yMK`d{YkWhkHhkO9_sL`CrV1L+Rw%|EH#1-Cvp zSNkD<4Es|?Ka*!IS$<+6PuzbZ`h~}0_Dp?b@8!9`AYUf5HkzdfE3+%=KG3DO0PXfg zqJJLc%GK}UexF~;g37kC_LKpxN97pIXkCrj`eC@=xeY!&ui<<{({?OnVXb^FCR7|9jz?m3+yGSReYxS+lvi(q}R?QO{(`_K5ol$#DiVNg2J;v zUpt@apgFsG`X*1&)d%^#F(_ZHjaQ!-a`O~tG3DGSR)O;DVw*oCNBd*b_6WRqNeG*E z$8mN&%h{YoTiEHnJzQAXNzl2y77da%;zrW?Z=7_U6DbX0ylX*h;ekp{#k3PXa^d(b zU=`YAr-9Y{pPZQGcDBi7G3y!=%DGOK#(kMP&{}#f4(@P;##nXMK0b!222W#Kd_D_i zdKlxYf3cWye;nyLxAGR>uw_oK_-xcO54J+Tsm5~&<$=BBV>IRO2KcTQ$d!*{2juBz z^13m5cd3sjJu($%PK`l3B@OJp?8NOVp2NOg*}-;%OEdS~8z@V65ysBnj#0_K;G26I zH&1F6EBq18-miYi<+@jZ--yeeiQtQFL;0{;jyOBNbT+3z-mvBWB-a(Q17R>Qp zf%{31nWrbjR!@&$AD;O$m(;~vubMp8MMh!nwpsWt*977%wAnyc49hO^VZv}vAXaUL zdjk0Qs2MOz)sDAQ!;vY|?lZQ`l}&q)=pDkRK7%WK%>6%-&N8gZ_37e-l7fh!q)MZN zw1Be4?(WWGcemI%x`9` z%I4xRFtM<5dq*W9b(M0)zlqXhm^SkBddcY@=acQK&W$H3^Ho1>J! 
zov2)$oT|znP=&Kg7K(elF;d{TmbyZ9l(LaamC89olsk92t9slsk{exDNKi>{DL!HZ z%`;ZYo*AnYt*f0BSG&E1Yn_~BUq-N8JPaQQBCqQYL3uD6@K)mttoGBGErb+g(V^BGisXC6G zr_6aiL0Ne&PIbJ`H+beQlE;@vihBM$Y1g{$}E^f2DjG z-c6Rwco0;5+fKRFk@wR(QJzOiVbL>pnO+kt7q~kxGTF1B+3^5n`L$pr@p230vw2id z*U%0UU&xw&LsNNsMnm;ir(PP?G0*>t(e0Z@JV8vl#bgI z-m4nhFH~Ob7^CzUzCl%9S5vnAULswLhDyUxeNeWfzVgC!g`#t^Wl(=*R@dc9`3-$#c<5NnnlM~4Di_lW{4cuP3RgAC z9jD}!&R6WhD^xq*?c*%j6z&(#l3%AU;pmtPD$gFXl%@5iD&9R0sR{z0L2d4Q85cQT z;(A44^x)sBc8QCX?e#|}$svKRNp6=#*<#VHTP^3R~OoJtRmy1|ETU(fQwY?HVb~kNC zGev*spP*m5J*8G>C1;>bWbjIVmBI6Y%E7VAl(~Pkl`_wH_?t6Ql1_7CF#H>I*2JlX z-=3f(PoJl3oLHs`pK}byMo*KZE;D8Igd*-s6szLi&Q|K!O;&ysW~#;vdJDVa1>!Vq ztfX(-gt7#6rP0YH%9_JNmHH0`sk*H*l>Jwh%akR332Y@@p<>Jh5K zZj&9Q`;uT8yFW;56B-1)9O$XEWgN_!=dC=tey+{61DsPPyR&NV%$emqg~`V|DdBTg zDQ_NIDTek&n62t9S9-6I315w5LHF*ehocyO|68h9*w#@NKVL&V*3oi)?LzU~T7?|@ zRMn7&6O{%Z<|ut#OH__;PC>(YhU^HLF7Y)vXjxOL%3m;7(R7-qY_Q#->OQI(>5Ugj zrPpXl+_e(6E9xpgR7(}DHiMO+jXJ7&wln4|*b2!j?j?FJwV`3%M5)@oN?CubqmnZ- zsnGW}_kYOl65=@r?!UFoimg7%7T#Z09v;fvCoO_{=0P;b?zHG9ZsKsY@L+gXWx=bJ z%FXuX$_67_^cmkzhOJ)C-EadLsy$LQ?$R*j*T5x8i`Ht&w)7C(av3M^SRe=1y+V5a z9@UE*lNHYqvlZv(S5!rP&thEpOvybpRo*W-f{Bh#RBJ2eDd}btl(1zns(bx@VtlW~ zQvP;?Y}22C?18$<&Fp2$@Z15)>keM3*e|By)`_{UNe>xu^LIYU*kk~0ilCHL&bAbWcUC6?^Y^#YUv zQ~SaXs3qC&!R92JS-B{gRdG(==*zpeN%e}jILENprQ$-EX6sYt= zky%4+v;Otu=n*d&{4#*Mwq#9hpJIQ{cziw>jt?KsVbQRLQtz^_Tng})!U+}<-kTZ| zucC3`crzGZ_t~SDW+ncA}1uV?0*we}dVQn~Cn@0J&S@C6|Kh zN$|`(jA};hdyN!i`d6Wom7~Ppqn38Pma@^Ru8d0OP6qex2HVib#I6A~PT`1??oyX}T7-Xm=6~QgV@*HQxXE;c8LM49HPe#~q&g*8K!s6)$;_U4$ z!o9nxg-vCNc{!5C#Ual#0;P8^pvxo^G57S7ZEssi{YmCBuG39qdPbvm_ExOZzJpyB zmQuFVUk)wvm430ta%k9DOzB7eE3*X5zw-?0z3CNT$bFDrUYsq`7q6TgEWQ^4KbKSt zUib-h-aE>JszCWMjoSCWHRY<+A=q;7u0`-3*iHV2)aNep%9a|nrOoAHz)xJiw-b5> zAut=joeXzv3AON$`?~^VPMp0Mk9~*n-?n3URw(l47jS;wP*xuEmWGQ1ME_P3asF0@ zyQ{Y0m?{FV&t9UAs;Ovh^OH{7T1o6gGuhm(7>CwJVX=1%ewde{b3-dxbH-nm>QgIj zT?5wjr*Y;Xchig$@lpK+Y}wZv*d|a;o$-`y-Sos+=OlG6so63z4bQVb#(Njrd-oKw?$xfk2-($h4?RmO1U)w5A^8U6hm>POLAxK#*}Dv#l1PaSET 
zoJzxqjOn^rP@d}HzIa1CFcM8fx94BR~LLdaU3F#2*yH#+Dg@)VrK9fGuV}K;S%Mn0mI0P-G9_1C>|FMv zkaKEx`|n4{cXip<)=i2ztK6`^v*?}tilu$iv3n}D=E9HT^fcB#9XzGQDC)RAwUw^l zUgBR_BF-!h!w!QYSRXRtoRp6&`9VFkDl73TxQAx9sWDLqvTlbiaam9U>7>5{s&y4>9PswXEq9AUm7+Nczb7qV=E<)pf)0j7%kX(i_~aw388` z)Z$L(?z*9lv@SY^DQ+Q9N;9y+_d6!GZ7yRtBR*%QtIUm4+i5iyF88Y^3ab1p>+v(ByR(_PjWa zZb}15X-r+oY=1fS!&0<%--U-x44%YAVR}|EI31u+Y~d%t-AqJp`UM!-MZhdJ4nEQ4NG@$EP6hPn zb?}zq?gsLu&naZ}qnBUrWCWB}qJENtIA7r|%xr4r+^8!H;*KDgdv`DA?n00J-w4ca zAsZP7ch+^0^2|T*|Fa8u+`EgvdKlwI)RnO!D>)Aq zh4n=LxtAQe#aS|+X5u#c2{wO@!_abmx83CWkEst|?JM^!{iRIPf*K#UU=|*Yo4=x= z6y3(1RTh%noxUzvzA~pxLs_-{9Ga(wqh@tHI^{h@ccqz({KUPtbT26}sVD!o<>HKH zDEGlrklx@UY^FQP@`^yYd&6D+9j+tB28ZFoy}RVAyJ7XO7J2iiwZmEDi#=M%fN{Uj ze@`Z!aqljF=uw0(*OI2!7~}J}cWy~N(4zP7%H(-iPkoLN`P8s7km?EEqRYKIv+hl0 zVz9_sBh1VwzocXek`(`zk(8{DMl3=P@e z-~fuan`Rr&9kmYBQ@iUbi@3Yi;hM8-c~pa^rqplf7J@fhvN1PLM|%DAkVXI7yPIk! z#|FGcXmApY+J_;&QiU3IBk4E7N7mT{NYylJvDABj^Z~I*jEIE$;;Zoe*+}%wT1kKf zbte8b63ca0(PL{QcK43O*xLK>o@gzxkOE*DbwVGqCCJ)HyeGt!XO zYq)dG?|x9rSxUEm!Ndn?c+9=KOKvCdvQ$@2Zt;}xUV*YF)K-SryhPg4M07Y5hE+-t zhL|>x%Qt-_x!hmk7jgHox)hP?>4nrX3YdQ#i)$N;-8`Ph0zXOmVJZj1F5||}2oyVP z!*z=?EFRoMo{bKW)EC~;`HP{zr4a3&g`&fz?TB0c7Q@9})+Pi>|0y1FDqUOLzEgjd zdv_Q5@5D8uADFkLxeVu-o87`yW;=8D)0g`uJd< zU8$H*z}dP7C*k&mdQzrdvdxCNmHW7p@%cIaEl)sdvvBAoo`K?MOkE^jNsXrtL$Ian zd2|P-3%BCly(qj`a07<_%*66iEBZhC$Uqk7AguB%}{pI3%3$eR?8-D*72RBEf zdigCpsWs;~r@t)zLjU86hEkMu9t)o_4pzp&Y5Ws(Xvv+7n*rkV(u=w%^<>$eJh*vt zAEY1|`5!*u`dbGHVvKI;O)tOkb>-=u!|2DoJCm=w;2Qr2J=I+#lret0(n9`R`i&n; zcd_@g1r5WG;M}FU(r0>0xz0O6H`PIoR#d`sP%_T+3B~MAr;z7lAT4aED_IyI4v*+} zmr{=S#5j~SrDo=s3vlgVA|uTFq+cy}SvQ$W&Y7F2>`LwX_FLh#xdf#@E#&xQfBC$b zo~Cz=CGq-MM2`!{k4N#?8Tt%+8&NY;lUi_N=q0sJU)p@iLBBWj0C}E*<}E&9L7pR7 z0<{LVy32%ib);LLLonpt-NU|nU|seX-E!!~&AS9~^mv-?}gh)re^RalJYNl{p@8H1&Xck%GEr3}fU_oRid6r67$4?mv9gSTPm za5n)5ax0Mf%SMiJ@6PV7r}#uL#IE9Df`ir!mHNOULxRUoo+lvn*pSd#t0IjQ^`HQ6u)_EOXsiw)FkXQkRa6 z-6V{;@cA{)lBr&UCp*$nG?H3#gOB6BgRVq+c*^xj)KWFIlb`YmftE=)H#H0|cPPl1 
zW+cP6`ACWmW0|3~JPNuG>pHP${wNaut*CQ9vN3mpTZ#I2KhX_tBoEGB!KxvV=$aaf zfGZDhs?1vY`v=Gt4<89jtuGE;Rd`_>hV1er%(i+1tz0{5tOrUw!Tui3QI7g{3={oB zFnx0d^4@;Kjt0%efi=jbcvo5QNKGO?>_t9n6ubBX7*wJmey81JDQlR#Q%rZ^V)m+Z8 zc6{*4MeZ;7hYg$d-~#7!avL7PnXj5+F_}9VA9?qFbd=|7KcT-@D$fM{@($+U;Ankm z*4<0))u(Rq02^w2P!rcWfxao>7}@(Q-h>;A`4V4x$@@!KGfVm1@DA24*osAN(Xb1+ z3B9q@9)C?AT5mr|oM9rfo?SqVPXwGs#v!;~Im&aI%BRBt;^#*V#CZmCGXE6TjtoT` z-DIo^t%S~G2bq1AK7#97imfSUD6SmAHtyZ+YPt&pXaC0Fh!!%Oaqyj|i`?_5#p>p} z@sfLY772%O(v)6EIxVH;{Xp@waTNQ`RT!$LouWoeN?s@h~ofnN!lAA2J4zio9M^5-!2Yc22hjQ z{33SmHIc5}={Zy0iaRalQdN6{wP7?0Q@3Jt-#gej)KZ$Pr{CQeUn%QrEa{EU!pl1x zE`bSHGx|A>Z?q9HrEZpsm-K?ZZ0wPPva>% z7TmiFy0-_-zW+m~-_%I5X)TTg^i<#f16dn(V!Uw(baLr!Fi2Z^wDpkV$$^r+#9rE$ zyoLA8?dX3g6nkG3VBJ1LS#Zl+9?uGp@@-AzOvUvOJx#Z8 z_TIrv?r_#xYeW<}PK?3I38mP;xft(j{u21nN6twD8P?`BVpL(+?n`gyuow6~#a4Q> z50t~He5Y;oq*uxbJlsS*w2f&v-Tn*q-*OT^E$#trcazq~G^C*Y0sKC`1;(-u@z(0x z9dhN)eQO!_+gZL${)Qitfiv8@n=v38V;1Pht-BubANTHRQs`Nc{2H1?Ntn`{8tO+> z@KWkaat|Ld@Z|fx%UbUBeSk+BW8tzalI-XTYV#UN6&ZYgk5;nNrm=LYy^6uP)J67= z#o*=lF{G2VxEs+MbD57Eb~Ta%lNDI^55s_fB#c(QLQWSu={lYJ`1L)-yRNP*_>m1a z?%nyGPKT_kLFWW#8OOc5EuWYVMse@%#y0a|s!kudxWKE7xmtG#{2cAvkv=UXzrx{KP%t!Q^T z3jh5mM$^;O=Psm<&|G?&8k@?}hL?~W8iD1n=}(sX2*D0b4sGz3-)iIvxAT!6 z9g2iuJ7Cx1J!b!6UoR_A)L$^KyGSj?vqxdYy}QA^Gm)qL3uWdlB#C=>rz2dXB;hY^ zsP92T?sr-5rnmFjI`o%!7d`IXy-TMD$i9zwnVN#dpF+^7e=dGp)|Y)Ny~LP%cjN!i zkKXVZnw+Me+sbfU4LFChUyY@8y02)D@t5G!7NTZcf(5Qy;W#%MPJeGArN~?s*QW+; zJwN(nneaEdfC%*nSQp39Tc;dHjHs%2mhWmEZ`tvtp2QiSLVOUt9u`qgZNdliE@rKM zox7l2TFR|Gb?Fmw1jip!_c(nQ+6kJLDhQ?}lmQU*wP{AK-J-v2Gy?8~j4efJ|)uy#>JqNAcqfH71_5lz2XOt-tni)SL7B zqj$h5Dio$y^U>{(frPE|miuAcvu#d)fzyxB?fo_cZizs1{Y$uHU@Bea_{mBY{qD}2 ziN5*`>H|jM=-aJmYkn7+IhImMjy=}bSJuC6AaUc*Kus$gIG%v#oMHR;$VP5D(rfmc zr_9dQld?V3jy)8D%8RKu__!J+)0`ON0%eM(yL_FfDW6>r!Y+?{;iLB=cB+~jhO6x2 z`_^uBbM8fcM@!wEsIZ|2$be&Tey%O6xJPt_?`w#oy-fV`2CuqpN6Sij(QYk-e#83G zS&d#u{Q|_WbrZRj_7K_jXW_YzMEZKYp-`X5jAl&5jJGQIXVn$HV? 
zV{IB>{T2UtQ%{X?&S-?24C$yL)+hGkC*xq|<$d&hQTMuEPwkvv}FBi$RYXWfmOnd|Q( z&My`RPed{wyNb5fjpeF)D_LpKN+zlr$-}x=ad8E6-F2}z-{k?urCZC-mI0F9%SY~= ztuOm>RIqdj!`l2LIK{q3$L)4v)ton%JOFR%(o#mXeA3~lw!{m?z<0-Lcr(iaI`U# z$D3NowZnc=d(u?OjV@!>%LuF*No|ygWhjfL#>7nerj>Y8kHJv-RuI_4mhbD5f)6VRh(=H?LYR!@G80Z4$0 zCu4m5v*^~78WW>98?nJ(N=8~r+hKQ5o3RyYn$c)oa|0u6&E>`;>LT~><4%f+l-XQF zx4{wk6BGxn&5zMIxT);l6Ck|@c}q#00X>oOacxd0o>lC?SM^G??%^QIna_3F*HT^t zYRT6YN0G*S@5%K{%&qu^#|!BN$9%A?n~Rhd)S~^$-Ke5wW=6nalz*%vZJxV}BiUVW z8%OcKNp{yV1?}pQ-Tlag$H01$c+^X3nU6lnY$nxhoKfN9*cy=sBXHoFluF z0Qwz`x8QEyZN$BdhStewB-Gx*m1gW?lig{N-BqSFlv@Au7<_{o6Hf7HclZf3r#6%S z$nHMg_L9dd>&c2~c_?j4orF3m2x-IJQfo&!OLn(iza>2d>dLW{!{|nKmu0sb=1#S^ z=k6lon8z=UrpCn5-x%Sr3&zazU!6UIo<{sFR=1RQ|EmM0IY|9&mC!a!hV_V0cppE7 zu`}tX;^QslM*`$#epAUaEr-t7IPCR`fct|Bu$W>ZSIF+P$nFOGXD$!g-NbB-GBe}>3DHlok^XL?UBnK^;|jN{aY zFC|NGO9hsGqW`v|{3W|9n@WB7pPJ%kaR}OEcU#x&f%$Z5OjNkYW7c0bcbm&S-=7G* zyA!=wpOp>Df|`f6#9DetI_tNevG(FS@*P%v-42Ump(t5YfIsUEWoS0*zC{5t^F|Yy z`?(B#S8l@*B?8?qUP8<~Q#nj_cO@y=$MfyvgTov6S0o{zK^P{rRpC`qeOc{J&rY(tOHZt&cgX`h%ZSChfsvS$e+8+_ z8p-S$KM5ea+cCDWsN@>T$|I4c7K^(+_pword`mk(2E_YFhuKDwm#H9ZdKhjrO2THp zSLkSLC-(pAH+XqUW(!@uyT_41b{85?Z_@8yv1paE3}U~+$%6WkC)Fj*c|TN)@xur0 z$NHb@Qqs;%PO{&T*VkDrkA1~}-ei94hpdY_j_et_vbv+EyckLS-Ev!5@cAYDA0}eu zk}&pFi%@&mNH*;95lynYFO^mjk$Vq$H)AmJb0h|zp!VnL#!|muE6IG~Czpmd68kP! zFr|GYvhKtprSC)h_+u?qT>|7rQy(cDUSD3`EktLsyF8oi7&h$<`c&CT0@+>TG!F@_ zrz6t~kHLcM?)&`=Ob_^u6Ycms*k=lw;VO$`)mT^WMGpH=j#m%B*OEFY@7SLryUW|` zB=+;jXd9(rBiWsP0Cl9c=*gwqyz|NKrvJ4OgEJLa^B@6l$nKuKJ`IP{4P=6uuY?|> z7VCHVh-KWxJ*^mMCq$w0crmosnMw3s>YT3fliB@EW%9sFa5)?S{j6=spYsR-l}+fO z#NK13x6JHlDDMplU`uxQ)@KL4r@ce|1$+5-kZeuYLsoszk_-0i`?3GG#Ws_@)t@j@ zrzafygMCi9NZE_OxHxzZ&XL^}cRhs7ZR*JGNOzgU{^FYgN4e_#3HM*8z~(jcmNz+g zm#Hrw$nM^0(htwVMrvZ8p}{lGAB+jdhV#@9yJaj*H~GrXDgL7W*Fx$=m(WvsD?WCL z#_znF@LER?5VE^QWOoOGnqF1 z2`WFu0Z+oQb>De-J)w6K*vwEZMJLIN~VQuhbGZ?&G~? 
zKmV_}gJ=}KN2iP(I7)Wcb96q^?isLN^OikH0kXU+@6k?=VWb;}mXjm!H0C0nydyst z>L=?jv#+aZE>SCQKzSI26{ohMsN@d*Ewhw~asKkBgRk^=F_wDiXOM0ij^f1$xb)^Z ztajT-G1*-|GcT$B&K&3INvtHh`48wsOB`pjd=?%Hc9y zdFgWkcY~Sh-bq9AtzXbzlktuGs^NM!88A&ln$|ynT=K27#C_P>NnNT+slh}3RdvN# zcHFPQv4$DcdkKLmEF1sj>qw7Z9^y@QH++hnl=XRy{FUtMlHDmERk){VBsL>`#F*@^ z^Hgh@t@Qw2{bR8|GLrHADr$cfVoM)!5) zNQkF&BD-rAVk;Z0SvM?Bgx29OWSuF3Z{r3MbIV5@$nM@Pu#(9iO5wJS`h6Zz$X#?D zt^YNa0stv`)xez;^vM%1T z9dm=J8`9REvju_DbFv4Sqqh9_H48U*rZfiagv#UxibAL(#53pLoO!$>H66Se<9R0a zHXMZ77EQU-++7;~43zTLPLe&e8ag{uakqeHy6hyZYV_n&BQNPhb~kRXjSQ)Jj-JaB zFxw^^-FKWplC!ZqBfD!#cDH<`rRY7lgINXCU3@@YMTydda}Fph+H%>sweeRy~L+FKwQo?ljt)~p}~FXI?3S}v*H|HwQ4AON2#O3 zSZB1(LZUC z?5_O_FS&ENo-E4DL#7w&R#h_6sy`s`y@O0+jMn#RDFI{aiq~y^X0p4P-*&-f`yYg9 zaE5^~eyFO2=$!wJor`zTbDA1Tkw z^iK()&wUnB-f7V{g)if<*N`jc4qHoRhvwob-_hz(2KJHN_0BtnBf2`0ltMjF=42~A z*~!1zZ!pWAnwfRN(C1bmehjHE2~DW4OLjN*pS9==eu#&qv8ZSpiDvDuz-eeBIYxF@ z!KcB2iNyZm7nO zD=V3cR(JB0u-Uqj6MGy{%vIfbq+{s8uTb@NmKf%;TROPO)IaJ{Z`gjEX0BUp&pEYh zb(v%4CX1L0&s^;+Cu+aqBxmrgm@7{ob{tonbS2EwQ#Lb~UT$J14sBjxws{g(P78x^ znu2vRjKnd{M_kD6YU){wPvCuIYs8}6WBMHhUW3Ny#?r5KD;e|6Pbgv}(~CH#HaHT6 zX|Z^F{Q<^2ww5BYyOS+_WPfsfQSYQeE3&)IPm|EC*&D3Svy)1)yKUKI0{wI({L3+z zklm$*@b~}l4OPa?#g8>ee4MK+dq{ow_j_@XHOleC1L%EMLq?q8doxo&U+*{bU+N6-4xm)uI; z+m_;%UkvWAp|`VtF=U*X+#-@&ag*-Q`wqN66rJxV_b0db9SkwDyp4FKu}0_G3-I@ zTh^}k``O6l$IsBsDgi_Lgd?@zS)7hErdI?#P08-=H?fonMt9&le=82SN8@0do47d6 zT)f}3k{e!ra$%Z@G%CLUEwZ~)qnJ|~m1A*UQ|ZT=dyp^n;pZ4|9{d!lS(CfzCu3|R zJ=~@^Ncvf7k*;kit_`*1>BS=$%bNbX4Yh;k|Hk&n7BY%)@PLPlRJhi{!D%-P*b}&r zbQp&k*A)%DmZEu|^BqkcrEU8vjL}YkF?$LPC*`5u@_Mr4mY2L?&tcH?W^z9J3H#*n z&?)9TvCnz5PN5Da+1*UCyMA^Sa#Q0r9_B=&pduPYoo-|G7zz*3(gy5d5QWEvb)+`sQr)Kxu#(4l`2dRbd=X*ccm5XQuMiw zYz{gMN3y%iOLs#*rxvuY$!bB=GGo;8rGes<+^q<56h(Yf0{&mU5oGo?*Ql zWT$o|a$oJhXR^CVcKOhbH4yEA-V#7|H+M}_SshNlm9}wsH828StS(|PcO8C^-DNy( zC2#G`W%4iPx^<`pn!Xi32He5m;g*t0c6VYleGU2;%V?9c7~>U=hpiLPYy5Mx+iWAz zWOx6y@RBiYs1?^W2d-pyrGry(OuHJXI!Ksygv&K_OhVtbi<<1P9XZ^u`%yR)wg 
zF!g|;^t|mYi^%SRVw*_d^fF|RCcCSMz$^XB7ImTotX^zejOFVqOfyP z44O_ZW$tJthsf^cSJ5XUsDao8oJMhB7>2b`fG8vB#C+7Q4ML?_5iBc`wVZ#zHU=>`FPM(irE`Aq+Uba|GrVv zI0L5aowggCjsA;ticG^(C~D9nbniCTpj^(ZKt)g z;GWae4Y3GZ9*Ns(S8*Y~5gADzK?&MttH)vGumW# z*IF3K>i-l>?iYrpt&`w)<`w+A*-0pS%I^$4rLCr}Y_G}2UG|*Eo=u0_rWzzCI?H1A zq(^>aJ{Up2yX*V#g)x3o)_%-yry-+;xrr}(+GBk=t7rEWUDVRCo$Sv0=W(7nUGbUX zDf#S)TNK%f;pCSXcrp>=V!{}6itzT1kxW11BL}Pe#q+3@Z1%o~iN|A*V-bb0-`B9a zsIj&=*nk->nTUS{+Lrq4H-wR!` z2B+o+$?`QtrS2}#WD)mg(6hFp8b7Y2B9N>idgn0j;;&U|R49R!&W&BN@z%_16-cyWvenbb2qno!8fq2D{zmIT_O0 zGZ-oPuAC3<8%)dYBrh|k}rGMViWLzP;bD#!G@ymJ= z#s9C(xd72@NQU^l9E)zo!JnV+nC%6ij)`pI=kCbQ?|jx=hHKu!XMV4k7OeT}m7rE- zAszX>`()5B=)18b;~e)M!jYwl$70=QXvE(uhQFHz+1=G^`m%FyF5JoPs&=FxYv)Il zr8>$@{_flO`+IDtBY*ZCLOVW}-hX#PJLWIGgt^ErKDR&4!oA z8%(8!?Cu@U-I23qvZc0|JTMB{JeTEGcX1-uQl{~|CiC3xeceEYjyVJGx|}aOk$}U# z6LJ+eFJ zZH4$`LVb9?=UIH;rw23<>*R;%qrVNK84qTvT|uN7HFX&$<}+R_J>6JZ)Lugy%P6R_ zV{q5=9;P0#Vm`z3%6N0>u8|~eq_;fd(3%s8=sfWy{!?rvk#TA-%TO(+A!DkaTaIBxz~($quXn62WvlS7zgdI?L$boy41jQ<7eV==GC1J>ZyGS!2zg7-uX?~Ap^RhZ8E zV{l;-)Dm9f+zvab!~3QCJr8NUSVy9VWTOr5pEGST;6DBv=KOS)^Sqx{9&(lC&D7;Y z`+a!N`|B?bV8{^-Ig#ilV|c$E;61mx>lf7W-s{Nw&midp)-~0Wj=UH1ct840x0UHz zU%=Tv5%YO}X0$pDE#9Z5|GQUtzy9Jqdvi)Dx=xP42;RRZYp&z1t(h#~y=+f*=W)VR zmK$8g%oh<*sh?tdmc0tzqbWwOLjN+`gSB;e~ZvX_A-WfK{4}# z^+UBe)0~Bi%o`@H-HD-@KOoE}hBB`(sCALbKmTy`;U0V=yUWizh+8)`CA_V>crh>O zr0XObbgOY~NGfX1^Zx3RgQczYc=wlpP-Bju^ zADhj*>^<|d+xGeJC%e1#atH3zt;C_84sw|JoZsG-QpNl((D^8)GT(E(nTh{i|AHO! 
zz$eTHlbIJj&#lFaWxHX*e6f3*!?;^rM+P#F{LOsQw6&uQyjF$VZYelRc9-!x7gmGo z$y(;2naoFf>})2k0Z%cdTRgH4hhy{CbFgRrx{`V9N#?V?#xlltxs3rYqS1-@uCw}W zth2Qccjm!;UeFsewV@pLJ&$?JlgBq_PvqDWBu=9sBJ*f%vb)dBtCvpBLtp0EbG7Jk z-?j?Y%)hJ1?$$FOk2k9;b$1+w4fFJRj=NFMr525tzyB|f_ljyEO&0!!o*nah=K0AN zj-b7G=~~OLiiX^;yc$EYx^vOR5#MLs-A9iLsZVL*F6k z^LFHs-MwF0fQpTV;>o(N1M9!*tOHL~mEk(;!h=N-ICSL_#xF3Hp{yIt$nF}9HsuKNPstXJQ+;oZyn^+YE< zdSRTz1J<=u#-<@R|1)xPoun7*-U`;g0i_!9D(L{Ovo5ZF%NYMaO>|i=2eEDrZP;8^ zef-9`_Y72#-L?6A4A=VVP;1RY^w#lApRyBU+c%i-ED5j3?m{}KaCt|48OVD53+r~< za%)Mw@c`4)V^K3G5+$lDn7X2o7_;8r&bt3D>;GP zd)6CV`DiDl>=QLi^^gQT9Z5dL-uV}n{Vu`d z5NDB(ZNvNpk8uA(6WP!HT#udJva!9PlXj`^Btw4 z3KfCK{UD1y|0xR}yBNpv~*V*6PZQ*xg z9JJANksmw$P%D2I8nEyC`Q~Bt8B_g9BKl%*&(o@td#Bt3nq_JN;oPFz87jM(Y z*+Pog&mPOZ_8I?%@^*H~&=qYBOX~x~609nDl_%HUywW4xS#D2NSq7?d> ze#F_*{f$Z0N zjy{T|!CKOqef(qW=ZBg(h<)CB3{BgCuI%@Z9G8zx4-DkUL~rRx9`J-*psF2rd$i+V zOJ2|>{vwW4n#cpPyU&+eNwT`R7%ZbF*MlhB$lr>Q_wFETg{4%G-L)pWTio1Ot|gzr zEpm!AOA`=N`5b%qFxMr=7~9xOG`{J{sK+OvPj+XVk&5SotKm=np-m3bw}rdBCl_hA z@gPo~(e}V`AML+FW8A{eZ#Xoj63!V)i8qk5jbe)+FxClf$fQXD@Bn zyv0BAncn0yhaC&C`jeqdsPLARo|)q`PF-JtYK3$B&Oa0WRY(**|85{T^JY1y?T>>C0wQ_ z+?^V%Ymk8^MG9vX(YYK?Y@)W-TvK30#{r`bN<=JezC}_^8jn7Sj%Jn-F5so za_Sg~f3*r`{I@GsB%wj?*VsMTPE5!R&ypYZEYOkq5!v`eu6Wou0~bnb(3`w*7(e^+ zd#6`Q@LOFR;IxhTZ&a!o5%6!b4r|+j+__ za?hmAwsPG3C7LWs#3lZ=ug(|2gq(B~dFdhk_CfQl zu8_N`^BI<0|ViRbgoY4YqXw^x%d9(!?(XL9GJgZL4mDQ|gJ zYksib9_S=j2Ug=-S}MBoOn)pt2|e=ZXFTg)$*b>>TZg@)56+SVYC(i!OX?XcXkjcF zd{@%=&Rkw@DYNe0fd$_sZ}RY}aS$ye~5%Xx1ibGKbYIyw3foj82x@EG}B zsDHqB)t~R|y88x_J0>6B$>GPQ?|^Uqd-NlpFXp?xpS*t7DlI7(c@(~k1>bgNB4orb z&T+CHW~>NV;39A8{Dswx-T2N};--5T)(7iI0Otd87;C&4bA~nk2(^VNsLMHml;~V& zaIWAdW7MNhjDx4i*;G$4YF#`k7|RX>pF?2)=hPVEmNM22U1K4&=WpX6=NGzhj-lSV zTNt3unGMc2=rdNx&3 zRo+34lP=(PXA?QYyUCAtlsfOK^!%F$?i`Jjj$1JvN7fsai7l-C2z1-T9^} z9(8VC(2aVSk-S@-c*id9q9RTDA44hc-tZYV3^KD>W2zlk%+Me2%_FHv7qQ-I!_ zSx%KlqR+)jocw1k131(8aJCt>kA5qkES%%aqe$J-&?R4CNF9@kLMIu>*~r?Q9XcgP zkk6SZ_lQUP#oRdh>Y9jHR#m;ahQ{Zf7x8N

}Sx{j~ojARUTT;n(s{-Uny zTUH5PQ0Jvjz1QQchj5|(tAw*;CG}v>PUuP>{fj7DWEK2lZsp z3pM0%@M*YlRvkVl5xp|L!-V>?!<=O&bEaKfuPD1mA4WcB-4*uKshv@j*cN>Aa~3w@ zOnmkCclbvpVl`*w-V;wF$5BJ}a)$1~S-OsTxS<`ZkZR0*GIerD&d2mcy7G-Pcot{z z0a`6(Z-;X9RoqX%KYbS{LGcv!esYGtKpozS^+wW|a~-oepNC4arO-vX@D1~iI>dx?U(t6;0`#d*tW?fIZf{K) zLET~@-$4Jlx03DZuknYv#yGx_uB<6wFRY%7q3+R>Z>U^DbLnSEt&C?V8u-SV+xZq0 zr<=%JzPgz#8f}ZO3F{KHr8L%aWn!@f*9#sQ2XCaqep;sa?>BliSkphi}X8 zCdY89PFY4#FRIJ8r!L>1y6eB7W7`9`%D3s0lYFC2(~^IDv%2x^+Ha_}G(M_8jA1xu}`MpE=KL2wzw=i$nL&4|vD7dTAkZ6t=ob2R$`u zbD13}eAD-_O@+_uKX}Qv{uG{r^Ie_fi^D%$w`DIfcLK}ejw7{&sp%Zr@z57SNwbM(qqQ3bacO|Oa znHY|0DjN0K*vMTEbUc^1|mh6u66V*l-XH=l`KM_0`XK zf2~~AM*6;~NA0Z??BXuxgkuKkz18F@cRV3EK2lA+_9f*HxcWL4ySWQm+4=&$gf){Y z?ug3M==-DI`|{pr$U->Exl0;obrpS&8b~^KO#0k4Ioq(C_2+GNiUwl}b>zWg%Ai4A zxi$6WKd3WrJxE{7^a|n69oD`skuaI{3b*%K%WLkqR&dv~)JIDad!0o#cVKVE9zd*C zEmWv$@AaMUBkJ4>Z!61~?4zjR4$b&!8k#gV;uLpl!?TzGCK zg2t)K1nTO$aYuKFI{UphYOtgv1`*uhJ$sjfA;+}Emb!dx?)c(DEM({GCvck-hM(L4 z_Sd0j9-f2L`9J=~S(mzhW$OP6<~_j65$0k~U%(Oi1DbT!llPtl)D`-{ znEruP`(Hzyeu4__CNI)ou+~CTwnUymv)%h(>6?H^X7kQ0vzJ9`oy3?s&VI+0B&o|0 zRB`v&q$l^f){0_DKf?v?M$gmVU^w+B!aF5lHg~56<4&O$b7#xAQ>|F*!>(@|sST^f z#GGhc*TBM9 zCjYzv7w&vNaQAz7TRFLLOEJ(R)8uaWJ^dXc|I0@v{T?4WM?>pM6*IqVVq|D|gcy{OG&8eh9yK#(zF_7z=tR$*%D(V$Pkm zDtFrkmfvC9kbuA3c^m#cjo)D!;zA$Ga_+?U=h2ff{S5+7N5P7ImqB|jqWFQXc+dyq z#hv=j8FV{Hjf={WH4X1Q^WADnd~UJfJH&Rm^UdF z6>faT{J18vQZ! 
zZaAV|KgUxtvk^t!Vx;UY#jl&n?xyFlg=|fS{xo%`{IrPs}CacQnwkI%- zZ0=HODhx;bML+uO{?~Um;eH!Y8}t`0NvSx%e_q!91ga0J%5?hme&6I=LI2*7nIECG zIu4`B23_>dBfGk(tW^=ml2(cP`}Y1<1R*^OS1~3{jrjF{O@K=_W9zv zmZXo!Mh2Pa_jukjX`j)ZKF2!#cVCyhvBP#>diUPha}o3ur-qW-t0QwfVh~ z)|O&&{wdtwhQX13&MLiP%+WKHM6%+sWOrW*Ohm2z7H-l9I*ESJwO02r^Sqh7<@fao z{h~MC=!x0HOPEC#edKH;)_Ye%mHyI2WOv`_Hw`+WDT+y1XrK?(poCtfH?`Qm&|Z@H zd$A?+R-UFTm+h(d&f4HG5xj+q8_7^{@WS!;XYKpjtQnl zQbl(6h(6uZ`Nop*y9Ach0NB#UyWaC54C(9rz;mXtz(Z>2_no!q$}&Ft(c+fE#}l(Xl*RVdHSaBQ-hKI=LT+D8A}uTs=dkXO3xd~--hcrMBlY5 z{nyq*9^rUOODX65&_I8-{zV;`keP?`^ldxlN8>?EHM-N!-HYt5A2lL}Qp(vfY9>P!FUw++=U}&b2JfS9~2k*DRrS=lq^BZ37 zPQYQ_f8S!MVX@YfPV}QM;{AAhZYzml2K>AZQShQ~{gF=r-at>Dk=@NFyBjpjT-wfj zfNJ{PH;v~$C`H+D)nq9>VYNc9sW5z__GYr~hDGSN&0!O%u5<6&$tS=yiTC0oIu{fjg0hY{9 zsNg(jPfgY@<|=Hp%YZxQy|tw&82!GUdgnG$$$4-}ANC-h{)b&_4x$t1#qT(dl5Z-q zfcXt~pK%|;JcotFWOq*S=+%tut|1%h%zc=5vb_x8JbE_KO6GXJ#8u`)gi>pE{}<1+ z$<4)!`4Me7&xTDll}9~Fp<5Y@9ofM+t5V9W2U9sqb{9!4Temo7Otim@0OnIDI7Q&f znHOlw+zJ!sS6JO`FKX+W%IX>CaGm)UOPa)EWY167Yt>rJ%6(*!hN~nPsmi#hqOt8ijACxa1fKC9nWN#f^bgKkrD6l;{1Mkqz?+>V#?0N=&3C|Q=5P$~XAia} zz10(aG2r}pv|rjxZc_W#lJAJPY)gUmb5t_dV<$C&&tF_Y{(J*z%iNFZYQFiWwvb|< zJ5Zyxu$mgfA^K&wWN9YTs6AXp4PsFM_2xqgQ8n0~jDw&3iwb6&Fl&eJpFrl1giO(r zVaLxxhwr0FZU>;d^$R?hSJId7rvuC{Irc+Yz8G+y!S~h7U1?}o)`&jLJ1OS-YcI2E zzq$N|+TCQ7@_jaK0RCYg7`3}#+mPCZ)#ry-v4BdR}zGpSnVGtkw3hN zSTg74#y0BfnS0a8;w}7NL?e`%+GD-*a3o$w?3t6Zlk9Hu)0R?u;}L!`M@Nx)I$bYa zhsP!($s)V^$Gjb#Nybvvu>?n+24aQsemHb4M|m@97@By?Ip+0jn@>+hd_GLc?q--q z!wpsF&wQVwvzSjv&G5z68d5ptG~Q3#hvzYgxZU_2cFYIb%6-Kh<^^pysVGr4hjEE# z{EYF3vFe|qI51ym7xx|N)GW*4?@;fbh(hi|B>XhOmN0{p`9x#5FDYSO(am>naO6Q0 zzLDLvF}{eLle%Kgd?RD-TMpEii)22;d*&YneP<3*P60F>OR@sZ{|b| zn)ekq^b;_Z8uSXj5(oKfN+z}GWp2#2`eQ9i&sXyM&Y6H3_OYJ|@VrP*K9b!9Q{$e( zoN|Yl`>><-eN1!^K3}+ny}L~0JlS11HS%`{8%o-sVt7(JZ_4~D=QU5U>a&ID?4*W) z8v6rXwWUBk2Q|#i%I+A8)9LS!e$7_qbHDeO`C6ehYSP^@6K%NvGqg`Z+4kQ^RpzY3 z{ooVkbA5l>fZxN?pupTNox#VjrmKoPrzfBV_lu_V2>fySg3FchFk$Z3lTT++l&&Sk 
zliO3b$lNbuYnka^f!t>iaAs~;^O8b*En_buJqcf#E0$DmDlhilLkjc8CNOtQx8M#A z?`k2s|EPsBp^ujy2aW%(ps!T`9@G1 zmvHYmdjF>?@$_;G=U#aAJ3I0F{1LyI)AoUTWBr5Y@c2?w$)Oh{`i{3K&>Ql0`U{-- z7!J(}e|ThE#?x2LrPVi2@gloB*3d$tHr~aIz1-_I1Y_NeQvBI&Dt!ial!MHZd(T|C zxOGMFXU<$DJu4AK^sX>}ZW`HLXncFo*5N+GKbzs&|I^^{jBoiFlNPs@AC*3`mw9$3 zhg77c>Twis-(FaH5OtajSiH(f*7N@Q%sjk}qw29VAq9cl*MFRvfyG>bxZ zrjXq|Z)A4xs@s@R9)wo8A^82c4AVDJS02IaV0wvS?e)b!ni&&hcOL5_G4;zUcrrI| zG1=W>dXbuc*OWz%vk;N954I@>aD99&VmjN41KFJ`y-Z)3N0_$#D2|eoWO$@wyS;)u zr}t?pIZ88np_29MaA!m^ev-4i{E>;f3F`8Y-l_OVAJL?jYWha@MeLy#ne6VmV=iWX z)fS)E%ta>0@g8GAZ^sjCX&r_!%vUt>y@sM+hH``KZazI+{Tqzs`HGuJWG-V3Jzirz zKfvvk<}&j+-(O^R9gOtk@5PIVXRhPzHBr8&@~PS7 z2~dpw22JKg4k5oPWPaq-SxTa=c?5^aw_@WDVP`)@8Nj^BPUK%b=wZvd^8+JwlCXm8 zZf?ja^vfr^``b=3R!~DelRMVF)i7HZ4d4}XuaY@NUK9k+GBD?GG z%}`cvyM|)sp!Q=P>RY=fm^i^gtfn#pmh3L#qqfvo=VBT>T>CWPOqjpLG$U|xqw%xB%q@7yiswQg;tAluuf;}yS?UG^PCtsl>L=eBaM z&PPn>8GSXB`;7PlI8S!>ydn#~>onyqJ*FvEKGHYUS_-OOp?Xy$^MCzOoOB68ob_cU z^JT5b?o8Iv&vds8SD8OM^+^zxZNAMnP7Aq5c2_cna~1Py4MVQtDD!Jy(+hj3&ohke zXUTVjw~U+NE-??9N&3VKm`ZjxC^;5}`R{Srl-}BNJ`%^A+|?V@q;XsZ{xCnc+rboU zm{QNY9r}-XRt%z7_q##^&XpX589luBwT{DpdA!4!%NxkE<~nnFmpA{6zs&93Np=^# zKO3K#v*VuL;GJK%d!<)c{lZI}+ZusO^br5pR)qbXmeZH>%cCIzDeMC25yY_}m{?Ys0Z{;R||CtYkac-70#V8<|tQDE=IR z+UXom}P_UBP_g0TWf_LCJAyI+*?OClzYRf1%Ah0AnBMeP$yzd|`Yc{nQH%UG9xpt4*`4t=6VX0(3v(la5U3uCktz34!wi6%WOwz(o??HI`l6}=Y+zQvg2$0CDyl@6 zMr(;AyX(zNfmwSrWusRX29n*Gm?WUnimzC?ml*_PcaF>?a4=PtE7y+T1ZO6-+;r>= zQ;?U;Fqp-eN|Bic!@Yh{&y>XM4f^@>P9nTn6G>tQ!fLX+?H_Gqz?`?pwTeL^vl4V5`^P+XW1Wj=L3ZcFoc7J%-yroxE76!l%_(!-53#HLPrzxE z&fSMKLlWU}`a6~lbC5#LvL4KmSXHMeiX;C&W1`OSFgBi5lwwmC>C0I-k2&&>e|*RE z@I;hN+6S8{r*Yhg`S8r0Z=6ZLiJpyY@TkIL!)SaVyIXKDA3c}o%EkX>Uy$7;X|$AA z?aT3;x%Nw51R}g!2?G8zmVnM)VnTLzWUY||pX2^!BzGOj`_cRUBc#7P!oCg@g$a#-8BhJ!tNhG5o+itWB3N? zzzmSx4;1*hr{g8xC=cR~pz9+gNo8h;CEqadS@v?~%vZQfO27uPyPIn42k5IQ{n!_< zitMhO{Q+l`U!!aDD9n89#|+^Dyx68EE!j74?ovn5W`;?+*#op-rimSU399853TK!| zBiY?>vb)%&^v7Q=hC_Y;tTu&V;rA!#f5$>zk=?yw@4@fw+U&N;!R1U}Z21v`K9k?! 
zQkbpe@$IK()=s{&KjFWTnQ-LWF#ka^YQ27=lvypW_;z$-cFVsxjp)B64Tj8g2{k*0 z!}ZMWW>3R=X1*L@Z-e9NFK}{Z7R*ETmYg|@l{2)M`@kF=Z6CS7-UqJ-75HQl$vu@n zeV&C_5vngE*%M(*cDK}w-I^L@h*}Z?12_89dfY+RnJr{^jh8H7W{uTs1DWyg3LKba z^MaW+*~ZUtILDHmp`3Ny-DSdpW@M}9afdw?dOC5ijrf2G^X){rh;ueGbGquN$%;!S z@RL0lk*!nVwEhq5*pD%d=b$q)cmCP_!{Sy4F@@~TGT}I4%~YjbQ&%}ccK6x5wXF2{ zgz3uhu(0xl$%1pJ+{Aqb`#Idn?wsaW368$N8}@hHx$TbvpUbF9Y%a5z5j279?zyEY z^Yrdw!kJ)nuL?%xz*2mfW-7axF*J=?L&L^5msOvOF#ec7-VTTWH(tm}W#;@F?&p|Q zocK)(5(ddVVd`c1yw#$=^ndPPFuG@%SR zlguQE?9P+yZomM2dEBfJ=iB*%N*27D`wA0%t;LhsQac>l%l3|1a-#cL^dP(YKK=l9 z=+t74mc7j7j;suA<;G2A$vH#aJK5ci=V{oas37l|Y1NB6wyw;$8W3NHZ<~_g#_X#} z7c;SA0`mcSv=cS%==!a&m7`Z{5O6()*|@%_`1c9+U*t$#txQ=9$-Cnkj9 z7qhpH>tDmSvxZ`SqJ#ME_7YDk6PfhoCgR2fVfx%q%v|&U{-ew#k?ijE2M=lEqbH}@ zQ=gIJhccfi^oo3qfQ7BZrH7APi*=K|=9&^4b_Qv?s3rDKz>dM+P{b^;y=wH6M7YS% zqe^1X=?I>3_qn|{_qsNUGW3MAq;oeq`J{oJC{4nNR9v&6xi0)ty$#bQ@ETN^`?nUj{Hfk6)2I9E-4UEe-l6z!# ztC*=a$lO@of90&pthMo8`!Q-)IfnFaDZ9w-7BQ1e(_U8`C+FkO5I#3&sd#x7_)jQ z$q8o2DRHNLxIKL-X5XRoHvtouQLo!@8t)=B#9^MBIB_Sg!%Vt)Q{Ldl$tb)ayW8q} z5ozVR(nxl9@nc7MeAHav{17pjp{O-ywu#bBxE2}9lsR6~mF(`OAsOYLYbY@dL`1VN zTuXY4Pt496HOyPe8rgfGuOpL!a`E6jS$O{#tY~Y<kAB!E=v_^hs|kp=3xW<61L|_A|<&;_->>&T|5_v$wTm{uaK+ z$)Jw^vXUdp6|m1?KKvGcr1fBKPQAV?N@kuI+1;cZQ|YdL4>1lQxX*0K>rL+B_T?62 z``mppdvb}QfrPiD{;R{Fto^vb!(2#^U|61d1(!us16dR^AWkpD>qc%;xnZ zyVESCW?;)jbRoObI>#KL1#htEQY&f0Y~M4p+@x7BJ3zjlh8o#jfN~<#9KTb$;~*1x z&V`L}kp)na9mfvi6VE}LyN9qbSW%p6on;iy$$$u+@mqhO*~LUmB)j|l`xLsUY0z8i zCN^YuHN9ThIdPzAUE7Xj23hENOrd+on1(kIrwvxzva8k%)VcP4Qp+sjB`Prw3C>T{0?Ph z0%UiucO}F7XdTL#!M%ucMSQ)JsQhTe*ZXO(C%dc7qfWkzU1Z(c$_Hk7Z&0(BOvPFp zA8`OL$nIJUI*U=FC1pPSChoe*IGrVPX`7N@$6T{ub zG`N|ZvA6(Tvb)ThSib!}Ab5zK802zy$t-k5PinJTW#I28>UGOf(C>3SGMJtImh+%B zv(*1({Db|fgP2Bk7u)SPihiibjyLpuJ>#yYV{6$}M0V#G&)i(HJH>POHm<1*KSfVm zrMK8J%RSZYC0h56z={liwE9zoyHlIXXR^D4ULEDxL{sV8wG=lif>CrX7=48r~9{&jtl=_u0<*`$?h~t zPQcYjP3(8L%BVsgYCr8{?p}6)G>OA)vb)i_=V7{>o(yL0=N+fFni>3CHJ@YHgmCI3 z1K?G81tAL!@HAl^BaY?l3}R54|bC?@!V5gPFb{*Un%rf2iZ#O 
z(091^DF(mD?o7Aj;KV*{sUo{uNp=@_-9iRdKf&cSVMx0afE9O((a4U4V`O*lLYP-D z*+c@|Z^4jV3~$-Vu)*O06im%!VG-Xb>}=?_K~G|r6p#=3VQPygIP1MePp?+8t&5Mm z$#awY-85zU-!o7nyE`x=0fB{IvEl+}UA}vxFSYu>wBm|M&J^#f%(veN1)fr|wlHK{7wUMc&Z;|~hnr~x2l=jO*V1ka+ zlieAS-938TQo0pC!j|~`XgZd&?q%k~Z#I%~zqpSlyX!ZBIyKJ{ggyzx3wCMTlX7g+ zp(lgv?&E&?I_Bz%drUt5o6CL?^Ju7cuR@6twQ*#38tm%0Wvd}gMx90=*`0cPA}%QZ zz{YJ362*N*ijj+K%TN>}>%%DG8UJqLVJvA>lwrN7QRcqm*gyvve(*b9^+`ml`P5lQ zoyP8E^j7zA6J4^qzK>hU?tl?=5e(s=qL+LPVIzes|2uU|;-MV%V=Oeu4mWnX}T zj6R)?YVM(4lpcYVwX(GR#5_>4yT~2(GJV!poYqaiEwZ~C-LlX*P*YOa3A2mr?#6Fx zNy@6k*s@6UeCLP#wFNkTMNcx>DPur(_r2Ck_D0-CiBc%G!~|hU{w)mHV@GCXUCu#Uj;Z8eAlcmkpIC$(eFuvhwvtSC z_f?g=qna89^Gv)~-p6?@1($dK#%Ffv#Be{DnB^pjj~aMxron>jE@S91Ea|Qy_3YyL z{+^sayR|fT_=22@cxtR;4D1q`0jJ3NhgtlNdS{y<;G z?DdpIWOpCycn@u~*|kPnq|9$iZ^+aaFl9&67qYwEr!FJob#o~vyF1c?Ij(cK7}`KU>e{GJJIrPQ>v4yE_7tieDn^vXz{#^_D|%?OD~@l$-ZF0BfCqRo`IR$)nwEiSLvVaBPoV1I+HLrfhzg1v|344hIk5)Z|+HWoKCb z-+b3Pw3Ygu%F=A>QACiFSbL>ogQJ2pcXE~y6%9IDI2~4E{UqiY$bDyzkAND*>z~{Jcn5FK(d{g@Qx46jc8A`HV{RooD zw@eccVeuU2%}xB|p%*SrV?3-7&-roeP>s|9|adA=zE|bQ>Afvl;`} zMB^pdUB};fSg)ZgA(Oe!uw+l-jFvL*PdO?E?Z-2A+STs3fl&&^;&1B34h;Hq>x^XV zO6E?m3vbrQ{m^gv2zTeVgZk1 zZXz@GoI($>J1yfRBt8FudUo_}=D)iIJNtaQQ;QsS2!;IH)SDl{_=QUHkR5;DGd<~a>1fbX{46%++(7P-`JG*)3^7c4zd~K+XkRMfS`9EY%9f^a0QCot>0mH|4tN?sBTEnM94dfa2}G z@J@}zk4x|2ZEhzW&hhtf!&SDeQ@V>}ctke2LQ^SWC#MV9-LgC@>3a4h zfBz9kIKbY|UGx%nX)Zn|J!L-G-MfvZ@-VLyHthT?VE3oPzPq@pVk)i7J4*Z)PnrI- zxvWdMjAzgMG5s4edY8UHoTrt%xXXF0qP-a8aEBao4(e{c_+1!>!T&yD>t}W{^NcQO z>nbD1tFqtpIFiWj*8WSyuY-RvSj|ax@{A8S-$vS>`-8M4snj@AhaGVO@pn`uavK6(McW7{ng+aP6 zQuT8%rjNF)N_Cg??d&P6u@KeyPnio6hGQoJ5H^nbpHYVLlkDzj8d?7q`tDBLLN|8o zzHJhU?`iiD`P)qTvxBz-*<_VEZ%@udgyYK%tdXlidwvm+z`wniArX zh3Cop(9Sdgt=D|T{eAY*o$OAHUBIp;%HnbL2og9mZOBi@<_P}27CFmk&QzJ=NRQSp z1l>U-a~$;%;hZE-2iqd zn;+$DsdW*PSNq}X(iwy5i5y>Dy}5XQ;OnFV}J`ZwSTSmx0jgRf3Vz znWxspOIEPE`sHdPvB|oQp(6renM!t9{s@C!wUnkl^qaVNh(e@}?C+F^c4T)=s-khv z=`9-Am3?@-kN9P@lg9&?!~gyy?_KWWgOjl4_fJ%@YkM}|AS&$M_AgVAla=ZCz&FaQ 
z#3S&2q9oVY)vd@kjGVET>zV9!o{)elvb!veEX4QMWakQdt2_J1kaqljG*)7VUKCuO z_`z#)0ke+uq>}6oWOq~a%|+eh0dm`i^6m}7tRAeSKWC4{hSK5^^WoV= zp0b&HxVk5(VMlqt)!w2(cGrBHwiqnSfqkYgX8ekQ?X-9Bjb%`IA ziRz}z_`p~54k|GpP{M9{k0R!hY<87?&D10}{{$|P-Mw{4h2`cym>1ec zX7U{T}FAw-p!aX@xQDdv(^&a;S**k#v_I7&UEoP+}_eu zhLyFK$4|W_e3q4LNPmIeb`dyz#~-@5jGW}=Vp-)WrOq9xe>4^Ay1Otx&FqZoV5}Hg ziu4(#qU+O94i$UK`mxQ$@k0^T9i``dPz18OzeK+@E7?hQ=W(LFU>Mom_iU{FAG>QG z5BCS3a1^a&7TI0E%eM0PgNpcj9>*!NyQ(b*@%qv~6b^8bGTvV`%lS7xtH+q@Dfmuy zXXBcIsxE3$b(o%v9QJ$lvy;X@@A39!EKV%t8Si`n1yRi;bu>E!$?g(2SjymS&k)ur z9JyndPi21LfaIg!29{q&j3 z!fr!Xf7Eo1#LL{|w?rLAAVU4naSSwJ=#U0zXEJv9bU57IplCg^H&ZaOEYbUA8{GMd&u|9Hl znXTL{tiiG37(5}nYhIfJ(@f^9l(|b6vb%|a7Ggg230%g9p{|OZrbgFLaL!O}lihvW z#jJ4)6M6aJCf1J*qGmP}s>>e0XN~4rFJL}?GN~`XVpuKfJW^W3Fh1LyN7Z}M3Z7+G~;w3vxjb+W}8&EkNh~kdC z2lkcY*8p}Q>3Pe6)gIztrz?%)^U<5^Zb#Q>oV{6v3xjNA^a8SMEjO9_uL-@jr*Lx! z{lnK1Q5W(9iE$2+#hrB7dwOQ}D9Y{2htR+?zA^hSru0#g@`*0IH#*5MHwW=C{*ITw z6Oc!C_dxLsob=? zT^)gcE?mj(@&?9Ws8S7XIoOJQFuCOFcH;0tUEU0)K7;Hos%bLv%YLEPR7bf$mTb>~8$gEX>Mc2SS~jj3&Da zerzq;&XtJIWFL+?bI@mBqL)ce{5r7HG_9k|KW!#i&F>?rIs{)Xv(r@VHY|%xWJ$CdA4gnbCKR|R8q)4SMrwS}aU-Q{U8->R{>^f0}Gnt%S- zUdFz=56}6IvJ&mf?0Hgjmv1lG?W}elbII-+H&Umz{3F!L>?EG-ZhsGQsspMr%<%;N zJ!J;c;Z)R*|4Yr<|FgRXrOZ?7^%q;?Qt^lE&a2}IlpR(T-+uq=4q;|=348MH4G%B8MB?VJIc@87Lr|e z2R#=DV?o;xY%;zFiy~7gCA%{tyZcnDF9jY&=()ikhYBNb-Jk;38>}RP?2e&qqIy+} zZ{%#8{mhvsIUci5e1>jHYw1mP*E6}T^xLc=x=F|Ar`wBls?2$cCu0qAk|2I=uZ-Ks zQN?;Z?4N=)WOwVXX5iL!HHmxUDi_J_-0N(`&GtPGkB)_Zh%cr+ynwgL+R{jN*PHB4 z)y7f|o_vayZ^B^eAAnSoVl*){l)q$mZe({$@=QeO+bw3H20`mHv*PUU<7zH*gbq<( z*xpkNU+Kw#QJ1)b^M}cWNEmjjgn1>qZXA51**9j69nloq*eqC)-F3Nh0HZ$EV$2eI zdClL;%LW(Go2)ERt&ZXu*I%6vjEq*Sufor;rRSn|+u(`6TLU z)FpIdJ9!w$ZcHm%W@Nm@`tC8fO?I~{J{NJO%*0jpkcC5-m*Zg}>LHKuSS<`oEdy~# zlNl2&jih~92T7jE&V?Lf$^Tk{*T&TSpAE(3&JQvFw7EPYyPJ2B`iwieqO|cMHV6CR z&iN?hEqQ})g{@>6+1-ViZekXwA#ZC>BX!9>WT+v0t5~E6>S};XLCv{y;@;BF>QAmHs^i*CraWNY71P%wtDzFY45? 
zs}Ry58XhD3uy;W|f?TPa+u$LabeN0Wy``L8S&nDz_M_okAcB-iFv-?fdb@eaPfh9= z&Kikj-F5h_354<9{k+E>p+1c{n{K@O`g_Qr939ap4_Qri_jQoD)J%E+hv}gxnizz(3b)|qU?QFNd5I(0-PIF@GFX}Ht|EZ>4q-U3 z;t3{2G8cKVw>aFPUiYB3ym^s>@nm<#_t=H_s0QEH*@||&k7S{p92~ALS59T3UXOFb z-efps)WI&#QPy#;nDd*v&)P=px|fEoWOx1rM{)jvvb^unR<4oVnW%D(|Mvw6!w%4& z%skMc)E;%yl0xV9QeeP7p&izeIrbI8y&|!~!yifJg*Z8aeRoqmWh~iUlhI~U-oFgl zah#XbgE2<=4n{h*5WN>(;zM@#B-KDx8PE?fJOC^ZLtXeYY};umu7{X)I*eU@fz2e% z>;l#=Vn@e&?kM#>P!DG(eR7!Crq_z zEz8P%q?@{{Wa_Jm;{M}EBfGP3;jG)?AMUw3@or^@uwNVLHTMrp%~MfDc6agC3D}va ziT6%du`Oh_;|x35z4HUQs>R{INjwKH($})GnFM+9-Xy!5cg9i*)t)17TsUTQ2tZ)f z70g&{Ai*B|zE!bvVNwe@=Y9uqPQlo|Dg;|wl)=%48iozbm{{d0`o7FX?N^9SgE+@q z(UVbKfs#?yGK%c(RNwZJH9<>erJscr+1+Z-19-XT3mR%$O9)I>!)4 zcGuB24R6*p;_O=|Im7o??QKWVYyBHrOPKFUcK2>-Cd$0jCC9RzO}gZ+N=C1trwGqSrc#TF7)`2^iohasgf01wKFadRy*Gb205NV2<GAj6D#jbwMR>YlP-9lMYg7U0! zILe92pX}O6LIv5~iTaaB9oYq*Z$tw!>P?5@DuOSX-pPR*?Z8y*LuzZ%cM9_4V-YbirD z==CJKE10b-s*(9fpW}z$Eu-LVvV7T>^-h1`z;RR3eWh(Qx0RUqLOUr?INSO?-((_ft_yOaiLctwvyfbi8+n6 zD>P(rUpKinfqUcfR$^PpJhgjKn77^!zAZ0eLzb?bPWKRJvb+9O=2Cm;AzI!DMd8mt zcodeP({5wgGJ<|Wvb)$3MzY5HI=X{8<;VLmead6(QMMo}_LfcN9@2J%j{J8m7g0+5 z+#F+2u=Xvwez%c8Kb{q_?Ic&FiL^U;60e>1;eJIDocjGjeQ!rOZq9u8+0GK=$NcI{ zb~|$qweVcbdbmQ-}&RXuB ztVBX-B(&et%k#Ye;l+Bg%FI(DGCE4nFJ|&C^gdQ7h9W&S2wyJUg4RA0c|&&Bne5Je zfT2|NE`}TqK%2ANS#N%dlV2_53E5qCg1g-9tSu3WWYj6V_dCa8NyaBY-?QbhL z$?gW7Z7t5WUvTP0JbQ2U;rjQph(F3)^QnBZlHGMPu$Ho&6`21x0#C{A5=sk^RIV=u z`#hy)R!8ahi+bH{_n_M~1lRfp!>;%aZ1%Mfrv@+SOLjMF04U%?U!W;n!>ktIJz z>_?k zZu;)X?yNSjPtfi&+AZU(OLjNrI=h`yRhVIM3>(Pq_B=d@XZj6zyT(Zt^ZuIJtBphr zVn=sO3Y6>EJvJ)?6LzY}^t-NNM0V$DU?#qh~88`6V4Fi3lD;DJwF8PUePnOmEP(|vgWRy zQe~qr+WRkIilslIH$|fE*DDM@YAw30$Y>1sw|&u+fU+!{zOWDd(hnehS}mpu{l~w3 z6B$jTbb+7W}UuY94}E*EWo(sM<2CuDa&M_7o3;S*|r*@OOvKBnMn{9kq~ zm)kwbooly`W$Iwuh|M*OUF{>B%6w8@?e5?d{*7 z!M~OKBfIn2<0kL#X~;y!Gw4Bfm;N*Xj}yP)QWFPeS@VqF>>`V%DajX=BZwv6icCI) zwSyET?TND(kbgy9c94qVANZx21nU9&U=+z&x3G!yZD=P>WOrkx*~sRu)!48q8ezTs 
z@ZaA&OxDtsX;V0-lHDDe)>3kRm1F6^{rD6bh#7lsprw+rv^8Un5ZT?@pGH!>^ag(U zF-BrClp;(IdOlYt)ha>OB1I>W2~Aq7gohzPknN*dx2U{HvX;%xWTm zJ5S+bU-kvH;9mU|Gc#{FNC^Mkzua?{;I7nFhaSQW{%s+KM=)Zs65mfQ@{sJVqLJCb zd%jUulYo0}bXJ;~P9#5(Vu{vcuaKal%4R&X&<{^0cFP zEi#t}&)My~F%;PzLCk@;iGFG((rP7jYS}#FKO4&7_1D-PPhF);7>2n#LBeDU`A&8> z=cT*ctkISXvs|c>-9-n*pfaEa7kb;up(rxf>GWhos7v=6b|I17{hE}FEw**24|J3W ze&?p!a*nr9kQB#sG|kzIC4oorDo9!Sw{0syWOu_m*o$_*T3n1ifCRF;yy`4G_^T^$VGcY#Wa51!vI0LsuHf?kvgf_AfD!l#AD4 zRT_h152CPgOBK$mw2>yVyE#Msq)UBkMj)KWxk(ZD(=q`T{T?8yw}oVp-K9?OmP<97 z5%pdZ&x-61J>`gbdz`ZDe=13Ue`TD)V&vI7tl8itF>7 zrRDeEXgW`w8rhvqt37C5LY-Q*o8T!kmZsTDp2jCb9p*E72o3B zdpok?ASrcqmodYYWySeDSVDGJ^LGbc7yhD8%vm<_jPH5GNpSEd2G7}nOJsK`DaDAd zQI>*v?((pbI_*R|dgESWO0RT;4`j~XVCKVnYD$8=wXCZ3GUIs8_P7);o|C=3$w2YH@3G{Ry_CNWlAB>}65_8U4jqfI zob2w==zKhQ`v=pHJIf5-&6j`k%|HJ;Dok_HlkD!S^FA#4*g{G*JY>;s@~yMB(l+rW z3_a4YkL<2Y{}|9iO9tdIAD-;aqS0LXOuY}6B4$kNr9NZy1t<;Em+W<7Y+r*0&I4oOS=WQKweMtT;1Q>hG!jU+E}+-+V@^m@I^n-QAmX z0MYJhvc}g_7L(o0(_+7SwVI;-Ni@D60~JrzzO<) z!tk&m1-soFvFfCayxhpY*J2N8=s~SWV+lLwLNGs;d3L{>*wNTlmhlZz8|@;oH44)1 z@lJf_8|7QhE_gj?CT}WS<%uGH_Wcg>sOTfU4d0G_iPX8ZJcNke>f(LQQy#WsmXDjY z82);SO6_EPA-kKm;WU!Bw33}Ina6TEK=$d--`@Hr#(TzM{uXv2b-jwN(~QI=!C(Hw z`pMtf`f}*R1-KoLfaCgj-1&JA+iJ~aFWH^jK5ubZrzOAV9K*J~q3HfS6^c_{GH2aZ z-1DeYGh(-Zvx?m9yAQc!cU|t~;JSRrjN5JH3*U~X>YPP?{9i0xnU7v%ch@Y7kou>& z?3v{z+sW=8H`>eax$mLtmgrZ3|9Ss?rb?4a06tcVaYuwo*NWaPPVuX|3 z1vu}3&c>f8j&qW!JO_)NoyE}XH;$O)VK3R;(j9vcVxcT!wD|9l-5DC&OOe-GO#Pb< zC#z7HEjWTB8(K=h4R%rjrE9eQ$;?5-Qx-Aq$7);a1F*yp|_gs?)`c8Tn*Ke`;F{|+7%9`eu-GqwGQ`paz{dT*K-g1qx-d# zmLCqINj((xJ{f3p_bm+DlP8khEq=r2A#H=gY7gIOEt{kjUj{zhR)9)0l>cu$WvkzBI7 z1Ea`gyXndhm9sD;yG!brgr)PJU}=Pv+#$QmvGoHkZwNcXMtC#@MI%n6IoLBNSc4ojbO?gKcGc!Z$2gk%Ka_yI1A=Fy}uN zS=7ZtV#w}>&$pG&Ctu)FMJm=@Vz=4nW6W99l07%P<@zjU2Zx)>w9)sldw4v)H$-4b zn@U8MF`rgM^?(r229})$dS+OWsd=rhsO(mY}ZpbSind8@rpYLg$JQ{}n zNJf3;Go;V6mNT7#>)<9R`(|T2*i1~8n=q1t%9MXVi zhf;8l>@M%*31nQ*mi_&Fe{7MMx zNbTzY*-*eNC)-=7-V+NKizv)dyMn{#4JB^6zkI1;#)Pf`d#TA`jUo}P!>pacyYLco 
z?q9gqCA(v2i&%sl$JUpjNa~x4wk=*Ti^5hWMf1(S#Dh5&D$=HR2^`4oYPE7ueETcn z$G4SAvV`sJTqI_af+P*!i2-DH>%4d4(~RcQwS}8#kwpxi>L90TKVavXOf-?*buT`I zA&1qOC+R6F_WarISV;%Rr)Vfnf~qR}&S#uK;M-Qx*WXw6Tv6DA|8<41@%0KpTBkGIoaKu(d<+>Y%C@Uwa~DqK4WP#o@iI2OC>u^bpu3=?C$(; z`Zo+Jpg?wKc9VT~?;axC$WmsJ-Az}ZU-D5)S=9U}9tQD@Uz?8JOW$D84R$h<-L32B zE`QUNWxIVbuH6sDx!s&~$Nqx$JtxWM86R8iB(5EpnVGr+P7j0eBB&Ubb}LIpPv*mu z-4$K56NQm)U^hP<-^lJd8Xv{dH!a0d#anirpC@ z{G>J6UAtr4%Y3;8jXBXc=pF+-lUiIaH~7FXFEJ|DkRv@0Bj{ZSTI`^=djC5t+hH$r$nG4o-DJROC7GRBgtKIKk5uw; zEBg=XqM3dF|If|9Ny>hIM^yJ*9Qeii?c!dXz1%_y>fI%T?CwO9Ep@%GaAt5C9AZPU z@AENwt2;kM{xd5XO?GGUk$K<+>f)b# z2qn8hFrX$A^5FwY=QAJv7yoR(+3h^KxhPrg#wD`5W;1tU(Lx0|(Ah=)PGRPqtUeu47X z%beL#cQHme9!o8m0k3rtCoBx)7TKK|+1-?*%pZGq8Rre6F!)F;X0*Qr!$YRh|9yaL zKj|Yquj4*`$%t7*EpoZF93{IuHHn(5a5YhRcK}_;?rK!B@Yv%s9D*F# zE80#v4soT%yO}K7S%?PCy0$k9m?@_yn?Ertj_2g#I7jKe{0lqcv!FRJ1iSz4hn2b- z&tOkUCA+KdM4j3Jb}#v);N>7{80MbDd`}&DPIjlE9Vj*(s5x738}r=bpjQ?Nr52ZA zXKyIa$?k@z(_ekifO(=9v1Vx`LPMApH}VcL3(UmcD^P-a_{fZ-+R~7J0tF6X$T^pS z|MDBi*la{~CACCZ0iO`EemfqL-R;=HosWaMSY~@l1lgV8WNT@=@fo~4 zld}siu>2A_}tb=YC`;F-7Y`*QKT;~ z|6ahXx(KLEipP*;_wYN}T-am%Y3i|D@Gb`@F3(Q_+E8WTNivIPG zlKv|4ckezNBD@(t+l`Xj%n|D1COgRP z4m5X=y6^81(=QVj$nF#dmBO==hU|CslDTAeE8VTxYzm z+}yy~VX%py+jUG$i$RlWG#<3L23=?NOOV}#bm#Xnx3!qII**6}5vc0IK8lnFFxzS& zQ3ZkWuD`c*52qH{i2QvvwN`J^Fhc(|77w(O6~~$Vo8vC}o-Jgs*PdPs{)(@}`Tai+)L85SYta1>^>@dj zF-@D9LT!y@kO#fhkA0bsq9+R-%kku5I3BtuLi_Y1yq-s&$0PbZu6jwAMJ;9is3Qm@ zyPKtzf#&_+;*zbsWZY(MR||JJs-rAJqW55JEw#w*dAJ+&8@j&EGKgoqMW~Yuoct4& zZFa!qbugAx7sJGuT9qyC^63oy%Mn9JIy?b`{3Fotq5}T&TT3TjKk+BK%O2H6JUy;qx>Gc&m&Txt*>yayWp?l?>UGKP z{DXDHum)e6dW=!StfYYK?p!x7xiCUQ=If(50pNl}??F&d< zsV~iv{p5HwGsi|5NtDY~RIZLfR=Zf7biawQW~MT+oWAiE+(RvEB`z~g!+`AWj%hL~ zjh6&Qkbze|ubTvZ%OJ)|Ys-MTTBKl4k9t(Mv5}Qi>6LNwl$Vxj z(y!kE+!+;uQQ28Y)c69I&5lyReMRrqt`fOhk)0ETxWF_1)A&M6XwIyiZmuHScl_w# zD9bWFBfb;whM6I_n{fa$mZ-_h9-gvs6yK(|tYyHX=NMMaJ<6&ulv$p_sKYwU&1DD3 z7v4e7Or>YuEgY(h#e=VrD6F^)gC6D0SWd&vH$E#!~EKFlV&YjDj)e&ly-QlZX?`$5+O&SG5s2WNZc zVrM@>}`lP+X;Nz<4W 
zaaLIZw-jSI+1^o=6{?xm;8d&+Z{MVc2{_B51gke%Y9#Wxm3%2 zd%c}Jdin-xrRfMEyR#@hf|Ls_Wp0C)Y^kD#-PuyIhCjly#snODO8$1V93vX_q{|m- zFN^|Y-FN0YFQ~@8&C!_CECxO|YhW2{EO*K778mj7_tcXU^UmR8ayYJqB;rL?9gbdL z*7C-?|P-1c;+aHO<&2 zb~mb!`pqdHaHc(fPre7q6+1Uk-Oya_uiTByL>meTFenp^Hd?nic)|azncNz2iMd7=l%$jipon4}sg%FD_KW^g23RAg4c6a%z zkEH5wjxRlh;k)?lT$2nX=jYfQX)WFQl2vbIcU!fZEVn;M{eB24A8tp*j!!UGca#jW zyS{5&W%0OXGO5`vB=F5|kXL{O{S@WqeHT$A|LSy(``>e4P^p@YrM4zTYBlpa?V;gTKx}R^OS+6+!jf=#M&`WsU z+)%2>?iLyNbIvu8C37yJJ~R^DhQ(o~-W{0FHIpo|yAC6mG4Wel5}%zw53)O(O(~c? zrUCi0ZNzK|=d~se_CBl1kq!HiNOm{fBpd7NzaXc|QS$g_KV8G_Z751@Q~_%EvzZ(1 zLht#_#D1Hr94EWm^v6MZZ~BCs%i9sKo;eJ15W!E>WNW0S-0a1-=>}_=qy8Li<|d<> z?5?!t6v8cA$@&{Ua_L@xgv>G(&wDq~WmPP0`9{HV+ZAZ48c7-1ofFyJ&TIM-(4i7G zN|C5=i-(dYyO36xi`uk6@qFVgjSX57YjhkU5egWU^w(>lgzJmeG&sd@& z33?@%N_OWnE(e{aeZ#c8wsPBo=b(*?#F;9{rN8+IJsylNQ+MNDOmlW-yGfTO=G>XH z+j$c+CXzCtdol!lj7!ndTtgUa7&|lx!;R>(-F*gio_zBU^p&k) z0kR~Dd)ezpqj->^UZlE0i9;wP%-TTAAE^XRpZ-OgGGxHs?t0;XBW zzuAFOGuK<5UelCigUX=4JQM>iq~T5ID_B}kuX}`DMu**{c4!OHU$Yl|$?j~5a?x(r z4;-9GuEnz=X|A)FeEAKnOr9ZEgK?*^2tW5LNk)yEtf->~qQAYo?)nZNW@SKCITXLH z9LDv4mQow%#s1wu=~HGYnG5RBbwVQg#)ji&*f|(?(4!W~SFW|^8zq7IjM5q;e~m`Z z+-THYtVZY8Z6v@fK)lHAG|KdtFH(*jFT;`Alz{!q9%1P~OBr>WTI9E!rw_N3iSb7; z)PefK#&q`5yus*?%ylQb(>HOKC_kR@@Ase=*_}(XJluHu3$4tYC52~vi^ERRYu`_d z?YaZ*jm#vQUyQKp%5rSJyJS}ciP>5^>8J4q^LwPjWKbyXZaa#$12x66jkg4l-Nn>c zh>djjSlThP)2ZI6F25jkjtsr_oq0k=?D| z&oX%XW>309G~hFF({jvnSoX^MV&c6)0m%;!CCiVDGojefr$?Fy3O9>{%L!8 zNp^R1xtr|PSCTd}in#X;M%VxHQSJ2?i)z?2&bxX2ueQ=`&Ufa4FyB=z1Wy#%S^K$# zoM}m&+D_i>1-8;u{SqE-Y3N%JiYq;j;q7!S>5L^>S^;Gc`xAZe|{ZRcs!f8d+{ zQ4-cyk;#}^%lH8Pe?Rn;#0~0l+>Hv*GRVm8xkjq>0TIDVdmd2sCFL$?tF2ZcQfed=heE81(v{4$0 zVZmiAe;bKauVb-a;WqBwGL>wyy93!i@+VkFepjEQ9wiLx>XNy)e~zQStfl7!?sfY! 
z>wL1BOx$t+6%&}Fuq+E>)_umBA&#P^!+dxr&bl><(qnNUj&j!B;8}=C4-~1>a+QU; z?c~sMN6~rt8DD2*p<)uVMFJ0Cqo=pm63^?`%sqM*3$^MHYas)V7Q>(+m}l7zwC?s3pW~fm2G2oX17~r*^9$h}xCI?_nv=A3TDZbs|>R zgk#8xau}EE$)|h1a?+XF6Jui;Sz3*i>Cwo(5sjo_H87mU4v5|>~Ww5vUEoKk5mobl-(f6L2rHxAR zkH;RU@_uTwI}cSMzaf2{sOEuzv{ba7*b-hW9rSi)I%iIq4T zQj2WoC5!Yl#8JHzJuZci`DLPm$p^HycaUaZgCx5Tx#aog^0H_*v^ZB-YwpC2tAFV) za1k5s*g9mkm7Ia!kdc-Hh1Mb1e_HLLz`a402)?T?^YZ-U+-84yZEPfSimu>!hbYd2 z^tvv;iTIJGa{Ok1)Iac%D+60etG1`{^9Z{fMkiz2<7XH>!df&tlXE0{O8>8FqCWN@ zK81!LE_OS5jQoW6@7R^2+Kzn|>_W@UeK^koNGx|2V^)nM=<3M>n zjd^pTBL#y`;$9ze$?+)&)@#7102_HocK1=uQ-0r3m4Qe0qjWHJYFD#xGliPEgO1{C z&`yGX@y)+UQLddTU^ZkhoEymQoSTW$f39+n?5@DbQLa>dg0E2)){)&Ms~m*qadxDx z@|4FzIO`s^mXCv;<7HGbI_6L(H~tj5Jk+6|%txx91<08&QyD+{7G@R2qNimPT)SLB zn|ec;GR|N6lHE<$F_675 z6|1|yzy&>9$s)UZyx2q1%2Y((v;;AB+-J1n{BZv(ES9yEy_|L5_HmKq2?}C2bSL7E z219GyZv5`uT&AhG$=e^)9QAXMc~KwGU za11jh^urMrcm}QC^LrHFE1nqvk{NFzYTa(Ie=!E#FYtG&YZcx$7>QS@zdW7oC*xG8 z&$w_NeoG=yek&dauTo#+&b@A2Ap3cl@7P*P{7;oZjqJ`~QyMJRy+Yh8TNy%jci@4$ z^i69aGq>!;=0Cwu9-WKH7C-QxpOd)otmvQWESBs4V8y3AysQkyg}NfNo1-L&yWGT- z>~8M{_Q?-?hp+<~nDU9aU6a_)_F02nJ6^KxXQ2G}XemR#)v>!H5xb(o(c9=8YX9m= z)(&4;Iw(L^r7|bjuNG%*W3X`7-CiJJ6+Yx_yI67^ac>@sn#X7&U% zcpfn*8D5LPr^YgWc!1<&_{zU;bw&C6S)7~2-s;_n7-sqyuNCRf`4h;FL@ycfL_qqbv)p9KC?zpnP=wtVg3(Dm9~bi2OPKF0%4ByoPEO3H z`i|~hb5W%bg0`L&KTTy{> z(ZWK!chPrO8IMaD5xCNL9tB4FGIWt2wFLeWlVK#cZ0Y|z9)+VHVz7Do4Q#w`A`RIA zQsv<*V;;7WJ;rCy|8p3wWK)~d{V6=_+3V=a9{9(e;&5JFdL|q~r(Ns@Ysy5j&qtgZ z&3t&WyVPIITL@|{>7RDt0a^Is**h_3v4Tt*;UW_`>t@evD|)40aqDL`COXpp6} z|D}?>q6V`1hM#1P^q23chSJjZ3JUe3aO7w#T-V;h`5IH{^nyF<6FxHQvW`6McnZp! 
zm>ryzjET#h<5z{Xgbt@icaW#-DO3~t*9Y)?ZV1v{vM_ZK_q2A7WWDXA&0tsAYSc`A ztSW>J&%xZ=1GKJ5)^102B=baX9mEwiburBGl#XO~XL?#oN{46I(<>Q0PKV)8H+nKA zwUY2ZJ`z+DAPGH9C2#0W{L?cQ8|O!1=hZ7Hb!NA7D08j0^Q?%_mzCa?nE5CIp_Aib ze)t~V9W@hr7wYAGl{Sa*(fI-eY5* zOzf!&VJ>JX?%UCm;pQb~WOpjltmLTP6Kw031Y^H&*xQijET&F%j<2i@;dwC7M0Rw# zj@r#J2r*;dU3C?zT*!s${AJHS{M!*^e`Qaw>@ACcD(_U^JCcCrAK)zupq#gIV?Wr9*zp;Mo-S(VV)WSd#&VDB zuFVZ!&ggn_a%4IFR)%AZC;RRc>tGm0-`$-+*>uHAMlNV6XC029PwPjq5Uxzf4R?axn2xi z(-xAHPD~BOP47AcIQ%>k1J>XV)89#$>#fO#Fe%(8n7Yv^`*6ISillgvLvz-BlVdB* zySzl(7u4&XWCuvuF=WiwlEsI;1!Q+qc9~1d?e}mgj2_i;@`8I8urozpX5}(RD1~02 z5k{id_9{EyqF_2G7Kz_(K-0oh9+KT{{_Z2IhPM)%)YIsDgZm8gWDFYl4EqODuNz1P zM0ThAkGdQ_cMxvj%*XA%9RsUABJw1?w@U3Kwa``iziuXa5!51+-Ssdpz?|lavS*u% zydb-K(~_RTv?eHx$;J$y5DZkLLbb8%TI_a{i9ga}6;RMQ7#+RYbzUG7cNH zZ(~hsGYL=+l+BxcB({r=Sd^cHAK6`m6+IcD^*HBkBNHbEiLR@sd^1-QAM*n^KZF{F z^;y`e{RJDhI7(+dW(O;{O3Xz?iD*-Zi`-|74ll$IyJqsi*H!Gv?zVS#l-DCaBu-cMBe8%U1VMJ#iU#ELI*_~CRH*IJm%3bMQLnm$tQ zp)Ivvn3<_a-FY2(r6n>m0)xacMa4n zItPh`mbC=zdJ2!KBrN$JhCS}gQ`^%@{*v9L7Y9gWwTWmBWL{P4SbWcj!pBPbJ`0Rw z-a&u4u-Q-Iy6KC3`wLi>6aoF(c<$)#BvY^pgH4#j?E`vUooE)-PsPIax{PHIHFl5g*k63AAEI@;|MmWKacm zYewMPVP@LQd58;-E#w&4-A)~E`KG8Tbu*9RVjr@#(diiL@dm@nm=Av;NVZIMm+PnL zqZ?KX^SWTHAH4(bZ~jE9bMzGQ9NgCEByN?zaN*Ssyd}H4uEwsAZOSs!&Rx>U?oL0m zlP}xfKqo$(c$Rla$q{V2*ivpadC7oEY5+S?H$3 zK;Frz#x6)aZ zczE-BcJgb=E9_XBhTUsIQ9Q2<+E003 zE@OU1OrSg)ZXrF~AK>iYcr0AS+2P@NjJ!>K#%SvL$?lTw8%c~S~G*MxtC~MQI}4C4x#F32xjlj zL~X$bgbpXW<33}LE&FoUHJ5+-?S>)Wjz0``Vurhd?00jKQG8ow&TT6t9lzmW&m0(< zGuOne1U)XWmv@wh>`M$1%OG1>;{F0_CZwYEolwBPn5{+a_<^Sw%ZlM88o7;C6eqebcv@V?NyTz0}o;q+1<;!?dbU86Bb!HiaPm~{c2ZP zIkuVXyIBY?&bt4uE5OVFilX$89U%JLX%{h*N1+KREwXWy?C$iX{m7_S6|?uuKp?vt z`k#&L_)(9vAt}gc$K7kQld#g(5r;YK;WMO8ZH1Xkd4C%*J>oDupLsgxFQK~8K<1kI zORc`YsNOV?A>Nl@o)(G2!|A^^yn}6_X5#4-DCv`Zr0-8{nfv$zy7@5kIVT0WHVtr` zYa?TqF#qYfhy0CEmFq?O(UI&f$t)XoPEGWhI?8J9Gh(V;B;y}N(VS9%@tk!Rekeqj zh0UZ-u&caQX(z@F4)W&YCv3R59qD9uRr3y#E2>N7I!_5ByZg3|TI37QpfDgA{qn-_ 
z>c%O|v~DHepZLhG+5q{z*;K-B-NdsMu?Xl91+{ZmfH#IRcnLk@rOaWtsxLMFR>HD* zB)WLS)0=mXyE=1OKz29swYM~{*P{RXI0n{)VoymbjE28JaSxvHDM7NjKl3vdsmK+b z5?GSmEtrr4-&x;qD8H>d=XdU#m5ZbrE6AAN`N-z18$M$&aX zJS#@c;jH`NH}<5lGn2c~b+3x>>7bGvs%5`P9d$GP>_xfLJG`Bdfnc(`AJ-1!WKc^v z9`7X%WOql8Sjycwb;uo;2?qLw2|FtuJ#u>ED=CjuS7a<@&}P_EnFdIoMJT-li7$4Kqg%wUqy2nBit0iq^62PSaV z4Omo+4L6nL$^v&;Kz3KO+D>Mvyuqq&>DWbf_dN6{bO&onb7OD0b2L!CU$c;2-5%oE zq6CaKj6m&@3ViI>T4EjjWH#B|>_qA_3a(*Z`)Fv7i^1A^wXl9^Ed6E%$k~m)^0`n~ zir1gT(COj4{}W-B_ZW>qR#K);HaFEv+|qc)-z&vGWOw$une2jikB(>UnbpOf!eyLw z^^|1E)FOcFZqN99ob~&QqFQHpYST{qf3_8!8Q-z)Qw|=J-Q_6l!`P-4a#@o)HL|?n$3xxIR-$>{7hh3(1V!XXcV?b02e}w`(iW4qwrF7c~rIcg1NXn5?fVdviS`oa}C4m5p>xY{cj`spzK~hFb-TY%XuYL*urROm=6q z%0;sFC`bpT0_c(5nH21Tp<;8{caOOm+-KPCa}f2sk0|rnju&Ki1D+nl?EdOvR^dqx z4>Qel*_Gw>j1h^{B0pfp#qrZvv$K^HE0S}c3XnmXrZTDK1`Zjq?`}yHp7xL)w4>dX65X1kU};Cfm-R>*yJ^_k0Bvb*KQ-jXs#OV0Tn!x*wVo!_ZAG5aM( z@8>>)v+iC853zLMoX~?gLMrSjRLaF&mG4lz*H%>c`?Tevv$(fmhD&-r){)(5*%x7< zwUT^T=O%?@cl!1AvTf>n+-a7Hi)43Y(WP*nr6Jkjk58RQL9^v^2G|wapUje zx&#cF_z(^S^n{V!sgvEE?V>4Gi;tpgd?@Z2q@ytJH5P5-K7;ITVz|3ZQE5R9YB8SI z^Nb(D-p^h?nHTCLvv>|RwRWb5?iV~I56x}_qq=Yp26R#ub8B}wO?D@S_VU!}ExhlM z36kAS?RErFTUv_pEicg|yYuL7DOPtMVTNTQ#@B`;cXc`5Rp?3KeP8hG5*#!{ll?w0NLk})nV znPqSo0lMUp0U4-&{1!Jyu$P4F?$28{`BbkY@veLD>)1Mp{$sK` z1+u&6<8`G*;Vc%ohGY16Y8VbZ!SrQTvf7Y)U0c3`T4{*CN-3^iWOnexOgLG5z%mE! 
zbvf&9=klh)M zO~&x&&+vStwG1P>d!6JdEG@BfHxdza4&KKcVjj2RTf3*R;b`X681NM;^Oi z!FjM?SpjBgD2mPj7a2=-7k%GRN?JC-W^guC+LMn>W$#cH+1(otIYM@~D$YjAHZ-uW zl3hrFVR#vH0uwK5v(wE-8tvJ^xYkT|FSvsPtKty9o_>=)moVX}fta=Mmmkjl;;wHf z-HQ0XWk%w)-T$?0xACsO8S~@$E+)JC_OFht8E_JIWOo)5Q!w7J0S^Lg#CkS!i&Z^E z{kE#CJhUHU$nK_G%fi`=FX&V1DD64xj{eCv{|0t`oG8FZ&hZCd6k?!jGZ{3_RZ_|B zdK)^*lS`k_${-6*$nI9EAB6QuHK|=q-yPZAk3-gC)&DshBa_iQHw>F6oq}y0=lJvF zl4N%Vp{7zc{1#Mp$6}gQ6!N=Y!Gi|wXU6->&a2cIY8lALLzVDr!F|ThIK=L|i=`dS z1UsTk4w1v=^5$^){ydy70I|A>n8YEy#lWOv>5au8Mb z6?2wTi_BT~Xm1zE97kW!pq(h-`zwFKZhYz6TtZcuwL^C2)z?9`h0!}&mx&u>cXkI4 zVbeWzS)}eIGo6?(b%RX9@+odak#QJ=qw=3Kc>kf5)RNupPY;l{vD^iAy1@+b7%Zxc z!kr#fD0*Qe6=Zk5WOq@@`ZBEiJUT3c8@+8bL5A-l`J?=HhrT1e5xz35JM*LQ3#hS~f;zCW|mc~-PeaTb@=e_-$- z4;mMFPChMy_dF#j+08TNIp4+W>AUOu4qHkx@Qm!va_V8EH)*i**h`$r?xuaPq`$Zh zms8kd6djIulXECk)RWP9zVcyUfK((K%LlJoth9;2iRIB)*uR?RQX84A&huZzPt1Sl z$*jBOSo1m@vre+_?(0KbGhtu!W%ew+_maRL%$WFngnoli+{#IZ{)soRIc+CSRn)z- zcNbZqEQi1B!GDjK@3e0RYNq_cyAOQp^Ne5c)JeJpGBYz_2R4x1{hnEj#wcZ}= zD^dANzv2upnN_DD<(&_sJ=xvs$_$M7@eZ4!>471;`@_7I`$AV zN|`hMEfX69KEisegLEOgTmI8k0s`o1`mhU^$nF~F?!>gE3X(X|h2A0N>&|E^*89IA z<9jxW$?k08O5kx#MRM11ULm`4RI(MtoJM>bnTqpdchB?bF@QET&_42gaG<2Qo6E;7 zcQNH<9NfJlk@tXSybW{HZ*reOcITa9D6y7T_?bq*;#e$dH{8O4>!xy)>`sI1F6^R? zl*lRc*cgV@Imz%}^&A^6SW7M0-G_mma(t(nbbWCEo5=3YxMgAJvd?&L?nl^d6Epsk@; zd;80kKg_g_GmtrUmoRQ|Bo>Cp!7}|063?4S6WLuuHy_z?NL#kgIYEtI7)}(XK&!X` z$9CAr_OL6)3lmzsP3lHe9%oi>drj11yQiAxq{`cMxq$TnIqFr?85a$--GP# zVFa|M#-q0E9wJN4#bgF|HCMdl#da+jlY0!E$?m!yO-1>$7Z|(NR>qRu-F9Lx_$(D! 
zxN9F=$?k?9$RQhMj?nJ5lFGT_%oomsJN`m%M?Mr!1Y=y~Zsgo=F0)3sNgJ}eg@()# z`uHA;du77zS_qD8E5$_z4T*L4l1qC0+@@H`d+jH<)h7vW$nK`wpGEtny7Hau?oV(4 zy>lkgvi)^z+!TZ3=44#gs}SMVMoyF6{c7hYjT^YvZGRqx{UY$8djbwVe1NXG7R(+F z6!(67p9X7++GF~HCx$||CJn}BuQ6hXo#>pV-YLpm-u7<69NoRJCA%Bed@9K}>}5aM-GGm7lIx-@e?s@bo$PM@$UGd~@Ec=ooTU%X_}FbuvexV;PQS~A_lsc8 z+Qm>cZy`IA-DUDge(p={~2qjh5S0fe9}1y z^nXPl<$VRl#I%+>WOvSFcaFo`h*jHbh;AK?oX8kxysSkRM-zFyIzY0f`AYCqU0Jl~ zEXsO@Bi1enMjlVFdm*!fThhnV#Y^4{(UAPPr3fOsn`4}bq`3ERX|k8zWOr}+F*8#` zNp7nYp+DJO;*oruuK0_L+s=|mcBgr{txUT64S`uXctv*SH+>&U{#B72Zx2~UcDE#( zncp2=qH9Ac5>JJq@We4}UZ^E~k9fR$o zD`t>rm3fMOkh)Z#K8W*SeDn9*j(=-E;_WF1xlMMrxPbZaFPq7R&|TO>c6Y|I08^C} znlBEHQC*)N*np3-iXOJQ}CDUZcy+E zgam6#7awNr_ytmfZzfMy-@%~0arkF6^VDjoMb0%4cO~kbI&jt<%w8mi%SbGzzQHOE z&U&}uU|=S@$nHuv`AA789rw zB)dz)tjXiW1T(g3{fsK1;{$?i#zsuFcwNjnXYaMwz1IEQW43c;0z!5MS$?k&Gz2u3DrX2V{XQo0ha$J&7bnYciYw;W;yK9MelPAAaWc<2=a3Z^l zEz7`*@xzStz__HX9@Vt-RX_C(#!AzcGRaK zmF({C!NvF<2!rS9%rFas4Zn3=~E-StG(7i77e)r z=tjX}S2$KwUPWP^kti4YiYeJ$d2f9=-}NH%VnUI6JBE4T4-xXkOw_hB8#CNf1{$@O zehw#4oxx1FKaz3$`zyr1w360&+@(=;lZ&;?hNvrq((j$f*2qL~+&37hI*5?n-A-{7 z&vw7@x%VD^FLrW{A|GD!m1XIFE;6x^UgSeI;$8KY`+lh?A-g;H^9W`gRFi4TJj9*s zuFj5`rfZ*I(VaMSULJyIZTiqlb!1>LXPLUImJ!y~b1NCe7nUdQ}AV<{!O z+t$fPo(Jg3lws#_Ry`Eq$6~R5Sq-K=GnWphISZ`mDeV{nBSJdUi7Y}LXoLYI0PGUa17W<#a!nv8w z=g;Msa8*|heCF(lp`U0~8_P(K8|V@jfp5x@nDFQ(hUb{bpJV;x>Je}H-gL!bei^(2 zLvT4K4&xFZqo$&h7<}a3K#+&@uUC_G`?+`bI}lD=QlYfuExLr*h%?!p>IE0h*OGLo4@0GbS4gw-8Ci_;zw>9S$ob^)*hkrS<_lN z&S=E(mC5M2F$ks0k8=)ELz>C%G9&z@|5$Sgb$kThUon`sJ`@d4F2Jc;Pd<>{{afxU z_p6OW#pxOno`<9CTqKm6ZevgeGfm0v-VgAW(Pla_Jn$^;YKCBFT09<>KSgt4XHl@E z6HLiNOe<7n#E)W3CcE3bKMg1I-=k@~EwjMs)UE67V% zCmGDQ<*-HetTewOqu*{!BfI8Gk0>DB0aa|C4z1 zqrJ5KOqa|Uf7v+COq5k0pq+UP?zay^uUnV+j$~HaX&+gz*jFCk=eglnNvC5tZU;so zOz|H2C78;Pmvod@dC4Pn@}$C2W?uxuG&KQNn(Gh|X(9K??jlyYOV0jwvT|q<49V`) zp6!BL(??vhuoD}yyI=n>_j8hx*xt^CGi%+WTlQksNJa6evYr+TR`J==%voIbqm-xdI8V3`DCVYk0Cdxosfzt`*Qo3gez?G|Eh>;2Od`4hK4DXL1Jbo2K-w 
zIfWR{V0dIDqQQoHcS~67{vW%m<9>HUTltlH2(SAx@5ej?dM=+){lZQn*w2`8-AOV7 z6eVH?XYW|+{`{B=tW=VffzEQB?C#`qTTwXi5z{a3g3=bA70Zior?sk_+3YSo2L{Np zjTZ8;tPVYfCO~UXFgjJ0Vt|#lls@wk53;-A|C-9nTX(T_O%%ot2*Hr11`Z-DGX)qF<9vL08Q)Z4ko+%x6xC~>)Nw-eiHj{1R?Zr587HJWaRwATwj{v72av(D*B!+;C;b15@G zv|}-B!XqqNU@i)rQ!^mD)2-Byaf6TJ)oSh)TuH|Ms79z*SkuiEAYF@H<;4ijsr_Au z2cMYpejpQ_4t+<@*$y&*wXWGh*1AnUF*Gq79a-yIzsX1A5oNh{+eK_2^E)@#hSQI4 z(Kt61YHjG}xK)C@uIjQU)aKi*h^PllHHvV&UQwc z&}(xO+MgnDA)7P1H*TQht+5O?BeQJsmT5B9ZmLDb=uV==Ikl~? zJ!IZtb>{XQMPITz?bj)|UD*VmPu9$fW!AZotE78ySNhd{G~Z`NaGPwL_}qdBbJl}A z<2M#LFmL_`j`g4yne47{RRR9K-&$&yyK>K-@2hpz64aWgd(g_lswHNRLD{*>e8~&OHo-hh7@2 zRNo<|%tm~g=sI1^T31_{Tr(e?$?gnh?m-*iXWUV_x zW zcZb4o&P6Ppq%T`Sn3>cOYI{PNbh< z4r(w|p2f3w{0tAwnejk&*Zh!~a2r(RrCl*5klh9M+Qs>w4;US8D<{eBly7pb>79}! z)#qV4+1-dsIhb`?LAq{p5(Cy$e+AgfF3T^d&B?%dvb%L@2QkjDtsKtc-$r)#xzbYp zih7BE4U_PL?CzWKDO}#FDPPF$qI&zwvyUBRi9$79ZPA$Q8;151F5}=#14(M~q3?|| z3{gDeGp?Y|>o5#`%bcgS_u=+{+3{p|ret^BwrNRv#c2%L6pZ$D3CL8b$C=+2a+~b# z`w(||Kdl|vj({D_a2sbQeq?uR?VKg~s-kpTl8Ym(b$9yYV(b$|3GsB6 zKI~_xthN*Hr_I z$qcN#^cilZ_Oh4k?)h3L*|SeUE-CJXIoaL5ygX>NY9)6bvQN)`Mp=Qabj?@}BI@-j)5;o*H7bjC;XkcUO(*_|1Kdx}Dad z!CH4ju&XR-%{u;Y0S=Jeof(^juS0%deY}G#;yGBX>nMG0wBSzHY&enKtPbZmjw-#5-;?U=I2tIEr zN7+SPnf{2g$PRwe-Oxn37IMcVDFU|lBXD%eO>AFa!d|T(?;twV7wO8UnlgOL3Bk5S zad;5=8117wNdwtkc%FwWaZs1IBPA%(Cc6tr#pUO3(0!tf6q4ONZ*q~AI%Rq7upg(% z?y3s2Q5^Xb$A>vGSDEur6C9)<DX zWUN^qgn=r@F(X|=tR{HM6tX+lN#^pf;1Nc9$709CP?XNUfZezCWOpwg*H&QFS4I5ByGeK&&-lOiMm^lXyyqmGBfImhIDrEt+)KLe zDW7L@K7SGW86gi~H;V4TSF9Z-&`VH5#{}73^uP3}85+r|>`G)xIFu(uA$-$ayqjz) z1!Q-lAJU;dR9kjiu;*C9`G~0raA>H*!bujAPIlKEM;`yNo%9@61S_(;>v6l#dHP4> zez29BWOvsynT3?0BqLn%V8D9N`XAQ18j8}r$cb;#F4EP@?t{Lae7f+F*(K@dwIUFcG>V{erX3yI?lOYxZe@{$3>ows zvqBP3Ms`;`rxdN8YSAytUc@uzQ0+A3yY(J2a-*=#A{?CuRbp3zp(K;tDU#jYR5y?h z2QR_1HTxMY(dboF4dY&B;yuA%bjj`lY>FrjvX-aKCC<61;{r;FN)tm_)LkTTa)` z5f#~Da1bq4e1GZh#^$;&IJwqdDp>1o8|WliQxv4<@En}v`wO%7;mxpCvacOI0AzR9 z260Yp$9r_BNki|#K2t`$O4D9bd#4Hc) 
z#>V)|@g<%Tr^W2@W5<~}9fa50lTo|95gXoE$!W5?#0Rd-JZ~c(HWfmR>@N1tO!T+? zj(2{XMdn#i9nYTinqN5dCL7LVcY5{ta9pM=mH94mh3xL}MjKHa_!b)qQ!%ZXcl-Pj z?EI=GkD@%}&Ub&g_qLNPm8qDs<>Ez>lcU>1~uWU{+)mdD_uqahwfp0bYY?ukk#8F#S; zEAnEoR-g5t@_DFf>&Z$jAKB50^BIZ8^3C!Frp%8(xMw74r`|$ly@{+F?_29QxJ!G%O}uiO|K-`UFtl#muu9dqDKkJngY>WkqY_s7Ox|1q>k+FUaE_1 zAFC`?e{m;+{S0-@Jt)}!3+wkf$|v5<5w;HU{QWo9@tGJzcBk;W5HAn5k*f`S+Z1re zshT?(cV98Dj9K4gcL$yw$9RYK%<%9OO|rWPQ*$}9`yrhjoKxEsik)vS(BrNzg=BZz z3w=d9$w+Q=yoO81!ZEiw5@(m(!LwTSGsy1xxq3@hwYK=^oyE})!SK(H$ADqa(66Dh z^mHK$uXUIH=Tv2JcroVW24dv*G%V`+0b8fq$}F>_|-A!1QgXwGN zaGB^Ny;$qEnrAQW1z#}h3;T*>ckg2llH;hz-py|EG?cR^O6*yu(Ty-Z35(wcp-tXN z-0V*8xt^DVkllTCHWS%ejmDSJ@bwCV=95dzS22)FWOwt%bG|U%P>efPVu@}z{yQ0k zHQSgibk|heUinFnlU|a4QA;*=Ek*E_V06z)K-*2s;k;}iCgYixFq|%k>~=D{{xGb_ z?q0ZYw_;s02HG*(ne6VuaA%3pQPw**9 z$62zwg+7O|^j%wWId{1>kN>`nrL^ z?n6&68ui!0@X!4U{NrdSXUOgx$nI>T4CGhm3XE70h8Lmad%LR8^%Ccoo%}_g?C$Jg zOJPp^mpzvJos&zRb`$%ZZRKLmL-^h$5P6F^CwTHR0-EfkH`$$I zmXqXuP>><*_9Be;SM9Jo*ql_7F^8R{oVBk1V_VU1=5F;ryO2(HH}z2w_G_q0>nwN4 z>mDEn`&!61|2nw$OTbOCJL@5QE6vpw6FMgPkll6Z%iZdscd@x|6e^d8(^pW5F0MwB zKz3J=>LX)<`2BPuyL%Lhh`BKcI{g4HN6lpGe19ps!dc|B_R=f;1Z&J7*q%s2#>)oW z-DD+kWOvu>=}4NdBKvXpR?zfk0))hg`j?y#zH*zxez=-UwrD7kF zYB-BL!9|)`>uMR;iuId!nB6}ON!QuW4?2Ppdo|hW<{^KR-MyO03=s9FupJbSQr{5d z*`I^)8XYNF>@Dlb?z#=_Aj`YnW+oXkP0b@P`u;V#;EiSY6JHrfc4x3zPsVn;fRZ7h z*fJm%%bq^M<1D_l$?k>@@s!Z*8uH^2cgE@NEO3yYJmVY0QF=`Ni8j5mQN(^mv(c>tV_EMKeM?A!X?5=P_C%N;a7IVhMq53dg3TovzyIog2A9zbrH$Uk; z%0yPU+=Tsw2#mPRI{xtuM7o&B8?w8cTkQR=&=vDh0f)ieJ5w&{^3F{;qtNo?8 z!b7$!QkOHGkD?FRoq2N#4orLl4SO4@A-fBB?;`mQt!3oS{ZP5J6P1&)ncMRdsaB3M zfM@)W?G6%R`~%CIGBKO%E~%;jzs%c6NusNGo(_|5#h43@Kf>F;V$nH(-1OslxJK(q;2+FlZK9)a3}*?guVboi1e(Jmk=b+$ z-|Rcc!VP}%bdI+a%+e9JrRPxJKLkB&JU0J+bK&x>wNf--L*Qg2Zt{I#=R;>2_?HLy=*Vb?tbNd(r!4b zv9G(}0QLyK!(MJ8y8`6*F8W}*v7hlg34UjSPj+tb{Kfsxt zF-W`+ikVO8B23kn(^)>UDZy9DMjMHN#Z}y#!&&6fQKKCU z(PvP4i*J651T;^p!`ZPG;u;Vj#>d>n+D}zpohrgqvOA5zyI_0o0|uS7l~2m-N#{7r zl$T2KVMiX;kll5)?r1HoaNf;CD(gsN!h8>D4P`wE9*q`h^og0 
zA4_Q>yEAfh7rhScWTO!?K*;Xy{+EvFhM(ZM(@vI<-If33EZ2EO*{PR{tL$eih|EPl zdnM`P=PaMwbEmMko#>8i#xj$1=C=o8<(|X9x^~iSlDjyK36MV;E#&U4=V*N;0RsYq z@ndo+($ci#DcRj9vb$Yn^r`)M58WQp5C4NRcsH)#SFWLyk=-@c`N)z%26El;GUnQc zVd+=)b3CeHsA?u72Kv)+LI1Fmri8uaJj?GOY;j9M=mqXC>R3q%+1-~oH)(27k>E84 z@q}|~mFMY29`^-Z2ieOL-o5Taon&vOf@tRCz^{Z}u6Fy#^;*f&C(Jkb%}k#4wlY!o z16I_fK`lKHGlvzUAzoGPHn@wrBRPzkg(U5MhSG|7X#5C>6*vce+4(W^@x6%r-dkIt=p)Xi495LxPQkP zE;68mWJi(KzY*4R4~og|R_(}#(_hSu-{vCTjl7=@kg4B!i@ajy%RdW5u0jb$99EOT z%eg`bkCg7RB&_(68}NUsbX&+16q5O1lP=_f~Tn@I0~Tev5wNNT-|NifZ?d~5?LEcvm%3b?bqxCl1wrl4 z6j=E+!TXf8TxUOH`YczeKiOJlj3_`UYhBxES-4a4199j7$L?C!vv+-_g^r;t^d-CV z)GWZW;MNjr>nc6T?&_adi*o{N-G~&db`FAb$x)2IsxIkYJtU6o&b50dnLM}_x^=P8 z{}h5x-^$@~T~}s*VQ#en-@f;a<$?=$OkyG+ZFq-0zKOQECi0x@PLJ%a)=^h#XP068 z&JZli#jYPjXaBAxWC>}zVLHf>*ydQ>%XHQG80W-=>ttE#JhcMWJj5+^ghDfxAyc1 zO>M-D<;kcbySuUKIBM!OEihqIsYmxYU%uWLo0?LNF`t$V*i4kmjlNJn?( zAMKHRM)yDRhLoZ1x?nKs!?y2J&DlP~=lwhcHxB?(iigYb9PlQ{FU zz0{E1-5%{PHFM0E8T5denK5{z8HRm#nFst_UyhR9-B?5(e$P+}Y%5{VB^*A%QSfSg z56y|Da-QtYp6u?Isg{M$w$39ZFB@3U?{q*G?h_6>%Pz%=P&$ zn7;oAdn-E;vb%e0S<_BXlGGcyuw$+3``=zn9jz$S9y`fKvb$Zmc2f2A6S^s7U>(`r zk{gGx^;KIj{op1RWOrjGS<2@R^-vs<2<2|UP;Pe`bvjydezBK~AiKM>inVUTeH`i& zjqtr;_*PkgX|D~WknAo}kAC=@bY?nL;739j5++7tsaX~7gmx5d&Z+&J;U$4zn3M6~ z6rOqT-`}PmgbCNti z*6q{wVmWKw9iMaIyPEy^AZIyDb~mlgR`g0f;*ayYFo*0eepM09tEkGcE$;GdK!7x^ zw-Adnb=WeP&P=kq(GN>uXTx)_&Pz(@n8@5>%51W`u=qO){|pR=QU&M5-xG8}boeMH&&)PsiHh-nJknHZ%;(aKOZzY#K zT%_g`ciT*C<>KG(u_iJN3rYiFVR;1iEIDtl$!upc_WP4M%cjVu7&JT{=S`S*S9}&r ze09Wjw70Y&yPFo&L7Yz9#@z$_Eqw|{eC0K)|6n9iNzds>_Sw$pNm$V>H=a$E(Q*iEG4KOKb5z#y5MBtGpl9Mrv0UB7^Ks zv^N`hKchSY2WcZxN_;WJf>tuIFV#|=$M_1C%c$3`uiFt&HeA#ys zl^-L}Iwu0bcW!Ve(^#s=?lj5nf{Jv-XG%HVy$HeLAF=qpwHAJ3nE^s}m)b}#=RtKD zAAA(0)6&k?hXD9Ub@ITbNbl$jrJf()F-| ztjPL-d6LB(rkzMzQ-HwcC&LWfDU0G`_Ilo^cvS$h|lihuf;Co@LhFt96DW=E# z9| z$V9Tc8LM36oTjpPP0xoi+1-ZOdyvZsL$kY%>q@H@h*5?9NK{0Q!ID zJffzX803&gXIaVk^RKYpju{|%D2QkRFtsKdA6D_j4HCHV8hI@&9 z`bo%c7mN-aPhsDG%yu^P5*xBR`Or~HTdJ666OCDZVfcIUWvrWRAo7NOc(S{m5r%Rw 
z^$MIDxqtjF3i)buWgi;#6l2!9zX!X^ z@2TyiV*OzhObg`fOFC?Vo3V4eoeU$p8*1w;yRRt9k%hVR=k0`se=deSQ~` z#{QMe0I6xl=Gp0}CA<64<1ohiGTYgozU8UxAHKDa(24ahS4+fzCBgXh`ZO96v?Ml+ z=QG*e-ruIO_sKnMe;kD&x5Mb1yTbh~L*BcbJE3FZRgeMu2bWmz1$v!5}0zb(CJAF$J97tWB~Su_-3 z&?r?=xk%5bcYqw%P3QPVD z$CFW4F=(-oC}sG{x@~j^|I6>^fs5#JG?aI53>x}8#Oc$_ZCt~CMm{r5$G4Yww-e|{ zc2}i9R@PV{XILcdV0Baf<9NGwujfmas+};UTGHcZN=#Kk)iEI`-gOS^PUy(VW!}<`?9Nx4`zA+k;q1srR9Z!#Y20<>FE*AVWOvPUOcbxt zlhNnSBhra~+tygzoL_^9`^;tdL4WD%;wkTXXh{3T#}M}?XRS<9uw?&hj0&=rZmf0x z2;xjut2WH!F2F{zJG}{6cs1e&&L%Jmj_2S?9Y+~n$!zCXnib12*o>DyZRTe-@- zssNdzXCuazZ_wpl3ht5J%^!RevjgcxzV9Jp$nKPTcao%=wQ%Xk-Re6b=<{zm5-;nD zNeyR_9XMyFZzB8m-9TAF1m;ym(Cc{vAX? zdt-m%+i*uY$@{BzoP(U&`5oqUnNWGp9H_2^_@NOpIjhqa`WSP}Po)=mg1L0s_Xy>lvG`|FC^{^O*<2#M>i3_Y`OJM`*cO_x&{UD6EdIGV>+KXnDrv#JT z-TBK*(sn*T_J|n#Y7AxFa0%~f=@H7I^LeYU?AAAu?-`Z&7xdFkjDqLCcabvHRP65i zN$>+N@fxfx3(e2qz>#2jNE7hmbsauUwvg9kcRyp@WyAY+GIewjP6o1OO4tSU*&m_O zY%6cb?s9iC3u%{4q;su z+1*<=@guw25Me1A|GYrw$VA5Ovag$P3U=2ypFwt~LU#A)AI>l=sKV7X(J0y)hIby6(>Okm862|{ zp<(_4kGon*4%ywOHtf~hZYx&`4{;wq5T|aZV^=ow?vC2Y9M-x@-}vU=peQ4cl1H+R zzx0NV33nyQobD`h$nIkG?4)hkM_krUN7f47?OH`>b*`P1Z{$ATXzr#RvXFoJJ;&}K z?jL6bzBn_>^tVToeoz5NIN*`&hW*~tD zm*7f2{L)|1_*_zr6TQu3*93p*(&Q~3&M+A6#WcdvchYoWnPMhJ_*V7(zdM&z@(I^GRFA1$awQ zvY!Oe5AV|T4n98SIam?Sp7=F5HW^71*Bf$?jSzQ(>+24x^%NMExZ*JW^aFbhNVg z&CkaG_A{1h??L9lUpSKcKRk(s_sT17Wufv*fp zFp?oA*N}QN9HpNkF@41yL_O&s(=+K)bMcl-_qFA_)>-a22II}1co>d)hFh;XOC;Id ziyC*CaE8vTu6v*zpzHw&DtCc)y%tJQ&8I>z?F!3J+`8DZ( ze)xlP?B$QVFL3&l!JcR!PSX!>d|O2dw$QahcIT#OC4-Y*a%VaTh9B6&+kX;A{?L@3 z^ut?_-HmoKldz4|So$Iw-Mv}s)?GrA4P86;e57(5=YnDlCC#`JqjbX2cq$5=1L@49 zAAS$n-KrDxBA?fiVg9AKM#qHmo&-GF`W!(Obl;QR{h%NIK_(p&&kmz(5oeJ-)8R%x ze3rePd?dR|8OA+h9VMBwG8YCs2dy9P#h4UD`9tl0et4fO=Hku!1ocDdWRJ|frXPOB zhqf}yz+D2#?p9e_iXZ*(#RC#iMs_!2-D%7kt0h)+Ol&2)+Y-=GX3n^e9Gz(VydH+8 z2Nn3k*-)l>`O1!;J~Ekp_%O2y^jaQPRcRr+n?`nL--mg3K6O~u zCjoKif-!0M8Mw^Xmd9jwmDlN`>1`^d1MkABPZU0^2!~l!C4RXXiCc)TbSAsoK|lNe zd$PNSq0pNjgYM@Z;41y_ugUHjD?FuRN_%lgJppC1ySAs2xXassAzQ5EI@#TM`r$+7 
zs>qA11IW^1235&!9BcIrF$MN=fOW-)kB%}T=9?k0`$o?lzpGe?*5p6mR*t z!%ub&?jZO5ZsYl8X28=AU;W@3+IbkuQ?k3H03Yc|KYWY-1zZ>$3f)1maIJrYwe-U` zk=-pG%>1(dG~{hH-N9sc@9!itbB)=~RkBdm=LZh{ zxTL7kSHC_dp*TvB@X9>zE23W9EMLI*Lhs z)a87Mhg3h|8)PjQooi5p3$OscUDsJLsc?6>GMBvl2 z8|0EEvdV+m&NsbfFa7X0hLvN~g%C{giGz;DW89)2zPj39u3cvD_%C%iYjzZ8>6kG6 zoPzkNZ?ML}M%q1Owk7@W#dfV_&$j)jy-64M)NJgcAO4Xw-zNOqRJU4%@R zAQswPI3xJwJQQQ~B!G?y4YIraql`t@;yRpkBG5B35&`dTA>Xlsyd%4_B)hvqKm6;3 z=b+u6pSfK;@_nD;GX3y})yXl)?zBg!N$I>J7)o~cpII8_C%?nzuQn1zc2__@e1(d# zq$uUXl{$^gQqx?&DxAGEmGH!mwQTpK*k=>nIbO6acRHVq;P1=y%?MSnd zzx-ce^RpyWk=@mloxt$b?M3N0U2$Z0+p^81G5P_z?TA7A#ZX*$ei1sm^kr@~-^B60 zqBP1#;?1t2V0Jis>4#s_>Mo3|Ol9x|_N>Y7p3x7#D&h>j-3-QM%LJUARfmD}!~YY& z&*iAQc=@PGs}n`|8Wae-p}U~_-~;l`*@{ln=HODT7+Q#EF~q;8+ABjehuH zA)4}v>~5nsIZAOy*-k(F?0nX-Q^FwCmzf`CAS>vH&*{z#t6_$c+35-voD0Ju`r+4` z+{YSYdN#@KOv&!v(hnbd{4~_a?%vuY!Z5BL{ppAIn900W2Y0bBZYNp#hjDyVAbxF6 z$FL5cm>+5<+sN+XS~$y9rYLQ6a&d$Gj1SSd_=A4EHnY}C$D}!Q zKIa~W>xOpXJK0_8SnHl{un?u|&zWP$xsae>IMENkFRYT#Z=(WOq+gy~LP)_=T_O z%p|*;;K6rL#Y+^^4_{4o*Bt96-@mI!>8gW>CA(9;lz|76z94k4y}V_gsfB*{f(!*o z%*sK3vO6R7eRxhkeDqUi`SFW$H|uO=spbbvp&x!V+1;>_#i&YDm5Z;qkIp%@P3dN(M9~iM(I&H*38;%q8Z2S5Q8z=!ehU?jo9G zcLNG-WY&$h$fO@WpX_dTs}g#u)Wm-U-{WL=W!9bL^|B{0y%~o)E9gb0AO6!h9XU^S zH=68D`4+Q~=!gGrNF*K(i@<}b>v*!CeO8|5#Il$a>~h zlimH&U_YLI`10wjLCNkEr>CGzSN3)3hgY~1AVX)m%G=|urEq8g!ddICosor$Pk*5L zf`er796VR&AY)Fp;8a}}R*>C2(=I?Y{qT8qu3|uT_xEFK=^xVs8~WiDT!P?!{3v=| zSC?(yJS2eZE&=Rk^smL5r?L3;iT(JWdwcA`8a((Rbo4Z5?zadPW)6?+Aer{qWDyAEVW^PEtm8H)ba@Vd;n8lEb~bUxD!6 zmdfnVw@{}aek0l4y)tGyCn-z8g?!|*)_rgy8>u$GptsRc-t%*FcW{vMHQzBSJQH@` zm?yNW5cTxK$CkT__7VDS)U9R8#74YWnv5lz=tW+09OW-Hq+l)QGdQQFJ<41RtscSb zM-0Yp3`I8m@Rg7BWatzhnL>6q;U0JUt*@cmvv4dtABk?ww^5MAygRbHMg6@c&4eDI zt;{HIAA+Ow!>_1(3XQ{^C5`MZSiwVZK~~7p0L-Dq%#3%o7B!qB2qwPJUC7VjO24-^K_mWQSv?VIH6c%K6yXl8t(o%=i81BxJ z-K8yemvZ{y_xCTtFtWQLFLt5rr;qrLe)x%GcLi&lC48Kc=v~YG|9<#G+xKGJSh|Ft zILQLCyZ(Faq@no}v=lOMi|nrd?L$~eKYYiJZc@99tYU(tbbeb8&tZuuA-i)?KaKNx 
zS`xj4d8GRO;=Hn>On-hKy?RGucWxN|x={i9HwH4y%vav&`bz$F15vTB;OsycR3}H% zqg{n{;T@SR#s8Mb6)hJ$^*D=Hj{j^ySojZGU_p1JL)Gf z=Xwx^(hr|EqX8Xc(qPlWQ-Ym)_Mw} z`*{yulHHA4wht#0TS=oQccVVhb7y2Lk5;_LobWW%lHKXr96>q#@OE0dpvaf&8Lsy`A@!ivoV*o?y`^hs6MVNiuYXP zHrbuxKpPqE`xbRGQ=y^4eU_>cY^5K5UA%{MBD+&8>LkH49^(}K@a3@~Xq8xomHl*O z;#uxy!B6IfnQ-^*CU$&aJ-9al9jk7@>7%jCF!z&juf3)Ckgn9z58saLPVHwbj&H9; z<+x5#PIg!F%0r?H)n)WfW;>JJsnZXia)+56U#;cmod7v$;7XTnYuWI8KT^oH|*@EBr2y-8IkjklC?n5_9ti zu0P;!-6#zowdpCjU?ZE!?)t1`txG?A)YN>`pWBK0xqFcI$8U_i$DAj!hOuAlrOT9W zD0;mc*<^Rm)eitIZKO$yXT=`!vrH>lclH%xZIV$=c4s&C1h&%;Uy|%8y~*xW9-GMq zqlc&ojzNA_D5@7;r1w)_?vdRcrenfCl6i}XS1~dy9ItGmu<+|0%z4>Cyo>!LUdLO$ zL~Bcb`r%K!;hb1~JdV{o!y5|=i6XmGf8Z`vYgI+xl0Gf6yHfh$J%)XNUX-nTBfC3s zjXh5K;a5D#Loc#BT+P9_a|+!3a*{915WeKgeUlDfup*Q1X)k7DW*tQL4sGSw9yhuE zAA1V)!xsm=#BJRqj8F{*m#+}MU6Z@9UUIaTza+h5?&r5ERMQW?$Uh9v{=AI7a}DGf z+1(EM;WvaCN|&T7bg+iO;zJZhYu?9yHKwwP>~2dM-S=Cxq&NNWcQ*th^#ymtG&!fH z#GDMWyH;d(?WeSpH){`L#I!&xpdVg0yctg?u&+yYXKCXsd+CRFU+_P67ttjb1D-3& zGy36;$?it4po^lq853uwqZ{Yc?)5s%vw+#o0q$~`>~0eM@FipGF{WK2S}$d-`|dO@ z(GNc}+)E6|?tcFyW30IcbNbJL?z-0dNT)ypiBG=_&B0;VRnA#t z`r&up=qS6$?)urW)?K72Uq79sFPYiO?-TLrkC)J?Wu_@>-Rfg*V%@#1_*EZ70NGt{ z`r$|2_>4Pd_Hvo*Zu8$x+!10fVM`AD$?mQd=0Q=Vm6*~G@5X*cn|$sQC4RsXr(JN~ z6$sVWMd&t${S30Z9p3Ci(GP#hrVgD{5>P{Scjf#U=+O_asO&8h$?p2JHI@G^+`(S@ z;bYc>W9qo8&{)bh7unr)Iwk_P=*y-2i;$8~%t?>loUQ-5oMb!SI8xQ69`Q zp6t#&&{ei7wvm4d3NV=LZZ`e!56Ap~N|J*t=Q-%D?I=?(wVGiLxYH@5d^#yQHFQ zBq#oa&qzl(%KIx}tb<&kAO6OZObFTC1}Vg%d2M7W{qUY-ccZ&o%kjEKC>19oo9u3m z#&LLNX~;9Oy9Bbkuk^#u%y|TFk60|49Ew#-FQE5*JyGt%yzTouVaDt>M^PLLP3mzQLBmSlHc^uss& z_W)MIWAOWRC{|3ngcDEb;@ayYGs*5A>l(?1v`WnA7LK!%qHuTnT@0MYI*aVCjDGmh z1GUB1^bDGcgE4kS0xIc;UpJK*54{7VHQ8Or+jg>!e)utDcWTMI@NM2l%=lz0?O5yn zo8c^*QkBHdJ`Zi!`!wIgT31_9(vCQZ4cT4PeLJbAA3k(2XTW+eCu8v;EZ);rwCRWc z{7-=V3bPdN)h{qKoONiIU`$Rv1=X9Ha&e@W#9DJ^iGKLqbE+`uZ~7zt4a2mt74#Dt zh;mzB=}2~WU(-w6et0j3X#7sQj~Amm%2TqtJ@mtm^3#%EeNUq`*rwCN znDA7R${CztAiL|LYbU0qAF)R>9c@<9y{KD+=CXEjihlUBqnK-R&_Z&0J;&pn3Fw_2 
z43kBrIQv{n{=MWSAD;S&_BK_e9~WRXFrVR>Ho?P;Qdl6u72 zw3zKocK6ws{*u>UFme6={P1S|o#fyI1(`M|2hYjw7R}#>$D>-wTQwJ%|DAXH09$(Q z-{XB1eQF1|Cv>tH!=9*0fBNA!*ayg<8=a-Q{xcj6ibqE}CN}pui>+U@r9&6`(aG-a z(+?l%cLyo8k@$9n%y`r_yn16Kn~(a66WN`ag1*S{3z$YfeAA;CBtLqHG;eb;i{+bt zk*8czZ!b-Ujw3rL2Js$Avo=r-~<{qT`wceb-KnU(h)o4Pni zPo5PsV>!dH^cR{MvayuxZfavbOjfbhEp(A}WOthDZN!6q_@(<&G38Srw3n1%dJA_6 z=!dr>yF2{46W@W4@gRmCp=kczt;^t{qAQ^}-ZGW!E+g7RJnXoSW=@aL`Usdzyn%)$ zW6@Fflg+J}8`sR7jO*pN)DQxnvRI@l)zWR>NlYvKWjX!uYn# zmX`>JzW%R>mSF6drE+wdV%^uqIRJ5lb{zQz8c#_@iN_LUBk<1;O zlMj3LGw$f_L2U6ae8_X;Tb{leD+g)y>Kh!pW#XbDcSV&C;C68vdC}-9>&Wiz-LsND zS6{(yelqN1gK)p$IBZ?oi%Fs46?iM zI3u}Ya1F~#IE(y+_w(O((DPXbc}#Y9*4bN{?rO_PjkB-*-EI7phF?U)`?hgO9lx6h8&mNtGQnI_=2TmfXm!=rg zF;P6yUk*8#$-{rD@#Q&pGJNP^cyS3Ob_Sw%--m8xI@O{L#nzw_e%j#}UK)i9A#`R| znTlDXpX@tMFLIfdbn+>MF4^71oCNIO{v2ABJfFz!6v*xxGT7IBOy)**=jNS`4)nt- zINFJZMi*H=)LAmLl;rWUTxjzg%zC;P!_pLGp}Mn7X07{px1H>o^$AZ3)3KWD&XRuk zG0kmdx}m%DnMXIVr6t)M9TWW$@z2;`gl#;HzT>sz1KFJx*#h2KDug0LzjN| zKWi({&ec$UlHJv{_()WkfpqRr0qJ=r<^KmqaVJHv%f6s%{jF~O>v)f3if1o z-i5qJj=g|iF6(%*yQY(F((}Kz(iw*^hU`vzMFvLG5C7z?oy;Ses^)9R}_rshfjQ1i8CHXa)In_bqe#x=!dVj zCcAqOir5A0>z;prMfAguUf?e}WOu$v+&@k_ftg3?Dma~liSIbCzSTrNIxMYzv>7lM^?qz!?*0$Vu?S*N;rY zzB-zm9O~%CRuy)GV{tnrHm?m&U;lFMb=nG*ilGzl0P&=|vV6v+juD zW<2P~dukq#L+8QnhQSX%YGi*dt3;n6CYa!b~m7sP?kj6KeH(`=smr##&ZztPVR#p-jAe^t16=ku)70+hbb}UFX@^i zbAqp}ZnqU5QSO1G=n#A?2dH1`aq2Se%MY{c`6g!Fz3Y(WTltn!e`k^GvPjZ`AO1if z+;l$9tOIt}8n4gw@WWqSnnGeeF;w`wo|;qDxEt(l+go=yaULEeiL5p0u2Y&eCWp(5;64?{0G}`3dPE+BBmzKxp(@^xaPLSj4i_X#G#t4cx z$fYTHA8F=qWF9|zaUA^c%~F!w`|lV@fZaX5P()eq!~3;Z^JcI+%LYr1y@|d%_~B22 z-R)X)lH!L-bB!B1?lJ2s<(P1^E8Y&CXVR8h_@Ay+<4#urc~fW3U5oGSA!L!^hd;MB znNqL9k<{5ht@-M#0>{J@u)9H%&_Oc*9+!of^GK}qGxfV{&URpTl80@1#fv`N zwCgAZg5B9!pa&U#`1R4?USM|x(s;&4eaG)&0hPE$P^#Q%nm)fD@4N26=fUp2Tr%eM z|6bFZ+sKd^quUvNc;{F}bj0I12zFO?MvouB5C7q45(UnUrP8O3wB?8fYrruf3wHNp z9J0H5*D0VD9Z;r8WIFI684b{5ss5fkcBcy~kP~0zS@W*P!d5>ZV-37bboLWL&@WW?%*>XMD 
zo##*s_8Rk*tl@{R0=si8Jwt}uWsqrfcShBPYtgV_Ln`0TUFtYqNIC8c;TZXCcV@WVSPVrP9Y zj($G4NuN$=u>f|b4R$wmkUAGRHBeGg4Eewhe{6aSE&el*z4jwpAMMP^@WV?RR+ALi z-8Vn%Gx~PZ&$lLg6zuL$uRY%L`oa@`g2sT|ZBRQzfd_xlS9x$PFd!57;RF8mQY{ss z0~WruXfSj5;V<=a8XxHq8-$+?-l#wL*3q1kUEz50$`H~8TnHY0-vcGsfr z!EJZ7alh3>yA$K-YCrhY-rS+}r8@i(>@N2#I^ir-dCa^^l(#RM1o+{P6t+>yZ37nG{+Y=fti@GF_dZjT`e_~CO})wmbz?s^@vGxxMO-0&7Dw8zoKD+%=Y z$6cCNG=S$F^x(3Qu6#*Lg_E||(h7Op(!dYTP3@F^7LIJNyYzpKJpM9rYM;xgAJ|>x znOq93{6xy|!>j%DVrv6CwhfWs)?vr!I%eH-2E{bdU5xiR+pr1PUEnfHbPD{WcY_a- zw|)f8@I+1xe)v^$9rzO1ox6((%Nlf&;>=7c2D>{oq=q8J6*=;!Gyk0e|I-5OGkPCW zoK7;mR*t2xr&md;6a4$48|MVOb7He5SHll~-y@ECViIXv{|DrsqswQ&?$RE(@L*|W zo?CQ*=4VFHl7s0qNbDtjXr1xb!hrzuC^r*FSN zFD;c>`xShkO~~(u>+;`*`*7PQl4vY`HcdCk|EnfffZes9apS{R)Y;eJDyjU7p&@R` z6fyJ(-P$>jHIP&L_Xc^SH_~DDCPLa3{ zvXjdX($CPJ)D1tp7ItU`dNzDoMT`%BDIzh2wtUnMpR%wRZ712&_$70si#k*~FEtz>^4hnE(NwB-W`v&mW zGk1~6NFec_aa0IDe9t#6o`2JwCr!ewYYm*_@Wby|8AB5^QfT+Wr!;uAKARz@7P{P- zXI_zK%VFoq8tm>*Qx=WO?WXx=rkn?Mr*zt$rN;K=O@D4A`C4_fp&zO0v#F^gMyxrNR%t%=rU7pPxgqV0T|yDrhDA@Qc!& zxZxf4x~C2J%G@^6osmifsp$R8zC>e3tFl_1D_elwor>1srouZk{8K#bD2bV`NNdb2mVj^;gc}yo;_*BvDrOz*{hHoFzfaW zKTflrNwL}{dp-_!*9kv-`;WI&%=qj8yHiYqV`8Q}$Lcxr>`G5gY1HQl9?vKlet7u- z=*ie!Pc&7HXIs0m(|a(WIBjI!Zd1B)Ty9_Mi%9!4Lm+ zA+jAGQi;w!C$quW@gt`u4R*IVNse=GoujE>cmL_+Qj7XWYH7eT9_-F?J!W0_;XltR zrK@0f6!)J-Q z=!8iY1sy~0=ZtDv2|s*dwllYGM|aD!fn2ZFLet=fFFqVYUTYfYS)e-aigjZ>u)B~r z+})+#B%2uAn^+{0JpAyQT?6 zJAa5Qp5^7Foq^mZ{P4O{;2uaaXH9Wrxo+C>O8DX1o*gAuu)8z2i)qqjF>X3w!*9Uu zLfmj~qWOckpn%ST-Mub4MJ{^%SXAu5@nCn`;D>*@?+yJ?&ZJ(jyFg`yHde>U;iu+&5Wqv+og590=J43$S zGQ8Lu^B~yWBlzJvCcmO+=?oHq-Oc;-mL1V8)-qZhPL zDxD6uMZwK5DnO?jDVYAHHf!9JS88NjKMOvE4y;HUYa^z8yKr zk_H-j4tw2#WSTavg?eiTvd$JyRsg#b3sB%-_~Dn6gL6Pvo=V<=I&mH%x_sE1Sf|a+V0SU_!>epmGdN(+(f_3Q^0DJI0PL<3e)yX+dT30R z6>q?E@SCDFFTebUF1jA3BCxw%XUoWTj1;(@J*!}!5vy#*;|IK_q0QO!2JCL-Cd^3ojC8VCqINAUj14teb!E;Z(w(CA}-U=o2q=E6`%8K_c(Tu}ziLOf}EBN8BrWx>8u)Dj(*k_pF|5a8&)?jyU z2j$SVPwy#gt{K~4*3Id%WA#=^{$f-{lfdr&o;yr&Ie%&W6l*?>&##FAR{RKl_+71s 
zXd~F2=a3V$V0m9&S%)k#_8IMD%1_$5N$pG)`GiN(dZqI;ph%u;XJP*Zb~g)t_@YBk z>99iz&7TuPch+AajmK(y2JG(71N45XYO{9EEh>T^{#9B6^=`UL6PM%eF5ZKG!!faS zh6-Q%QcE^qclt}xs0@Diyf8zq1H0=3c6VI{{onAz|7$=8$nspeVfTroY|Xh8>`r>5 z9rqJS@XNeolmm9>uU$;3?f(Wn}8-I{yg59OvKS}o^qzLj7vp}C9-bNQ25~u=Oj`{e)w-ekEnBF65ZVsi#^LV+TEqWg^n01T4((rf}hye`4E&;0RG}~kNln`klD>RdOh_P9r>umYE_u0 zmbvk`UN!DGcZF8K41ViO z!S2>Q1gA>q%he$#=s&Q#)r$|&K$z-#P8!S4FP53d>`$+{=) zcna9v;ZS&l_ya93$)OgoJNb1LG_F^Uv*3qs{^iL=-3GkrC2|KzsniR0ch2Gx$;zlA za|w^o1b8tMbU53jnPv@4pd(@NR5YiF4t>z(9I(6VlDLEVtjdNrE>ruPXzW2!Xl~zD zvNkf{&0u#i@WaP_?$2XCRZ^~dByzslR0u!((|XLPV0Y3!_Pk)N6!&zMQ6bpfwW>nO z-S~&Tf4BNSKm4+1R=m~n57|Z+(h#t_M)=_`rAe{sM0=ic9X{+5Q?7s?-enKEMTSHo zUsFY2H0Ak;jx#R=yVH>{V6B>GRCYLpe6?aoS)rcxX{d363LNU<=<-X}=E>Skbb3)d zEq70#+-1#lsaprU7|yW_e8#<0!F_Wb?OTF({xhlY|2?M*GH_;s-93aKzVe|QKl8x% z6YQ@4ogDfu^N~za&3Fdb-JVQ4&Y38Qzn@Z)!9K%FqlhA^dda%E{_@tIM}XZ0-Z$Zeja@`bvglGuB)xlko)jDu_&eBLdyFSnYU*=p zS_`eYh@2Yy@P~fE*Jr5C#{P5F{ronU90VZq6w)Ws#V0T+Iq4ojr;b=2*~1TiILMqYgWY}n zZp;5UNO0AwqqGD2jFX#-X?(aCFPMu?Q|u2_7g}O(@`KKQETHF>*k|P89rU3zSMGLT zYp^?yUSm#!A3kz&CN2Gf?8%uL3Ll}!?=)O^4A`BLCH9yhk7?VBBy_#T(#YRZ8Il0I`_Da}j{N9fPwH#R->~6J@F~`9Vuj`jVj?=JL z-YJmzOeGfJhtF2^WG8n$R+{yYZYw5{RdX!eZNEmh9pDajMcxVQt`UCtQ<~Svd<{D8 z6O-ui(MMzhKm1v+yS!m8taDtE`_C6qs8$s9JC#AJM6ac5qoiEd2fduO9E@4lpv9a=8Dh7y zK98P&-D$!PAFc|oUXdd=GCX@jjW_~+`0qo~$?$R%U7LE5I)jyYMISsXZsFEvh%Sc? 
zyH87pCXz4w@LFv*XqSr?&xt{oUzQtp!wbNJy!V0WI4&O9ky zfq$k})1`_?YP<-K;MY##JtjO9?Cv!D@P`(lM<(wi>4DwNsyRqma=*y_lqFvQyL<7` znwKR1qh;{J&%-|BcJndX{Q`5yY&+hMS@(sqIWKSjNWMdJN$d_f58#L2WiQ7Qot=1x z3i^@f8S*vw;Whoz$kQvD);rfxQJ4yU1H0?lha0WY1NgAxUGfW0ph)=P-?ZPN-LBf) zr^B5+!0vM4hac&Dg*wK>klXkaDtY&mN=x+l1lV2K82GVv$n*WXRpbD6ck2=I88&ap zV5TWQ0K1z9KfL9OT|8;Q53knUMAgnZe8Aa*w>RPL5Po=v(U&Raax`Um zrc%0E8}%d_aO@N8p&Fd{!V)+$wJPa691~i(c-KdF*;-Scwd^e& z{+UHNV0WhQ!>a(|fM>lrp9j0EPQ`37L5s6BZ_=U#arA9^BAu7LPaDm3 zc}N4kwr?(M0YAJ{^MpRV0Xnsn*j;zBh&1Ejf^^3_zl$gLJFUmos~%BENfHIm zi=_e28)@iK4XyyYTfy$EJ{CJ`?dvq?BA)-|Np#cjAzjzi;}EdB@EtDj94MjdM4*fy zbSqnBP;JgD8Vf&s4cMKbg(Lf_%kXE#Gt>|4ZbDQ(*%*H#uV_5G!R~zFhd+HmocqX^ z(0%MP#4}3B1%CL;-nLu{cGou8g8luzlC)YrX)TE$r4wgJWv2{V&voSg!0yhyLWZH? zC7rHMr(IES7sC(Fg-SfE3^}#WU?GAoGyL#GU=fCZ4tmvn~ zafXdF#S9tjKgeSHKBhj3134A!&P~RJC%_MH^{R$G{)LyzEtB+`-%u(1@BtBc#wR(j z$`5HaTXTxU!0y)ED4?nZKgfQ9CHEib%}HZyxH?CSPvsTUGO)WL%E#zF{P0g+*m6y; z7pH87A71)1xjf6I2?Y^kHoctA<;e2!w~lOJ1t(cwBX&&dpp5!7TJsnC6t1Ps=al&# z*qtrdo!->}{Dbe&Bl$$CKNLq>o}lmUlon5}fXjL(a^$4We)bKNl^8?ykCSP^>=u&h zg+BxAt|7{q%ixFqq*qN#^CGEbR2I3*c2h9?@a@=Vr2e(%FAwmy|L6oMfZbJT9HNOu zzlapAc+UT4K=8va_|;1@eTqn>{s7I1!>kKGeER`AuI)mfc&QnuT>U_)@WU5?-6hCW zkWQ5xD{pk-N64uiG{Wq*{5dt=Or--G;Squ#zUYby4~lhVf3Ul>TROZSe)tb#5@^ii zcz7jm)3j6C{2A=7QqPT3oz+-oXgz7lz|DIhg+g{bquNe=P6WGi>yMcee)vVVcM33jFZ*tgsKh1IN-+Q+N>I;DjGu**=oIFIK{3(4QytAfEwt7wnB$cStJ@ zYfYhBKceZqL_L+>Q|0}?TzMkcomsOs8<{lG$b@)Olue+c-FL|9IBu(_d+^IMt~}jL zmBnXYqDydiT6v_6EbbWa+OM9xeV-Gjzz@H_pn`~(zhRyO?+3f9b~0f*-A;0XAKt$OpZ&vY zXtjhQ2mW&AgejiosWavNrK(oZqnqT@WYq8$Km%K z9g|8A=x44jTX%t@-*@5pQp#+7=mHgFM3Hr2I;lv$M3;sU_ki7%u5siJ_~Bm1!4Kc||MwZ! zY*^$Z&IhOe&kt|ei`&9&eOM{h79Jzq6v7Wb?${UV5b`J$>~2K(SxSW;{?H!m!bc+? 
zzuAaSoP9wfMy8WSNfdo~d4cMz;g|rs^KbItoNd@yU%pRb>#@%m7e^uYZjjPOLJKVv3PiXb-fgBEYXAX9EpapqdsTwi>yHkcAe!=oiQkrPO z!@=&RjzLasnKVD^a|&6k2r3E&4+;NC9`M7PVu#i>z=kW8#Mt9g5ou!9-M{G=mE}mV zm75)J0=qMSA6_>26Ah2brFyWt@7Cw20DgEm6?DSsp*u6xkoEV$4GurNt6ntqyHHDF zzUV@m>dLzgfJ^Qjz%x$XrPk5}a{LuXR`A1D{m|mCH{rFKh+CR!H4gW^LQ%mn^h`5_ zcC4B0Ptf5(0WgWcJ}55LF$13jCU zLrVQ3>2qrZ&43?%d>@Y~anVaU5g#{Q~&6YMS@j)|T~9j1ai zB>pj;E*y;~@3tn&`>xHK3_Q52%a!F$sInjY@IEi2X_#0l-0-dRbEW~hp*{KS8z&YW z!~bhnC0T>r^@SgP(bIR-`P-BO!0zJJ?D-P>@P=(= zyHfzWJD0D*Q{ji#1-tw55q*G_&*{t%d?xnyVx1s#V<*UQ@%3}m`WP%*H<#+PKhls! zGrkUX*MA*mUHIWA&M2j0V0Y!gMYL!1KWcyn}cM9S$B!bNwN@= zW^q*qUIliilyAa9RTniHWsz8EBn8Z_rYZ2lug-Di&0u#|TL!X{atqysAO2ub3}vpz ztQ(}xc5&zcNk`99j27=tyh&2gag=8XujIdbRNoEX)j51^V0XUp%KRYdB3Xdlz3oh+ zX|Fm++{}orkyA5y1pnhkSstucPRG;HZ{m;p<7uC%IT=}1u)B3PY`NMqSz`AaGEvH; z5;@GHW+DoRROC@Q@GuNTC)_(dKK1Dly@MZqyLT*|U))G7%Qe{QqZ^L}yZaKU$>Wl) zQ~PW51${+VJpAyFpQC@`j|X>VyKr@w5_iE5KX+Rcyqa*|YoKFP%9y*r?&QZfvh@rZ zcHVG?GQjSt;fH^h`jwJqTW|{4-3?<~E{7l9Y*7iF#;lt*sDykw#o5EdmftEP!?4DJ z!|r?~`FZ*D3+ygtG#nGYG92XN$QEFC|G^KhFyR%|NoCOHwYUlSj%R!>ZbZQDY{Bl1 z{?_Fa_aD$W_~DZt#M0S$*JP44TDuM3WesJ$92%4notqhl$)A&Kmr9#ivOJ>Cj- zmv8366PGFSnGZEotsX_^zGhJN=r<(SZp`Tiy!c)Px&?gtaoXKelm&Koy1sy%AAP6I zhL-#m>`r&14PSsCzVk;hO#!?6a`q^Fk(J;e_~F0x!85)Dj){cN#1?tTibc?qPiMjO zW%*U3BfoR;VmcOkpoI6F+8rHUHB4RJ$sD4CSthtIx*cQ&v)m!re*Zaf0odJQ_~AEwdQMMd z(kNzHH0>*?BWd{Ie}UcY+~C1F{Ri-&vSw<7AAX5NJnav(zK{ zWj!@m#!$}w6jI#yjLc8!^BS+E z{FP#*lH>H|CHjKkhp(H{1HNd5ZZKpR6p(F&AAXVZVQK=qbF3&Mld)2~AAb1P58;GS zGUMf%@5!$TE>8I2R|iy*c^n)QPn>x8T~8hYKm4RCtu$E+9Uu>)$s^`6X~7RaxD9vX z7Vt99mW6UnE2r79Q_{3gq5tN_kZs5ndi_L=7y5w}KXB(7Wo=H(xJ8>^z@eUz zKxv_ONg^11cL^Ts33gX7O@$A9tfiTj(bTjwjY8pvx7%*WYTCHDFmU2$(#YV!4<7+` z7Z#jL=bUh#Z)eW8!0uj;fWM?#g2&_>qy1oa=i!HseD#kW%GmHz%pEIJG3&w)uOEAm z^3@|~-lLP$7$wdB!4F>yb|<^ogx??PB)`l|3V0kzc@L^-#07X`pCH!=cJ~Q>`0Zg& z$aq{b_4yD(yyPkcyj17KC2s5tb~i&wi}$77AY=I9x6e%^z1aIyxImW^!S0;F?lSz8 z`I`DgT6{K&9xj4o;^zyBoNvU}hkLO`k|URP%kUZa;SYe_`Q_zN)0!`&`_ugY{O}%m 
zwj7huhxJX4(s%52wf7t$e|2&0t+e5Pn&_puXTj3HzLCu+WRbz{=5IJnQ;ve&!4JP4 z>`pA&nD?%FO=VFT^at$jNCD>3dx~&pxbRsM+}XkpKXTq9+PEQ!UhIV@eaBYY!9C_T$emtu9H06Qa4SkYN%TIly-!&F|5VP)?&^?h)jT<89h1OZR-yj1Io+CSf-OW92#FD-*Y3AN^Y5=0Zk#Wr!IKNGk}3S~ zFZ(2u;-$xQaP&ZK1G_ud<;=zUiac_C4SjzJ2DCYo4$keQqq-(6mx$cwDsbQ9(mWG> z_&dh98y|R(hJ5`&zrvA4#{1RT5t!ox#5mlqnB>nNpgj2Do2E&ylY$*?&%D^r&zygR ze4;D&a!Kk`1l_DFC)=0E>A(-)X5q!IHx0R19$Do5X;h#WtQGZLqt_rSQYM|0X4Z zM+nb~cZt@#Y-ukT!(l21yE_IyyzCZ9c0XmuHZRc)y#*d2?+>(~IEQ9@kDz`VE67`{ zKktJdejC_be5V0AINK;LA(dK^qe;x>61B^!vSbNvX~6DW;&r&6VKb@eB~V9rJh{$q zqB)I4pNB#|_tb7|cyk7PkyjqIKzQO%5*qu>LA;oR^L+(GV{?89@@zjbBnEs)M5rwo4 z>@IxsaXOzN#paXj*#zuv^$}CP{}1lDUD%U=-EFq7qNm#OEDXR+H`v`SarE6)J|nHd z6x=Dr&_d;UnxUn}!&TjQGT5D55^`$lO|)fUJe~1KAh(sxn0Jxc0K4-ByR+%QtoyQ# zjxUZTjq+5Ibbd|~<>1T&yX%G@{>FVdxCbiePIm;U-_4qM^ zukWHMi?V1=DjZB7&(k+Y1#ZrScM9xofQCLlOKQQ+IGKvzhnM(!h4ve(^ZXDub_Kgr zNYdhZ#ar-@#?h5u3CMWdqk>lh_zBpZH`tv^voa5tuBABm;T22K<2bj2)_pMKtzdU@ z@WV%+mj!Dpr_y3{gZ0THEq~l(EHvj=V0SmZ+44*~2{wCilvZJ%VY0QDM(z;fw0Slx z4R%)-WXWrCf6&hN1!QX#L6-2te|v;`lRfC_iH2wVk1-4I!=IR#Ny=X_*PN@NpplAP ztLeh6ldy*}AIO{6KPHKeB%1CKOQ+snr3p3~Jogb?rvdN?!4L1Ic!T~az{^;dNE*p- zX2K8O|E&jWfZf?$1S_+@KpEjtG~rk}{TKU^bZ#55+YEfZpxaqGSB76aK0^nCBPi4} zpT0+arL(pcOkj5gV{Q3(|2}*#xP)Zz94vZ$goYdx=PL@f>x9hQW70F{)leF4{vJc2{*b6yO%2R zn>ix70msDS(-}0P_BFYdqVEput`mOv**p4isK;qK?G-_rLJG(Ye)x#b790b1w=~a& z>$=2vCH(N`!0xnW9wph!eYm^AmdAtL)jdW|P45d`TAN4162K+joTXLjvfNe-mo(U2 z{9q%lf*;;}a5}vLyOWrCk?L0|vy%jFbiwX+4bnv(_dfL>l1TGI;wY)(290pj;%{Jg z#$a~?;fL4L2fKR|Ls=`6Dd^@CDuo~3dyhrVikYU*3-yDg54eaU=2UuKN3qXq7dveR@^^E+g?eq z#9TWL0=wI;WX`*ueI)zAxwP>P?p#yP(HciN9_)gAh6=i$=NPj1-{-VwIC`GEkzsJF zqaE8-_{>`L<$&F-9yx&J?e5agZ3!g655Mx&EgI^sjouY>ntCArzgdmn+h3v2qhqLH zLJBSZ^pyPIhgUfaujFWF?hcpd#-=LDnHNdI6XY`-&}%Wvlx1phKMX&-!O*@uLiPk{ zfZe@RD5UC1J+x=F70YAR{orNI>&E`2WccC7g5AYWFQZM;QY<5G&nv+0)($b_4f*fs zu4WEh2D|ecTM2Jle-1y7XH*+{ra}z(D*W(m6H{q!MKrZ(U8VuCs;mOXM5wn1FC2^8 zYJ)puUK>x-;fFu;9-XGHI;`yC!B=j>l^3ka&qrLQqnDz|#~c3*tu{D~47d~QP7Cbr 
z-XgF&^-6LFyHo#@O(T}Srz$%$jsUxR0zdq1BPlKnE2E`gcf%GPrY$LdDbv!Leetz< z?zG}MnI7_nAKne@F66~=dSKp{f9BZp60o~Nt4%p{(OWwEJ&PQc@7=8AKAO7H?6l(L0p#bqKv?)`K#|(GlxnOr2$7}Oe&D)fsfX^=Y;U)ewQ-i|* zUccRgI~Jg)5q|iA3+gCgL^S=jO(VIX?X(Vl_)@St8tlX){pI+@ymR#7Y6KlI$t9Cw zEbbAuh?6ejlS-V0R}k7ts#*;VYh5^Fpw@xXYI4`$pd#{P3>*k?+`a zl0yBY`IZOX+raJ&Gfnu8Z5KU!nn^WacQ!ZSi`}Nc@ioqTdyOX_&L7Cv;D;~Ul}riO zzzaS!P-vk#oEmOy26k67PK))`Zqm@8IGPMU{453dYAtoS8tm@rR~H@yKm7H57wPcr zC=$0!CzmxZsAIMf&jY&~T#ow!2U#9>`YhE(;92os9^HHOnJ!&1=Otiw6$fp3OIsg4 z9d?vPgWVb09w9&Y;UC1>@J_J1Ka!TbVfS}>1V8*}w+Ql9K25x+A4lADU@x${9~X_e z_Qz}D8yOUAf^KK{;eRJ6a*&4$JGppr&`G?%FMmW5N0MlBAiR<<8mWJ&2G4*W{x-Su z_%WL7r*WOEFT_$g{O}>B4{5_dJ?`!gU-fqEPvM7GI3-Z~{3x1elR>Ek*k`~G-)A9q zHRg`&uPVb@^5_78V`6hmKIxc$qxcvLt_QnQf*-z8B+lDqOXv~y8T+$J$QFM1k3P6V z0lV8X$b#n%{Yo$3hu;Kt_wm#jlH4uBX7e0b9_-Hfr4jGD^pf6PN~gimQB(^*ync}q zKDXgF0=xTKqs!?NA5hNyL`wY!_x-c$q;g7=9pIRl4t6(exCWaKY$SivSegqzy#CjpZQfSU~X;q?MrsNml~mO6mlXQVUk8qsm1 zQ%yH=;b9n)MH&j-)DAzq)-m|u|JZZyy}oSm0Bi@d?msQeMn`|qekChj2nJM|YRzlD z^}-)mM2=v0;)$iyAAb1W2s_RMyHh`gKKaWZC=Pyjy_d)`_phM7)pGn7>~0s>otmK` zCoXC!+qWR9y)mKf9$Roe)zDHf9UMfLi!DM_f`8iiQtFtvbN`# zJMcTTn4&A<9r?l!pAL4%mnuo(et&lPGzgdg5@Cq6@V zou_T@(V4Z)nUBLU;X7U*9zZy#zbBLTPWVdUhac3T#xv*PzNyxo2i(!(5S?3esx^+5 zHYSkjzq?dhGJt2nF<}jMC$Fx;np@BTA{R}Miqh!8!*)7ZVaTyycW3`P@*4Qz^WT<} zDcD_GMJ|oM_=)bq4?hj;u2avBeb!6xNj^qn--io7jNF9~rC{%V*oO~$6wdyQ)=a0i z)U2L1L}=^^6Iv1sgkiUT`yF#wW>8a~Thq5zL=O7ijsko1(2pR|&GK77L38 z6pD8KeM85E_3S6iKeYuz zi7-L>gr6X%d7-9$uo)Mw2zcz#kFC_JZym^XL^xrs+Td|e<>nWoTb(F|*=u{yuYO~N*DJk^s+(BvW zbNGDR2EK6(zL(}~QTnBYLiOL(g3I1!QEhf5P3c(9Z?l(k>BI_3%e*g2_FN;hY*;94 z-<~D%>H7)qLL0d;Vh&HV+D?*_qy*hFn+2(<0m8DIBSe|Fi}-V3D=WMm%O%DAXnwq& z;C&!WP`@)wnD!&HW_YP3o1F;f``BB11*Q3ft#T2}d%}gx6ApsMb$#DCIJ*p)7tZzH zO;|twa?Mcvk%H*$Het##Z6SNCA*ueEz;5}WydC#WF(YP*g3G20&EX-!fxBWt{19}1 zs|B(guV&O;C_3mV0RIiKZG+4zi-)$W0G=@W`z4Kk5 zXDD?34ijd)7%U7+Syf{OZ@+14IM4iq_q)H#z1(x%h5GhzL1wH%s9-#3qEMi3EV?PD&4o|5aov)UoZ^4g zcXO7BU^FdUPG9pZqSnH(f_wH>Awfw+2>3S)cRK+rd2=)Fu%tLCGE6jY!W?0z)kb0J{ZArM 
zZYHf8ypaFQTf=p09?+@{=S0U>Ef-XkR|q+qt3qLry*KMWa@2 z5F&Nw3WFE#6uq4y$NIdke5YB;YX+?=*mSeiLbc;T!pDk#$BFH6F>BW5^9 zITZR%SFjLT^TLIVkG%!spqgs2Y-f)A7Y_fpBd;1KU*mDdS6I6~Tv(ZDCfF>m_SIeF z$ETIU_+x+$hhITk_k;;T-v^<>kBJI`>%{=-J}`|Z)`zg{UGz8h+b=q?cb;I^zFvsF z)hYVoeTa7WEXMa_6_0m-H~57hx?vkETpO`W7%qNFw5Ibh`qtNS{-+?0RZO9zZ{J0u zj&Brd9?uf;eAbD|9r__7u!YU~{l`lqJ*i`@n($M3o1pt@jL>IuZ_P_LxQklD*s^d4 zZhUw6iq={SI`_hbt0E6!+P;(L*WN}(?qfLaw2+Y${;LtYH%Qn&FkJX(Y%ExAYx6zk zI-199-o|#PHMqUhO0=zOl92yri|}cmtng^f5|V#AgO8MK;?FmFXrmA(`dcy|&xv({ zc=>ZtPvbG_W4e^LxUOXHXZ3V=XoINZ;Yy)m=@Q}4mLnoPl@74!b-d z#5aGAy1tdCNT_n+ULVn(b^i%E3R{F*r}_#H6t_^G&ulgh-N@Ok$V)6w7wOb25Kd~W z6{bf#5}9GgeCziz?%lb9owthUZ^CWS?bWLVu`P>)<*)KY@fO__qO*aGO6T&ecy7F+PRca{gaE%Vp?LHNK_{@hCK3DiA#vfX zbI^yC=C}KZsW=He|H1|DnNC7e?l&J*IbSwP!E-Pa_s1@$Yr2k(5T>|>2|e<G$q*ETH>Mh#CBS=fBRZhj*_Up1Qpsy4yf zAuU+{+$>x^`kxT5Nh1Fps$BSDD=RyVW#48AiW{#ltgQli1HKl%u!Hv8BvBk>sQq!pEx98U^M#M8u_7N`+||Zy;ueFM+^D^X zrpd?&o8D{@d`u<_rh{xmG4`5#GG-fFtsl*v#T~wW_lyKR>u@1y)gWQ2-mIGJkG6dD zeK_x4fP2%uz2^spcnIenh6`&%)AhVrD&FkZ43-kb3~HR?nCh1lO)h0mAO z1ohwUwENZ+e(t}8J<6on<-{71M$s%`OXEf%`}H?bmn+G2^J>hUl1MNaFZfsSM#vni`ek*A?nfY6wSSa=Zw!hLAiCWNF822+p-WIdSM!0 zSu};7H7N)MmqLZQ$O*!m89Jg9O**{DB#fmF<9U`U_`V-&CX5dY7iyRJ3cE{HYwm#u ztK&InYVXWj^KVz{Bzp^^4u=ao(?Xd2=ZNp|kHgtxeHah>smq^p6-7?p#|dX?tI(#X zEEHZDMYqRH<;CMSvv`vPzsuht+R!yeaEsa?9PWB2N_v<}h1V8xz|YmJzOZKVu5K8~XS=rXzS-J*DaufECT*gSy)sm= z87D7D?U_mU%ct{OUwnST+cPsTLiA}spfJd5y^!|!rD(;D^tjd{!Y34Ffq=yVElx|_| zYm-^|mIKis4PpGCZNfc^F~X`xZ)+BRG~^7GaPE9G7))ZN?|Thf!SOYo@y_nTSQ(qD znI-N#rahb^)6sSBxVh$d{$SyqM7WT>!ch3G|Jhd~ZVWg3Z{uy{>YTsAS+vM%iZCN$ zi(t4zM!4z_ObeA}@m{S>JmvilDpOAs&G8NrPTgHAXjeQHUHE>2+zXfTw%TA`b-9-O zr(PFH9b6^2zgjH#*cORwWna_373=v%<2>}fp&xLonBcoOMDU(HT_}G$O|-PHJZ=L* z`JLNDc7HdJw!hRC&a`e5ZqFGhjJ?rNvuuSa_gx;&OIwJgt<8LE7C8tH|AY$$eO!dd zgONTa*Wgz=8;)j7>jn<^{()RXnWDIn3xy*y*9c9!?upu0SJL+l%kd0c!S`oY z(Y~C!qTP;bgq@!j3Y(L2M4L4~kXrQyKAtj%r;OZ5S@R`@Ya2ETM?M4yZ8m)H z1kG^@zMorsdGWe%j=c_-?ej}DMyvgV8pSZdZnTc@{}j 
zqdQL0nY%5bl4EOy8C^lbpO49+ROKJE_QysppD>HJ_pT+Te!_pNw+N3{PZ1PnyNWu) z)i_XX8?R{@!%Jnp`7u7VGY6OnH`)2lR5O)77m9i+wjB^ zQB258VdTP1g3hTPkNJ{n&UdA8?B_)c}$dLy;R7xUn%T+b4m2jy^#(+Tgj3^ zOE_fK5wiOALNrxvz0i3(PzV_mEo%SKOJe#VtoUm>KbjVZ4x;|T)qqgpU)x0CRG6{o zqm>qaufQCyF^V6H-|>}+FcEeP4HrD~SvXqcQBxUCRbpH+FY7ih;nfrAwqs5d|?(O!mh_5CvO76cZJ}KoJGwo!|TY_gvQrw%^1;Geyq!?UHBnWKiI`5iM~OVUjnB zRy@!U-E23Dk-bL=&m>LB6~5he9ukJA-UG4b!;t{z-#vtL0?$mT{$l@}7Z+t8x+0IC z@mEH{+%9(Rc>LB_vCwnMf#GyM@ETrikrnMUkx#C z<5>3lPC~W9Mwl^|xb$&{B>Ue?;qqav7=QVrM9!8SQ&$(Dx92K$1M~eteVOF()n%f` znI%F){gkAstd0(Mtme*!dAMwoN`cC4k|&3P#nXk;#gNVGB^;2$NLT)ULT5bZzyfGm ztd=l;w^{s-9WD&L|CMP^GlR_aFz#vW2i>a?0ncXj5;Jdx3;+Lo#LF9|7u!@kvF2+y zToj$q-mibz1moVKyGOVP^|TT(m5&2fM-0KYoKW~B>*3Q#J4ydxV}+Yrh_DV)5q%x! zQDVas{P?mS+Nb}}g9EXW!9C}Q+!&r6ieF2HT_~cP#}{F1#tNh<-=IR2OCo|+i1g=+ z#GS~)lJAY2pI#b-%Bgce8{?>~?YCt8!}TInf2!EsnXa2oS_7wg4%J>iGo$STxa3r7X zg<<1%mMzrkCrWDgS#i=#SXXxjoE<)#`&Tw&;Cn4J_wbi2d@@e#9I{Ci<|>Gnch=EX z=jre%4@N>&8wHO`kyO(>vE6iy_^|z{qyZ&l8npzuEz9uckc7S_R!a=Jt`al9Ef6)o z_DQP#G?9AcT0C-}1@B8c+4bB-xPRIpE}xksyn2q4ST(3)Tw4fYTSnoSks&R8ZYaju zgoz2A14Xm>nKGRzHW(cq4(V9|@HyTkphU`5*zv4;-pWmU&b{Y^Igy9^iX`8n@Z${vA7l7(rjMP}PPabaq@ zq(b@|J?R#V#?I*|{~1EN2FZ(`tsBLz0Taa6xqT%nncWa`b~D!X9)WkZiqsZlDyAL` z6F~<3MeU!&G7n}57s|p>G0Yd6ZZ7g4@8=;dckp*}gOm7p!zG}{B;fL}aEy6wjbq8x zWmE4B7CUZ)iY43h#m6cOdX+Z@8B0T;u~Zcp|3p*j;~aqRUbVR9u_(H^#zAw}CM?WQM9$dNl6d`TVv_cHG38#nWPxciS)QGXeKl(^ zFyk3r);=M5s{i=q?PGg^P1G zB-S|Ulm&8+tJI%xT>jyPguEjc&u;e<6HkSU#}2l_q+eb@CJjJ+a2Tu~a);tj70L6_ zBgKd1n?HXa(Cb}Kap_pNn5ye1 zKFv?NSW)7Jhy1gxD|JTeP_r_Q28$_c!o{ph-NnUO*8{u_2jhB6DEmSUVeV@pS=c^W zl)VTMu#FWO&2h6k~qW!@^fO7 zSedLO7JXVomH$nJ{p|JZP5wo3?-C_X&dm|G?fElvyg_n??_6eOEXEbv6_BaAM&9FZ zNlSMbT>5?&=gTFU9g#X!N z5DzN~lrs?N^`W9jb%@B>dbe!wUTefo3`bBi!Ss+t74|;RY&D;WH^Eyed5w%1<+)J|-!)N;Tt7@=a=I%X^xBM%o}=&~ zP?G}kjD_l=FtOZoppaT}q^!4MPprxf$M*(5c+P$9zxIWzNaSa{N3@Hux~&?p&pd!# zIN|(Rv%wX+i)G5Q2Z?BxFfq#3NNhi-PmhL;#-=ABaB|SVhcja(89OEkpNb8l%TsBQ zrM#1-D$GK}o3;4z>;vty&zF?mTp-RmuM!5Rk>H`A%XZ6g{rD1?eJP=Pb#;=av1`Pr 
zwz7Pp2GNRxCrdyD8|$HfOj^%@!BmM z_I6gN_kU4V>O54GXN8JMyYvvplHEbraqT5txlk-wduEX+OmZ9j0uV*OCz;NT7bU zh@KKBiO~E*8*0~MMbs4BD4k8t)+*wkd5CEEZ>;dY(?gP^rw_fPP%ODR1XiVW0lFnt z;z*xx5i+H>sIwYcwvnBTh2O*RH_ZdVeH1Q^hmSC>4(FYSo$#!Q4Y+L554P1|ICj7c zp{89Vc3*~zgKe9I&r>Z?xxk+^R*uI*jZN&;Qos-OU`ed^bTKwGSUha`Cg~ECLMj92 zVXOf^4^nGMA>g!RXTlPZ@NSv#jFL#ax8EV96|1nJc>%(o^4+)LM~S8WI??sSOffTW zr^I<}7kvJ@0pT{2u;||?TG*^Edj1R%zHdf}%6B@Ff>I;+_YA|puLH3=uQXt7&mN*O zE?n@hL_{5zE^|?J#V?+96=%C*cJ70VnUnlQ>fUhiW=juo>)XkI@3I5&H!uuywT)q< ztu1lL870Vv=XlGmBG`E(>AstYIcgh`a#{ve)!QWht)D4+`mPh2%^xM>b#m$7kOkPW zcNL_jao63kGRcie%fu3!r9#@URPyc06RK%ljdh>qAs`@~noYh*MoteFpM$20%JU%- ztMT$!_G2T@L*p^oi^!(1n=rhvS-kTcAx3|bk@ycW!=FQ8u-E91fnkXOi{kCX{K{}~ zx3{mDST*lr&Oi@b`oVL&0ej$YxR#aAfk+q{E?%lx3kBuJ0R!p>vpXpiiNX4K@~69G zcfeSYx+p}Pn64_)KFp-55mWH($9gDa{G(k_(UKkcvqj+RATjIlYe`KJvuVFYSSGa+ z-s+XqPpe#_skTDQAG=sMr5}+D{qll(%?pCo^Et@mtf94fr=&D?y-@BxRqQuiA{pIF z8TE~uP}4II3bX9VVv4RPUmPk-HVzeA-@h(XuCc^uop4Mv>4Tlw(*lk*IfxKucWX?% z#G>}Kep}bDzv4+aR`GrOD(8^0>qbo(>(uHD`>H$fz$_J9VvKC<%W^qup6FFD%%5D!HAdHyZ9sFx7n!`0{{FIU4P$VLCHSzi25Xq`R6GeUG zMqyeaD+VWSp^(NI+~KH}=;|anQFnTyC>c9JEIKw&VpOV$K~bAA z?cE4Wj8LS^6ceEp8735O_ZQq7SLVdGT#?M~R`b2_f(0r52J75~I)%(8mER8^QTIZPfivsp&P{M z+ft%(47(`wXQSAEExMh2MN#mmJjMQ_I{Nub^pip*J#tg6MhQGSdno<5g6 zDqSNA7R(h*l}VD7O24SE?|LllJq_=&SI`+3C9&l2Cb7RJP}n!POH7yOaQj#&E;|gv zaf6nC-BuPNt20cvH1rXw`YX$Rb>$A)hH%vLO%lx>?t3oZOGGle^Pc7)&Yf8lFh8>o z_Q{6h^%@JLwtOht`FW`LHzHK1mg$Jf;cn!(HxSyFHsRYYC5+WvF6pm2O{h;_FFsxP zC2^KdqI|EpJg=?6nWP4~CVx!wA#btRe0{l?_uz^oWz9{>8L$%h9~NQ<=bj~nRP;|(Jc|wyd;P|U>uv@pd>)D?a|iQX zRAs=%t(FpN5W3$jv?k1+14j_r)1Pr;g5#Nu=!KElnGHl!oF|lW`SXA*< za_eXYwfCQoDKl1M`P|3ky#0)%a?Mg9tG-M$E;%pBD!WHpo~(jK+kDjKXOq3sXUXo> z>%?LGnd0Zft&-zYWzi9|5r@MjBF}pmjkwZPSWVe1#NLr2U0YM~>WDEOEats#!~jHm z%?}u*-BS$D3m5ti{ltU?7ccJH=Z0|HpxJ8hGIv8C&fVR-&b^-w&*>@q;qRHml5;F`glXCc za`42CW-knlGC;3{JM_ydfovBi(n0pa>;5pryA|F@4D*0Yb~jYBAAafF7#i|Dh0-s* zr27LbAfx7s-u(S@B31<#%FocJ+DO`9oRlR4;f&aqcd< 
zn$ASV(?t6u`Xc{`Y(5*I`&n;n*WkQpj3(x?AAZlrXqwrQNLdyoVA4;wW*&OIk!k7#@7K7&fT@G=!tzdztg3uc~r`N z_yMEYG0{f})5rxGGkp23o^QDZH=*y)9A79N&c-Bbooy04!;{t@>aypYIaO;?#_b!@LDf_kcl-rCiwSqHpm{g z*$>~*dWcqX?oRK*VZM8nLD6dmoafwK)IoMkM7EN;HUGXjcURngnmVT|DvhLGM^owj%e3>fCiE3Nu)K^r?xl?(f9@gm)kvaVVevG5K{Y$g^tr>{ z3l{8$mkQBA@7xL+bt;y&989MFCf8H_4O5H?@j+;z8%B&)hduk@rA~9Fsd6UO>~5m; zGHZ5u`eM1cGnN}DK+5w3d2#M;3j5)MtbUQ^8+Om~=c(;-FYNsDm+nO$q~4snTjG6$ z{GDZSmi_QWpSeG|-Ug1tzfi*WY_ep>#EOD5RJ2hARug%il;b=K`{Cbycu8C3Q+e;s z`LYvN=`8!$*M?e)u@f-3?W< z!9I;oH2QWHZDl`v(6n>(H%S$N&s-6E-v_$vhrcBn$UrBB@@r%0>7MI!h5hiyUV30v z4{y9zHAG|jT}n#gOhyCGerxa1gLUkOALNZg?1z6bR|~fM?tF273{BaPLYvtSzdg$w ze>iuiU*L-Orm8r|e)v4~DEcxmo1Ff9q6>3vP{4O;6W=+)u~rtp4UbaeExu`GKm4Mi zKkQ=dg?!G^Y#wU|+29UJf5Lve)(BcW^f(PzEsxZ4CoJaNU8HY!_&)qd!;W(Pm>m;2 z1{dgIu^ML1bVFCp-9@k;eofjl`q^tQ`Ol7}w_%*gXwXLaU{4Hr#621628c+iA!GK# zKgmpF5BCFFw2JrON$l5UKYYu$Zcu2tLg#wKkjL^=(qcdS&}|kt%egyeQ&;YCRKWrE z!{^@M`}fs36zThw99_8Mp6}F(`a8n(tPJ$hifJ0>?$X!~zxvZZ8lY;AYn;2gx5pNy zZa*nEazEMY@=fdW6I2qffH^ar;m)}`{dv~#%=$o)DH)W@e)xyaE|TUobqspOtac9j zr{CuFvG;eo0?5)y9HYxcuq4);y$eMJ8ja_3|n z`>Y@TUxu0e@EVutWy$|}CDTZY{qPy{`4)K~cl9N@pz56xn%NJ3VK?Vc^K+?i<2QQH z(F5z0{P(@b#C#0Lx64}ZtGE5`W=avjY5 zA}iAg?1x{}-wGWoxKCBu1&ePgB7^<#pLqt&Xvm`>=i8{@(*OMM=HKj5xmFsr#f7w; zf41;Wc1-wn!OBGrXjJyYEj8{;Vn4i*LLS8i^S!U>X*zpL3AyZt-xuhMz+y|x^mt2; zx2DlB_QSv0bcv{?D~fKpL+hnCW`-NX6_07iz9jOqji-OJZj%@L;r-|H-Q-PAD0b1s z_UuYBWk38gxnx>fRY&heaPQ|d_A7mGLxU0jIjtz8!Z)0+3eBJ??1$fHW{qu0+y}ng z86AfeP{Drq0M6aLu-s2pzuPHg>;L@l!~5Ig=2$6Y_dP_O7b58H+G6fPl)<{Lj%fVF zp4R^CL|y%rvhU{5Th84TSC*2>2W8IUyP|JT&Q@1hKtujDHEmC&Y3zqzA9#hn{?&vv zdB8WFyZm+=!N~3*4XjI~;=A#*W=;(?w&|n#f)~~;@AF{((D%n)XBNKpKV8|BUBbkPdTFgjW3q2w?P{F;V1D-I28)3mOoYLa8b2Q**sWiRxMiIn0@aQS6nBF~oC&`?S+A zk)~`-pplF3(!K8n=%?K( ze3RegwDSPF1v!rrc8m^Xb53uZ6Uv#F#}{-**PqQaJUojU`$f@ikMnfgL=D?b_Wd*V zrg`IPDrZs}x}oyM-YC(@>!57|j)gvSNmIA!O7&>BrtOI@MP z7WUB`PbI^Juerb50(#DzS!X}|u`|ke8CFVn53*}OK9^boTd8Vc4|M;-x$$ofSYjuG zf6qCS!MQt?Er-Z|2k+45{Lc?>HQSc^_S&gi(>}Vvxw~@q!`DAkz>Qtb2#WE=)n8Un 
z+5Mh=jmY3UdKBF#E2Cb+HL%3M9ZHd{ZzG>WdE5uh9wPR`oBdYA6eAaSEc(AYZ7Xyg*GQ#((rMz9XhJl1 zJJ0Hh9_*MH)XfLpuH0KahF!-hduZjOIC5;MA{$RV1bcBNqr;QCWppuLt&09GkE8u5 zd+1$hEg9Z7fvl4c?E1Q6b)g1wCP`?GZZ!ROCY{oEPU=mO6R4$zk2y#JFP*roH0 zqF3aSUJ`p8*bhJ2KpD4-T=0&35l)scwt7S71JY>qjc9(ayi8kyG$E1oz_My@^awCU zs?Q@D3wD}pil?}y+x#A?536`~xTbqz3;W>%^($%l<5=3iI+>~;KBZ>%!?$zp?&u9S zx$cacw`XW*xn7U?3mcF(;G?yj8JRyfTo487n%L=n_Fr~ z;%k7DZ`ghB<%u4_+VHi#L0yN$(#OEP^z!F33OQzm2b{Z88^Yaeo7Hfl>O6(a&=Ahp8Qh}+z9xOne)wx`cd60a5LO=Em{8+^jf=I= zulIF&dxigAN}+dVFQ_!t9NG0g7*g(vzjHWur*V#4*blEMl}(yKpD4k_1`YMTC}uzW zOH(7t)>lXOd4joV#oMz+U8D^3cn3f{er$ zmzQ_PnHkOWpf!^wE{mekT`!QsVl_!+rL{htAzgmk>*eiZ{qGM;pcL zhwt6wh2O&ru%hc7-Uo7*AN%1wWFC-{yAd{Q@#bDi4)OPF(wfwWefN%UPVLyE99~nfn6;l@H?jmj; zq=oE>oERosa4bO zS@EPfBZ;zg9?=$iW1Ohu?yhg{h-E+glr5LZpoX9E_G$EB!y9UyWr<&$yBmMX1>5bF zvE>N&7PDibcycZs`P54P-R!{*1nyX5Km6|cE=byVnA|ycr{h{ke(Z<$NwmjILqAOD zVvFR>KPZ^}@E<)RsX@1dx-U^g^c`oM4e`Zqi52%ezvG_1bh_1@yPb0+B=%}R!IyiP z-F%>T)C89EYDuqP4<*fwqj?{BH+WnZ*@|A+6X1nQgY+;<;}%Iv;wZk?9;&o|Lgf}F zxTod=op65VXFvR@!-Ce#=6!-wI`u1Rq`T~gkDkwd!tO3usG)=f3a9Ar*hs2M%%e8P zHbO#AeAVK;H2dMp&r4&oToE%1{WvLC+YeiGf5 zj;EJzZ&BuHJuH~Zd5M>vINFCD6M8oZR&g|u{qS1`*HN>UDe6b~;IzCu_Ol@cgR4p4yXSZXHSb5B5>is&+CTXA3XR-Th=gd`+qp z#-|;kEY97@7!=cG_QMZ&?|@l)yp(tZsoy$-ri8XW(0GOhjd#hiTWIhC#gp6yE~%~we!4# z`p**vK04^xy@KEAVrd@x;bRunQ$rUs)Nt<3HPQ_~*$;nG`y%blj-upIne;&CBi(AX z#!k-N1$Q_>>z+J{?jEN}oV$xO$tRnVUu3Lhhqcyz@M1rFRLfto^*Bf?Id?ZG^$2yY zmxX7HBSvxVZcw2OUNJA&#(sFM#z+e7c9vcU6+GC$ZyxNJ_-0}OxmhpiPGt(|Z;YV? 
z_QSj0=6yzj2gGb|tiNf93iiWm3{Irh2?^{NyhD*^43Mhoja(zXgLc%0vQIfpmXD>2 z?1w)S{hXpcnc>J~A4Dp-;UIJIy@BUx>X0b9w=|2^k7}V?<=yd__Zh7-oM3vApV@tn z(LtVdjaD9@9X~p$XSE#;@^f$_`{7dxf0JD80UB2yK@-f5Q8D}BbsU}G%DKC7wd_~k z!p;iz!xuY8(U6VgjXwmqLlcks`C^f;YlKYaa7C#Z1lu0XCk{tfy_ia zt!j`BcSB>W59EfMac99Zn)@x8Y`4c!DEr}GHfrO{Y@R7Ddm*!0A3^MgpYSZ6v~DNT z2>FL(ebfk(*fCKszylLCyCE&;3b`xCaE>#TR=#*eRTnJa)SY+Ioi0dXKm3)(QtEdm zlH|*BsC(sCN?|{I2JbTt8aZOvDj96|EvBtJ>#nss#I0FUnCfkhPyBhaod19QG(YLL z_kNmX&U1YKll0-S0wU%*<6$!IJ{`H|sp|*IV?TVr^C;>$s*FsOH1MX=4Wk2npf$r3 zMqlbET|1e6>%@@-`{BL5=pg^9Cp?#Waj&-?p0FQ&qI*2;+LJ`xj2=^GzA@rIdL!ro zcf$Saip2CwG%q!p))%GGE{(Sokj4EKqq*y0g$p$Dlt6x`Y1SroCw|SPBE|3QhGwrM z?=#*7IpE~TE?9Wwe}4ELdkV>WHg}r7vxh7@Gt<})-}_}7^=iwbT_f35@T!Dd*bi^` zlbz42efhuN3SXb{UYGsw-hHC!w~-)6)2_%}>yCUa_6RLAfx`VKq))uhI37nuuc}C^ zT^Fk@yinDZbGkQld2dohiAizPd)^)jbFZa~sU|q?-~*GH?x1GwcCL`n1;=P=J&{h8 zecq96h!vJ?;H>5|?#^aEyk$xWr3~N>5T|{VHRK2PclU%5?_!K9>~Ys#8ZKiCshDTo zD!Id?zO4&(BsxHyotZA|hmXkpMmx^s(v9tr)VAw1En+`>^llem2+tjBEkQ+ZNV#7c zO)ZY5gwK~~hKnY=Ke(fz#v5rHjQPg>5j|g-L<7ghQ%l2bn%b!c;}G7jo#g)GOFAfG zKfHWrEcNzJrm16}(%1-7oLkDyvA1rhd!&xE-xta2b`+JcAO2_X2MQf;jqe%0z+mo- zou|NA;S=;$pZ$evd47ocN#^W_AI3YhA^P^XrOX{59}dzCo^{8C7gI_;&mlgJP-kam z8~fph&*sj?ojIJdU=OVO8CvF~f(C6@)Eo0mmS_P<$Sdl_e)t1sF;rQ}{*}JnaCou@ zq<4G6V~Y`X6+EE#g^6_KcRY9f)lmCyeH7l|@8(gSI9#gDx3f3s=#p64ZM>J(tbIns z!Dc8v-~*{eZa6R0(BgH09tTCyxmw;?AN@#<_T91a0((}EI>CYc@EhlIr|I_ys=1s` zqPl~e*bl#&cb{wL^+L?uKXfvJ_qsgmo|HOD$FIvGwB8Y2*qPbFe)xM+WfcAU#s14eR7n_A4kwcy#kuk?GKG$KYR>7;|~&y9&Nz`mF}bk#F~f|n#xpP%=ss@)Jv zS9;??s0ZvayCHJnRZ5x@L*Ls{Xg1&B%pGKb3C!*SXSs47L<)9t>jsk{qQTs9-#@$?!K%&NV6yWqczWZA(vUh%P+RrG4vPBf4QH&FuOZq zc!Ct=xu0Cu88?{S6=hn(>R1!$n`F{cW_OtjE|LQK;k)FyVIZ@+mdB>}C|6Gd*bkpn z6iXtkf(#bxz$wua*Vr+!E_=>r$^ewFi5}4h|-{ih`_QUUa zz0p2r;p^E5D;{qU!>qRF=-oopw(r>%`v2xNA5`?NFWdn&^A)=4_)$epI_ zhwt+C2d(OE3uFGh>{w%u`|O9`_T><5=HJ_)vcvSes|>2&@SdL8oqYkn^Y3h>9~QYZ zj2#on9jEEzOl4fZ?E*Pwcj5AuDA9UDA%9b;g4x}dN0(_X`{AE6yKBAZjoN=kSaA9w 
zePBQQ*zkC&UtCT1H|k^S0sjAy{qU|Eb?`j1f&xqUy|XZxeAo}axzd#TvDnXk$PEid zszcY5JHJk`FGe+!ilduIQ&^*c+1*?-XKc_@K!Do`YL|+y@Gy|u*{{ybf{ z#C(hW@QWf3(o|-5Dt<@E-BlJ3*$=;y+1-aa8w?u!g?6=OQx>zkd4*>vHH7`}lh}2} z?5@<(0>+Im>5(k+o2fB0`P5ZPxX}&n>pk!*h&xT?jqqasefr0Kcsct7vRZM6^fnnl z^%uK$*)dVELK}mQmD4|)Seg;Dmxi++{`+Yȋ=?8!aEp4J4R!f-N~^ZK6ObaP0zDKC_e|kEAumh{qTYI1vHY`ozJDC zq!cKJ3OgsvWp>w~VgrfFCkm{}B4u_=tebg`-Y2WV=ea9fnce-zet5-;4HTlq&*!=r z>Xmw(QrHi_@RbMN*?7ZI#SoKI?$SiQ*=m20K&9*NQS^F4tRL)+7IsW1&C%kH=xbEJ zFNXeZOrgH)hu6qho-giWaoLiZNaz>5*2#D4hst^a7V z8h4KI+_62$7LHCoY0mEbynl?OGcQljp+p5N;k%p8EMMfzvBu;y_8}%`(13d8o-Zy^ z`*n4gKX*enW_PM{O))d*Ddi4HCWY2mTC=8-F0vnfPVxVBM{*~e3iph~-lnVUhtHXt zL~`kmsCcn4elxp!!hU$EzM8O8y-cRe?hKcu(QBEvWVz51hnd|uCAi?}TO}yyR9HK?;Qo~N+(@JlS#3Jf}d*O8MC`N%`} zzw%ypjWnhd6;c%cY|4L&$T*-2QWra*irL+2)t(3~{zgA#^XMzHyGV=Ebf{VhsqBZJ z!tAcP$PzIwZ%JoM8mY1$UOVg(8Gr7|E@^jUyx@-DP-8^-J*Kz0Ni@Zddot$Trr&LP zc+Bi>ZKWstrF3DEQAtMZho7vFOg1%j6fw#aX483QdGCf!0}Xs!Qbt{v-AxG3AkU>A z$i%`Lg4x~SWzLW*QoujO*e?pk~JBC*l}{kps+&uyt>&wlvQ z30LTrR5#vdd*FN;b4m8Yhug3-^GPDr-ixR9`89Na{qS=ydf~=GPpGzQV`b3|N@YKM z-rHoNFZFaJzzpiCJ~+6<4gF=B*V&Zy)E-e_F%s6nES|aKhn8dE8ol zoaQmROIpQ#_`V(VzK^SX+Q*XGRGS~(s z?1vw_FPk1RyXy=)OH%SYf3hEb6SKRgZ_F|M(F@9rNul6WzG3jVMvJ>@!K0XOW=DEM zBiax{b?-ByPNd{*2~@xAE=B(^z#V3HKczhp@=*&<#C00(Zf_!Q1?W-z-e^>V`QC35U3K1v^-MNp4x2WZL8-}I}~ z?*IGYXV=;>bNEel+YXTI>j+BUa*T3v#TRG=3ogGQc?)E?HhAmI8Qg>!|Y8O&yGQA|3t`<1X?5;ce;pOkDpxyo~&0u!t zRF_SA*$;m;(*{47-RY$`LNY`atEL>G-OTQ)%?{F}n}120Gim?lhfg%M!>~uc=%!0P zNz1UGm;LZLm*ipfl|7=&?jrA4qxpOjrOwJE$FwL4_;rDbywov2+YMu4e9&Lh3?&iu zH2Oj^rLZ5~Ore6>d+K2ECQqy{_QI`LeL${;a!$rm&7VZlk9VVZfWw5ONF!}I4BXrv#^4KK>*Ln7+<(caKOk3tK?Ns}2A8GfB zV0Jg?rxje;4{tU+gH(S+(Ys4!q&`vuWXPRxW7u_S$yw3Gb+q^S9$L$O_?DlQbkao^ zqn@%Wj@g}Jv>tBC-e$LaJdL@LL`u1jshs`rt6TW?MCH-zuhAH2*4MqOl9UEM(8Pu@(H-58s4zQkxo0 zOXCC;&FPBo%l{ zvC+>MKRg~$TR;-s3yCMyPq!(?PoMosUa(EPOiBZGIsoh(vka!sal{Xvb;qgpK*hZ;q~OOL?9A?Fu^-;g zONBeTxzmio!#(-*)Ju!~`@GrXGAhW@-w0ZnqzO#77w{B+eJm|w69d2;hq=o|(=c#E% 
z6ve*GBy0a>cBpqpHnTh9-AdMP0jOQ(N#S`t521#ppAEnm25j1^y0e=_$p~()t;K4uJqA)wO{OO>v&+_R3?=!NRkI{Jt_U7$# z!X2JyQR|7 zk*~<;KMNEvyIVu9?D^)Nm`P_Sz9N!p>~iRN$rlmqH7 zf(<`tz>t09?-fZW3`)pgnIdBE@;hmWFT%>K;M@F;uHHze%{JWa%zpU&sT!zbcK62B z2WyJ?nZ|zj-26S1Gmm>eKi#B7Cw0-A{qR5hz0fg056vpKsQDuAI_&q*8s{gp(#iw_ z)qOB1%pHf^yJFWNLFZ>hQ;ADD=^t&Rz&=*E%k0kHnrB@#B}B`fq65tCzVFGSH?D28 zH?b$=wf&g2^1bd^X>@ccqF4NkH_I;~1NOtO=*x>@H{$5z}!V|Bdi}zYL$=foH?z11>Wq2LM>6l{3NFThEbBAC*e12^i4Q6&1 z9FRe~U%jVvV{3Nn`l2r08GS!1;QRcO^w27jMm^d`o7T3|>hZQ1$SmO+`{5t%mBP!t zhv+D?yHq3Y>6#>ig&!Qyo7tWA+8+4Y{*_)l%psZlNP0N0l-3p~Bjk$<-t_cEKPgLe z?0!vUB9-<@#n7YSSExy%iMPz|yqMihy<&td?1x{gm_$npxtHKWHJv=Ck342~W#OI} z>ZOB9iwZg)7fYXCB-4Opyw8v|gIlBzCPcVFQ%N0}nir`fD~iI#WKxO2N9yz48fDDx z7XNaB$sKt#RUaoKW_NeY@=5;;?*p~@UY8kAC;Q=3Km4Vut_LZW*EV%P{U?SN~O5pdGJ2c=N&xmUL&Tq(fulCG~Jj!XMTr7>dv6t4x zKPSsCW>CJubG)J(%D$)~cJz6w9vnp#%d=>5U<>)NAKvsP|F=zdLXYEe*p6egf!W=v z)qJPc(Md~d{>SdT*bkq7;5Qw4bbz)pyHl_{M*G~kvf0fBbib|L@~STJi(o=rdp^Sb&db|G5pt4Xy@4%wEBrTs+rwA*yf5ik5sUC z?^(Ls8A;<3v&kp!Glj4pel4@R0Er{E#qnP0%n|y)v#xbj0R_@u3fb8UmHd0#%zk*o zIUQ8EHJ_rG-PIjGP6yA+<3p7b9-ZbpH`(qm@BfjOu^+yZ+1=pi3*`P?4KJD9ZDV$~ zc&Hi9sXe2|Uz5pvM=U-4eS=QE*T#Z5p7?Z$`&n-3;|u%YZR_J{VRa%kDLtfz$Bgih z+1=azd}prF4O3QKp@mB9MJ`Sy-#4#lVwnZPnB6_^a6$eBW!!zucWR}PWGHe-GIbi~*d>}c{Trd2%azOg?F-Rp}!NTaiSI0@=aCet6SAdN|GOF7<>b_Dgio->i~kI{2=TU_bo5O&{pT1nx%6bZ% zXb-bH;dO?luphog#}!e`?#vP_@O}L&3SmEduz3ur+`d9f`**{#DIVNy$?p4bBQ)=O zKzk1*(zieHG=Tl^r~m51=dKstj`T#`DQ&*-yg}y|$C9q;Uhd>!Kl}#XXYiex#X>jS zD^tUB_Y2gQ+1(-b!yiBSk@6h6W8Vc|Tsz_fFZRQi%{fk5%+d zcBjGYF7~iFJ`aCE-`Ee|k-=Q@$TbQN)Iv$Q2e$Zm!*qus+LG>3Yf}PUIG#WoKi#F> zf0%hOyVGEHclwYP6bD_W{V#dXBcDP;vm0pm9CN&7b~pa5E3^)%qTl*+)Ro!Y-_9)h z-rPc2QZ`U{%suX1xv$Am4htV1rA%gbCk+Z{yzU>0WeBCpkGCAa|a1 zBcqRz!+SX_-|U3**O-s7AAa2%o^?I4=q$6lK?l#%0QSTGVRpBb*54~@DyY76hJHWcn^gA0&$0bNbu~7a&Fn5^g(EuH z4_`Ct2>CL*o3#EQ1y1=#MeK)9<$XrVXItzU_=_wX_LD2my30*Y&>sZ_^wM)i9J4z& z_QMZ2+(fQMnY8g(6rEgrkyQ?p9Z%{`N#x9a`0cGmFg@puUuqtxjL_sf>1C2;c6aYfD!V~nQ+Iny 
zO!H$d`OpRaE0vM1SxR!u?q;wb{?x)&8k^Ar$C%x{s&GI8`{AYU9j0bxK(fyck!y_< zD%cO7&ijlPPPVX;Z70u^eKd&g)V`iRNk429QGLW26Si}H>xvaJ*S@E6#SHe=N0XMD zgkG~BKHbC}w#@D>H=5wvt6I`#KYTvoXz{w6Bp;-UbIk4@vmd^GyB^l=yhZ=Lj-#xO zBpSwk_-*fv;qjNdhSJ^Px1Kvh*$=;JQ#6feO{0M}jZ~*%h5tv=Sw~f!es3G;5(Gt% zlt#M4xVK$n&*<1ScE>n&H=WWUC^jGnNT?Vnh>F-ewZX=6+_)qu45VYVDl2;hY${~I-nSS_b>)VS?$VFW3 z8-^uUqVc8sJX^GM_U2r4 zWiFOQiJ?9Q>- zM9v?*i7@)%S8NE!`1w~5v)YK+EI+B($UE3_16dni!9IK_!Vbo8F7g3Ps?5c74R<^5 z^OE(0bflF@DbC~tp@U8mg2G>8dzp>2XRSNN++8*ssL2|4?zG{%O~7jDGmeE)h7i_$qF! z?jZ4Gce-SEU+DSxaESXf$?m>{?t&)$@F9g3@`3EG1KC}ucWb#!KYZ%=Ak1(`Mr!3- z%!sj-5VE^Vo4KR;tBQ0<%|=JEyD{{`2QB!8-RX|9f#+bJhLapQ`WxRn(XCE)_wq~@ z6h^AZLnk-!XFp?!rk&hVdymnT$%rPqTQRi=*JHKC;xT9Nt_8^JfR57V_+vcOi^anG zAy}7Kj(NA+ig~S%j3>MMSHoB?M%BQWe)!JMn9Epw4ZT+yOB30h7ulWXL0$$y+bLv9_Jq4P@_=?Cz{) z2iXu@jWYV-SEX_XNZ<`zTFTsAG+!IpUBGaC*;0E3hGcgMi{fyIe)#Fzm^UH28)oV$ zHVRsDmVWrc^MN?IEEN-ae!>?od-?kZb3Q#?#pRfi)I}YJ2ie_9+x-~*tp&L{&T@ox zhficD>E`qUR@*Xgoa}Bx-BBb(tI47%?oyV_p7<0SSrhXLm!lH#n(R)qt`rY0=*V}n zy8}}KWbPDmIkfOTdt@=tXb8ofWtG_R)s4GWCd&?)Q0M7TBN#yXmII=Jr zwn1S~nOBAR4ejL$+1+HayG|4&fJ1cJ~IFs5>AJ={-2#?zyT{M zb$O0I*RxkncGquX8Ge6kBi*iei#6Gu>Kapd;ddJwQ+WNliiK7&wz^Z4|s3w#63P;q^<|+_#p~%qT7Bp*tb^=mt6yYB+h=W+Ms%NTrb zcz^-Iy`N-v6|=qg3}hX@y9DjX?gqvt;c4a@*f!e8EV4Vd>u!?2Lsj(V=D?op?!gl7 zjOfcbwO-r-!m~ml%t?~RG$ZMLCc2Z|_5F1OuQw`7T7jzs)peGHf4OVe=>rC(rQok0 zf%v}hINqvhi(8_nSd-litnDbV*Ph^4P%Qq6=PvhvbLgSpRz8v4btbzT9cV21`q%Np zklE@j5tzNG27P~Yka4>H(u?da^Hp2fURsXHbs@NPndeQ1$1ru|*;*YS`eb)a_1fb4 ztO#$tgRnb4875EPLFJ0ARFd88_H~npSt=6wFbgNi?$)2*i;021(WS{zB6!AI+@WXO z;5W8y+zVf_yCGY$u`69g{vG8eGs*69Gi>FLk8d$?buwDW?&7)>V~b^L8DK%j9NC@C zFAK8hM>vtP3-?V!dG9@sgBIH;V_fBYKV`Z4_YuruKf}vvAO5(~j3IeWa+J^J0cMUe`r1!yaodCLWOwQYM-hES zRop)FZbNq0uG&U)3ST38ViGFJ?*9B;f`|Ye>5=LsZ-coDNtb8*)(5C4;x459p;)I` z!Hk=M7_4S*@Q|Nqg&4_+R@GQ`G#vIVkthtgi7D?)!FZX+ ztkHs6q~89jT$DdmlbW6G@-vvv_HWkm_lA1( z8kopERzXld$Be;HU1>|lgeTeE(~jnnH~k(=A4X$sS7sA_S7M!4d-+Os7x>t z?(IYe(H`;@bs4)6M|RhLFz3`1G-RZiha4umt24Hi8G~NH3<><31tTu<6oRI;k<*dh 
zGKK8!rK_2o8gK{4)uWMeCyczb3daHrC6DZ`E!o}eE$wB8Y87hdh9M*&8huaR#q?Wd zQcHHXt-Cj8TXkjRh?8hfc4t?TfPFPD;hJYHkI3#G7rINtN_BB}$-_LdyS^LK(eQvf zslLjsRRlZJY-yWL;% z;cliWC5Js^80dNEVkK)$>VVGi%N?mA^c1A+Bo_eyj<{ z&QF&ytecSxkMNV#@xD^G#y|q~$nI{2a)yt)YaZT*<9TzbAiMij&RxA>IXRPpxSH12HVJlJ-oA9y350{YO*f!D5`Vnl5RL)NRFU1)wdpL&MqA4L zcTce&aTtdXobPoOFSqMU)-rn9$nG+Fm~iLn4V+vNiO^0F*!iUz0q7t#WOrpgzOrcv zcbZyVfNQ@{=>EM66BHidNUnv%lHCpJ=Oz7Dw3fK?VvHucTUVO|weD{*W~!}Jlig`; zcawfyRAqWgHhPfV*%&dCG4&UcmpO_l&%v)gPLkQJ33eWtc*%Z7z_cv`p{r>Z1tM{;t93uEyf+%iiZ@AGtKWtz-t2qvPoioahmY1@_Ej zBywN$!vI-%&Qt6sX>(^?5gf_xbd*zYf7N@;^tKbPhn;0jJ$GvxtI#==g-c|2r3*9B zFS7};u1@04*JicWQ5t_XLh1e=x%aRmwk{BDAWd54`B@w;Pvxj%D6iHij&iVaN_$USTz>U2z8iuRLZ{fxw3^$H3maz0}+_of>I<%U`Ia;#2*}3dlJ7~(Wefh8_yL&bv4coqbMwiR> za!;j;bWC-TU3U~^>A#0yPIjl^cL1)2tt2tpS-O$kWw$s;!N1?ou3tJr$nG|o=0SUb zx`bVG7r)iajh9->l=_$WP@Vv1dvgB0C$TL-R}OdamStpjmkyfAh4FXclo5@?)5Fl? z^F??bYcEg9?naZ{dG$7w73!SfEeXRs*Jzk{GrwVLCPid-E7p3;16**D$wz3IHCJM^R;i!3g2{uKBl1O%U?y;{Ja&m7& zn~U(U48u_67(5$#4_=0R=8@fLlHJ`i)0NG)P9UX`Ia5i*)ED(IFt?E)vb&pM?lR<^ zn%Mu7i$JowZFkZ!cg1&R>pICuvV`zH&T>CmLFPs8$0@Qq7xTl&9jhckuU({LGi#nVALnXTPQ__(YaVc30GFDK1-Up?We73tG{&Gwck`m+8rV8y_h; zNM2B8B6Th|>EDP#^ucf_y}N=XB}U>y$3z>lyH!30GRM3EXF@~i%ZowN!Ur%`wU9Wn zyOle5|P;&4S(oG#_yF>74|t3A*v`H5@; zN7+mUbUM;WGM+bMg8M!kA-fArK7u!^nFR=ACWGv5MW&s!E%<={GE<=OlKn%Y<0v|- zC5KmeiWAx0HDgQpZ}d~vnX$OCDg@SU-0gf-Up|oCuxT1U4_ksTHmn3D^gf>4;3ei{cbEEGNQ=fp4Eh)Y!)>AHtWbf?uiKHO)2Bvu zXIf<>3$&}zcrP5K)sfiV`X;s?HIctE{l$vxZg*>aF`0h`@#-NsniU7t`e&#;X(?C8 z?%w?Lko+P#CY}^vGTEK$=~Vo<`UzuZ*vljKGp2WNm9g`bZKs% zoq2}v`?h&%CmCe&16#c_&_H%)HZT`8_thkJy1PsyyBp`syFuCc;Uo@-r(k!Lst$3|hC z)oqN}Zz}7^?w(zvr{Z@TafvR&^IgGE$cl%f;d2Z~vyz!)cbDdQh)c4DEcfAEo$Rjj zw=`^N^M(E~2QlECK6bH-oEoSoqfQ)z3h(?@@dw~Ey_HnIqDP45ZEk zenSJ{S)T`kM&|As-9?-1Zpbie>3ib^;(8?@t!FTXSe?R0OWyha^OlqP0g^bI8M>-F z=+Y$`&biD_zp6q;V|&RVyW64bC+_FkOH-RFEQ$z&^~`A8>2eq9NoEp8cDH7Nx2&pX zKJherBrd^-%}>CJ0WV>{&RRaK;4IgDciFgJU6w`U;a%@Q{O6U9)x*ExN`r&+U@zuG zxwAZSXeC_+A3zyv-DjGI&}zFPy-%#;Rq2wu%UUHXCn9UzD=Zpq 
zBeTiw-21xAwaKg#z8uA9vb#qsGLRhg1I5QW$u#ye9vHA^t*Rh~&-Z~wVr<)T7(4bW zNhNoVRgm4e=-SJwF`w{pV=C`foU7?vi2hz$a=N{zbSArt4723EC1y{1#i6fd2nJp~ zgFb!r#c7<6v~2U2YimtpcZ(x9pn<(oe$ZaMqWE9alU}^S)tf# zu?w$OKSb?H3$jk~+8JK*x`cgQj}jbN!aMmxCXEW-z`%_&SS6g-J?thudZ=nfYS&v-#KW8s{*olhi2h@y8 zK|I;roA<}@b+EQPP4SfDZ+MS5&`}%*Ji&-zvAC1;f3CQ57&Wr3%)8(tKRWx%%+1C! zHT)W~UPYksXaqDq*Pv%h2WDs4>v&0@T4q}*?p2P`Cn5MtJr=FgALHpi9Yr{&wy4fi z95S?}`@cn;T@S*qrexgz_zoJ1cH&KTXQk^VA3Lf@@wF^0CcA5Gnu&qNO{j8ol0eqF zrUxD6@z!5>^#SMaG|Y4J$Z`{Udb3RCI}-A7QAMiuF%;f$=b6oDCxLFfH_|a-u&slHZM%vw-VxB7ABn{+*Wvxk zSQahfYg@xRTAaSLnsOGklSA;w&scn^dWww$Ev1RnMxet9OwxISgeJAW^{PDh%Wc2)rCl4~PjO@<6 z<57IpP!k^`@-4Evl6V`Lx$iXwcSwR!P7p56E5-G7I#NWR=qj?i>>K8?ruja)Zi>Oq z!=c!`wE_k6=**1rmDpW=@_DO~Xl&wc=k4KiMs zx2g}}AsNu>$NS-2Ll(tb;42`aOe zvy1BSK{XNTy20@CJ&E(tx-#F~TS|H`L;T!KdOyC4lzq|2>K=xhD=yOg)m|nv_(}lT zo#AFf?!CK&Sgr%w{sli?#VA0G;%j;$nNfrK8BZlHDqWH52={I{Bf<7yzKe{w|>XN{J&uQt6GNo z{cWU$?5yomFC=!&Z* zyX<`rS`V2+B)eN@#SHUgUD0`R0%P>qhiXc|g9-Im^U7MP$?huh+-0pNcW9r_MSHTl z^EK&s@|JfF2hPjzed)M}^D_ww67z<)cD`?y&m2N^J0)rNl8y=XGZtjn%iVRKQErw7 zn~XrrR472zKbn$N?ICl$$SRbqq@8vhv(dcsKM%%?ho@muq9=1Te5A3&UsnA#5&Nv0 z@YRY!@wyDuxc(-B`zDY`pvy zH+R`eDcN1|CVJ5tRAgIHHa^tt#Nl~+v2MvPyvT5rO*{vesyj*1k>5zN&qVsIoro^a z!jDlZ^4Xa?q1n$+)nNWX={+V~NQNmL6RV~d;bgqFs6O$OHDq^x`FE5}#~wr4uxI@s z1XohZ(c^AgxkYw&&DLLv)r_TEcnuzfFdV5Y#rb)EAq(qCekBx`W(!6 zhoJA;SX|0^0!i;Es;>iNLI(L(2W`nqIu2v9yRU;%@I?IszR$E1OV+wNb*|Fyy0WOW z&BAK3yFTYL(X*fl&&N4Q44+?nhd7G!oJQo<>_Iu%UHpg~I4oC{GnJf0F6}HHzP1uq z`376|C!w6|?xIx*LUTEvL3Z~tF+h$DwvcY24>8bw7YwI`;^&SF_*K_VZj#;Yy1^`~ zMhDJTRwMFJI4-3{A~oa&)-7Z1j_htpUmuw`R9|{OIfK5oAsDnI4$B@qL&J7UIYf3h z$;6ZM0Ge{+6&({~cUP9DqF>)n_}j-`Rq;G| ze(XiZH)ob6v7a&bK2rL|KF9+X=Za4w{%9}~7t%4I^&FmatfYHC<{`s84kjkAiH~XFbxa0 zHsE!$y_A#PtxRx{oNz^%YIq1&S?dNx9DuV$D|vg0uZ{I$_i6{3@cb+Mkd7g}IIp`o z5Bm9ZW`1`Uce1-pTdigI_?Jl9oPf36gE1%fB)UA-m9=BMWdqsWo&{!7H~20(FNlUu za2WnwSOuT2?PZZR`-8TAGDXRdvoTf3i4KF0cQlR^-NBkEX7Zct?i(Ew&Q5LQmd7cy zBfIljmVk5JUgAxEK3B-@nw#0zJ;#~3ggm?$#%%P9-7q*$XXZK1BJ+2T``|1O7qpTI 
zdk^q#MUSfz9TPnirGBo9#InvW)^LzLdm50Zup9YgcSG&-5qM2Qc5e5ekCGm|eOB_i z({n_wjYs&QV656whI^meh-$UB9D3|8V^*0;5AWMBONm0Pb2t{xy^Kvi4Mlq}e-Z@GJ=Wn-lSF$t$$Au@S%M z&XPLEUCc7oWPa08OeDLD=#+u4YCo`P8#ysQbJ^Wl#}87FL0$IaU$VQTHHUF{D%nF@ zSJ^~%H?BLG#Pm;?Qk4qdV|=z>E5Pmdn)0lzr~E~Bx9Ys59BzDutLx)XWgY^L31_fH zS)bl|A1RIF-0CJ1>19D@=B-G0-K0loel@IraZc!jpUj=XJZM8ZDafE>Vs0po*T*3D z`vbh_ZXq|x?q1FEl7CxT%e<5ltl1s}-}of%S$~6sUpA6Lc4vLfO-^l7m65Y@@b(uo z@5}a}YG5Ol_jZ(dJS)zHILV=r&FFkL6aSFiy=^*zXPcF!xX@MpspCChshvEx|A4+J zDY!;<_j${4+||&Q4oRMJ`FnsAJ?$vht~|ltfwAyR2*LcW=g^^DTRC)uj-qaUQsz#*3v6ky$jDxALE!)N7+qw=k(H( z&rxl0cwB_3WOqr0$!M;7hb7gv@}2B1*vCy0W~fNry)1s$cj9KnUJMESjn&PL63R3F z;VnnGtoIw9>-S;?*_|T&@WFdjWc%ObTV!{=(rr1j%6X5K$q4QfgftXmfpu$f?&u}n zWOuHf=3sEZN;3^uxPc))Hm=P?15pS~(-L5rb$FcwU;af}`WyY1CfXg1NC%d~yKYZXh zRq5K`CJJPCDVJ@;oqqW9Eec2rZ0|0}>wvb&D0(~v#sGv3d&m#s~m#r&g-thZ5;zPHKu z+0U4^V?R0t(J?W@S&CUxc}?gfWkKIDyEYwJWOp9)!ym6zjk5(9kNOTq*0#s1Sz*6c8p zdrg!%uwgFP3C?R}0(76^k-_TpbLD{CF%)e~z5nRuVUk_bdA0 zhsS7e56Ll<&kcl6@7+kCAAY5`gOrfn4ejS5R}~c{V9G&!V_(<#%K=y&X(bu$*ymua zyD`Cm{O>DDcJD?5*_{pj@LyYL$e-r)w2|FS>0m99^urJD%Dl;V&I`w!;ygKh^-;`4 zw+Rrqn2AB(I~b%EjRN}Nw|}p~sxF2yJit#D{h;r3b9*sXs=}=~tb>y{r*`@-H0g&Q z;m+qer`0Mmbj5S%NzA4n{%2_d*57=IBgd@8d{bw6P~a{p%hhF~V;)S%?pAG1N9~hu zaQ^8aFUaneB{)m%69qA9*pJujXVlI-gqK$oW$#IPLRss6ylyXNG#U^vD-9!}Im19d zd`Ao3d5(C9GTB{SfR$v?55K`b9;YjVVKSFKzLk2?M$Jc>F8Pa@zp3Q9-a^+dQ81?; zUiIr`aKo$Alik(C`AU2G;q7!PvG4|G82*bv!;|}{q#xdW0bMs_cke@VWX4vyRLJi1 zFDGKA7Izqi+Q_awon>#^)aWMnE=!gGZ)JfFINn&0($+5L9_&`7W zXZABz)gH#Ek4h3h%~cMw)(zIMm#-H;;;nBgs>tsCpda3_ig(huL}M>W1l2buDMJ!`VNoJH-# z+2R7a^<}nt^ez}HKSDqH;eV3d&FkwWlm2ThA!mwFF(C*Go+r`q`xY1fv6WV5JCnh? 
z@%>koBfqnuO?J1;crTXE_=Vd4IK#zru)v$IEr2}$`r%uXf7Q*%!XP~rS*^<12iCgd zyV%LkkoUOLIt7!>*zX@-gyNCfqE_Z9+GKbCEn-IQ)MFG6h($$d2xfL-PCAY}j(+$W ze}B2y+gK*+TqD{mnd9=9*pG#*`4!cANfW<{3zdYbSn$N#9p!Ra$+Wfe)uC~ zcM)ejrOO1eJJllWqhq4AY6?!Tdyg6)JDK>98D09}e;KLBki}Wp!{6O_NhSj5hrjLS zBrgAV#;)*`4<1Y-D<5KsdfH1aETm66-dwsbzmE=U zVi0sS6m<%fSf6DeN6GGd$nHM(F_MFdR}eWNoIRWH`LM(|B&o%q(vTD7OG45b$9Yc*1E;k zvZL-LOwaK?PIh;de)!o*y0WgTx2Tfc*&Q$wqcL}pzdIU5Gs2Ml^&)=d)0?izJAW5H zncve;lvOU_Y;hRwx<#Yh?+)i}&A3}DKn|?&mKXHHyQH7Oz)8XQ!zTe}^Iu>T{qW<+ z?mVqLBtk<&?lk9d{+^kP_}w_&`73(IIEV*((r)y_-#yk!zBe5}J=%sSnv3Kl&W{rSbSqc2}~h4C`{( zU&-?p7qYvgd_IdfQ5|0$1q+pM4ET5nuZj)DceKY_15>B{aw*WT-TgwqdSNp@Eg>MrBnsL8m=xpZ!FcJf|2 z#;^Gfz0th$lO>F!AO2aSg8Yowj~cSOvmFoP(0C=udgCIk$nJ(OwinZnpHNtniic!( zS#t{5Bh-{BpFN~6*_~UHrA*#fi(#d4=%N^c;q=32oY7;g=_6eZ`b*- zegf_Q8JUD7mT%xlKfKcs&c}bJAM%o_%&*Kr2HD*>n?3k-@+ZExca%Ufpsn=7=R9r3 zcb9#1QSO9$+7Z;QRhGbTSGmuctIb|JamxLGMSD`Ph3syI$#Lu}*OIu^o^pe8YCStx zO4eWW2%U??fz=_HM?ZYYHGMe}#yhw1ae`V`5te zUrBf1On}>YBr4HYMnC+Q*hl!Y!Gd$^0rFVYOJ30rzh+=D^7;j#aB(s$#=V0f{qXC^ z?uy2_N$0~VV%a$xrjNLrPyD67ftnl^fh=5;Me4nK~wjm$0VN`ZRp2mC=ld^!6W8l|oh6{IY|xkqr2weG9x zO!)V0M#By|CiuR+rXSvCY$FWT?SUfiFrx}`5J^A0$`v=6M|L-$m90$a`UY|I!#i#b z!o0{5DE(+Hi5r>4r(?pspM~^NeTcI6G0@!}ig^0r|9sm{&XV0#oMNx3(n#K_SL5qam&eIQ{Ji}cq$?hI>vJq{SSI`}j2$v^8*f9D8F6rpXjAk#+GJy^GiLF*1-827ajNnZ^lq1;zy%_b|kU?>adhyUpwj-0qCw6eR6<^!hkh3u{a z*_~5k8~GVt20OC5bJ_9uW&9k~yR9Uf?C$zp=1u5_kMYchAKBgbA8Amx}-AhfhsB09)?pZF=o2@Bm0@XK8) z7tv`p#99_zeSu5#!;kD0jFq;haLu}nv|jEle~{gMnMv1s#T{G@h(`CkFetyP!rb5O z=>+qWmpXoQb+(tV)>W7k9)_4%(byt)k(F#FEo66x#(PWb3tcHWc@q1b*(;$Re#l_v z?$%q2$4auhd+dGE51$sAhd8pk{XU#w82JsB^uv#2KO^*!=H<8fJ0syevsW|hUX)ogC-r^9x{dO&S{aARHf9Rx?4PUX9dIa zXBmF@>PhGudh5yVqL!M<=ImQIF_(V$k>Qv@Km1+=BY8n~*N^N@qr^bg7*(SAJ9Bs4 zW3Y0P{sccbWs58Ct!ug2I&2HBl`AI_=K5AX2#DD=th zOjl4#s$enw_HXL+owAP1lA!#>u!9k(CG#)C>y-_=$2klj_%4<9~~+3NMF zSW9-7(X9}^^ur%DqzA`@j)_o9nY8E`OzDSjZxw>z>t{F%uP<-O?$(jrS*$UUg2WqW z*c*vI6(TT#et0j%4l?1IpKKlMD}G1Y$yEB`Yi5!kS?)sSdOFKjTgX+iyJ^$CM6sCM 
z-L(WpWOv&iCt*?X8|-kmrRR*9^+Rqlkbd|+6LPSd>@L4z4`S~%LXm#>jqGQfnCv9E z$u4ea~09PnuUI3cQeg1(b>ESTJ*#3;2D2+zoWd__zR)jjh#$(_cX{Au5(kf_TKKzJ$rrM_IoCa8{9H|` z&?@WodW>4zV_(NK1-zl5Pr!?3t13aQR_aOaJwShVoF zNOresX&bpmKm5-X!Fc#J9`9XV;E9g4>?FIh@8cmGhiHiUv}0IAc6XG1_zl@#aA3NF zSh8PHVd^4}=!aMR`+w{%cj!SFd~GF6Gs-{wdDLtN`klVO_RrmL=bT#pxMR56Uqk-r z=^>jYFjG%Iynet7==_SuU9!8q8fB=UAO6xVZ&^xqm;KRH&XnE86Z+w;9))4~vPxU0vN?e70RgY40%nQxlE9=!egFWG1@Z1LUBkxBQx@E3Y4%z+pY^@@q*z z{*-$Bcx^4MS?iYPx=WzDx)h(yMTHA9f!EVf^YI&|cOpaJ`|@sqv%H`m-t^UeWb%ER zTz&`@?Ulrje)yj3XXvEc%llQIal|AI5oCA6lnPKXT~jiyGLKGn_k@1<-s;R&|BQpp zi(u@2avDEM^~7D1=OEc#RilaQJa`ii>4!hJDIDYey#mAaM$&VipDd;y{`^t{DT=JX zKTftOpo##av#K-RWm~$`l<6d)Dq>XGLyX#}hT)v)~ ze0Mwwf3mxUW_y^eXFu>&Cz;RhlkN%ft#{3E*}MnkKDzK@FScmwva5c zyT9zbq}08&6w?pCZX7*7&dHco^A>&LY~>x<-NB9IRG&Df7N3n1WOoYm!!Q2t7aaFG z$`+o3o@!1~ao{(4*kSg@|fpnyqcY~QFxCj<;ln-yIVV}2nQ0i z^EO>W z?iyp!9_TM8bA4p%__p%n#5qV>2wtz_E+qQluVr+UNo03R(>=xCP+RiphmX|`!uKI5 zxS;(3lV{t>0M@$Sp1I1f8fBT(CJTR)-R+|v-nY03RukyI#f7 zJM=JA$?ite58u7H1r~bF63x0JKCF|BryqXL#tif^WbWbhQEZLlJU#vJgOZu8o@gWG zA*^-76X8S0guE`rzA7Er_k?rSlLI)vXD&Tv-G_Of80dWE+|QaydP@vsd=~Fk>-^-r zijib&zKm(!;W#-n3h(#aLf!&Xneo71*3u6@qpP0mP&|!WIo!dqI3A8piHyKwkw1c}#XUJl;hLLlxy_JDw5bTN=@1k&P zVZHd~vV$z3A6~UrJIC*0O5!OI+TVK<@?}>iOJh`czkj zjP({>vb!ew;r#~QMddu^EkeTZVo4R6zO@&<*1YqR-Nm;ulxf?mkV-#%hEFucoVdeW zrJ2mP;NOMpZeXW2a@h40Cie-3(((i((hom>ptY#7&TeU9U-yhUGbecnCA%9^zZ=RI z>C7y55M%!CbKbMoo!3g9q#r;kU)vECIwpE4ir#z|S;IPig1UnoPisI#a~h0RaIVHN zAFFO?&;!adp6sq$rWO5O&vA8iJVqQ2#^LQ{DE`t$u9Dq(k=-4pA3o9jHj0zz{cs7# zs|A;t5i^ubvb*&ceC1A~fxHZ_gx;@ERBOdR?%l(}zszOgMAq^2!w+evD-lyppbgpG z$1RDd{O=V)ZEa*J*cj>f8P4>_a-;V6=vQq}0YyCjY_D(X8pSjh-*EUc=9{BIa z4zfER`r-FZQ<7ipcrPQn`w4qlLqGiB%2ezlyPI{h0OcPw&soajZ>)9K z#UaTe1Pdph!6a3Eaqi(G-N^0+Y&4NflbiT>BNCHug=6!gYCQXGB&H|nJekhflTYnr zblL?J(GTD2bqwbHdVozmEyO=QK(xs2LYi93n8Xs)Z3}``ViNBPZ=lm;BiG693~Jn@ za*L|O(+{7+d+);Kdr&&05vTh6&kw&N*hvb8H{= zVmoQE{($aD+)?o}ka^PMDA&>!MLH&ylik@p=_p!NPmsGK7J7*xD3o*fWYAV-Wci2- 
z+1;yc#uB1+9disK(SLgcwl1$h(XS5jZyP#jTYTjA^S1IVzZ}ueLeO$`7xs31%y}*D z{Up2VOFw+4=h|ZZfV(-og0QP78MXEA@bs#!*s>q7!kfF@rf~-d{qR%C?p&(&qG#l9 zl(snje?NTUO-H%Y`Zpe~*^5}Ry8`;*H)pCy!e}>9C%e0zW-Ht3-(tb?WEk}g!hs&e z_}jL%w6XM(+h+sh>vs#em;MM5iMx4)!Lr6t-n$ML2< z5Cfm2U=aQAztZev4*N*wqFm)xFJ%d(AHEa&87+4E;CZVVXY!ea=d-!M*in{U{D~iq zdvKEME{J~k_2*Tk{};aB>|;4xvXK$lukmnf5?aXa)+m-@VmBT6L3U?Fb~i`cLUyd< zoKHavcF+&sMXv%gTn%L58eb_s$Qg#6MsmFI3M{j^)3j9-zDD1~>5nE-O?J1}-bb`6 z^yG2FXRd_}X!uT z?plMjq|*-{+Lv=`KmKob%Lx?!sVk+XoG~N2OE5E+5fksB)4gbf@jnNpi)iQ9UgV*# z{7H6KxXngne%xLpSxD_ znC#Aue)uU-8l3Gsh8bjcuDw zfr}hqt?L-izDM7$h)CUyq2mM5GU6EOl{93yg@;(p>@1HBt>py$@LM}4zT#zq zXjU5;M?d^(vbz{3Gnv=(4xXw+<1+p5jg3|4(algkklkJS?km0Nhd{+4L4J_k^^SLzSC14VjehtqWOs|_9Kw^UigNLki=?pDjk;zpw^bToJ|hh! zWOqyHhu5>zlzmxrOmyS@&EHC%&<}ssmoqOH>Fiu^8sApw$xX7m&182AeN9Er=@y;@ zM4|V3o`XLw<3@KQSs%&y46?ia^uwRgti+=0p*Xsnb82<>F^hiqII_F`=e+1b)sZ2a zN->Lm_=YQVX6wGjwlEv=h0d~&e)xt_YSIvV6n)6<94=?TkAC=pCEP_oPBNk1NzSfp z!B+a=HOWzoUL3}d&q`7_-Bot5*0oo+m(~|P;<0xsdfp5~j{}8J?WV(Jn$?Mf_TQ$F|m~Ft}9GrxzP<+{TGSvP7#RxRgGgkI><+|yNTYu zoO@{}*G(>9P@hnEjopPiYLD=Qe)zHb10<-Amn{FcwQM+53_r5Fi7%O@?e`X4r`w7b z+1+dU;m7%@%GaOSxJ7<-&Xk#qIls_pg`?Q8*8R`RNlyDTVIlqS-sE2)v$N2vE%VB1 zZZd=H?o)uB7zDnjM$nJ(uDnjPp+VX+yF85J@)Gp{KKBe64+@HS76FlRc%VCy4 zpBnw}1IX_5dl}1B&1>|SMWCL3ctz!F^a2=5A3yHYx#S}qr?wR}uX2n##d@%JEWWuu zfhzs*s{FSPobi-l!*w`x+3k)t`YGyl`Pd~gO*_~>i4l+#fD(0z0 zz?gpcIK3Na>TDuwH~Y)<$v)DFe)#Rf&LXx)2=x8qaB<8tl+zENMs~Nii>Dmxt|cBr z3*kX__tiZWv8O&_gNnUeC%cQIA71a9lC*tu7yD#NKl|5J@N19xdkrpGmsy}I_V5p<22j0-`iq91SFXh190M6kKYVHwa|^wV#HHmj7L5-_=s&zW+uTBDFVepK?;CRIhj-&y<6xDC&x_S1@`k${ zTg6(p$XarqyhQ&q2?%ir#*afM@kffT4DZICtOs`%>@$gi^lnxVX*jl5$6iq zi!B`!W@LAJyBl&B$t7eJg`q$F@C&-!!5jxONhQ0xzuH?=+~}A{J%z}L+`-|OfcT;p zxI;gD8rhw4&cmSCFk?`|*tIZjAL|?3t(}HE&(ySrhZe3%T3i?I)zu z4?l2kAmZm2pvPfN`K!T0zOmNb^UG2q*484vI1cJcAqb-%e*HN;@v!relLuJqo-mOE z^uy0qrVsQm=QF-s;jDxay~}(qul1Gt9tLvAumabD=@EJ!!+P}rMrl~c)Sv*)pn1ty z`r#+4m%^Ux&hPIe46}Vh&%2FGVa?O`8{NSdIm2*12Q$g;cH8aYKGdJ+X+(!Q8PGub 
z;ZHnlMx@g|yd}H4k$wdC)+6oa>_K|-(w3Vhl=Wu2Ny^_VT zxPA5s>>qZN8nQcmvO6>S;dh4|M=!FwX>lodpYQ>>^uxbnKjTBOt0eAJmRH$Fu#>g! zz-yUs9@q@GK+c@-ecNsBD7{BD;?n9p0PisEigK`ne)zW4JmbmkZnoIS2LCt6rXT(c z+14#Uk zUx0OFccaQvarw?Cn9>h#$-AMop{vZAr6i8-JmXpG`gre$c|Qel?dB|n{JuS-AAU~T zAJB2nz&5hGV?%RML_d7SOm}&~yXjO18}VrM3f}a?Unjdu9d`mbZFHq;inzzkCqX~_)|#*Mc<;swvb)-Md1!1>7o*?q zvXwoBL4&O&;nEAVNCMP*2P55q*$~?{a-Zz(4jmJNW|+yCb9XS&pFP`StY<$~!K=Bw zd?dT;Lw1*3)?O?$t1vW-d6PNO@a%OLBU8;}fg8{Gao&<&rzkz>n@4(!(R-_Loc$s+kWX7JLVgf(GNeH{fvoco#nMnE9uk!0B*3>{i%D1 z&Pqk;80#Xbe7=>?4}Ukj0UAfsFlig-tfTVL$wX7;2YZMi*vtJ$`O{VM)?2<`_LmonP36g3e}^+`M0@@S&FQ6*ZN6HHeEYK24ZMX`Ts~d z%djldtqp^SD2jz3C4zvUga}BfYuPczm}A%2-D4t-EfS)HC?IxWw_;#7c8i@DC@O-0 z`*hF0@7VM0*}(fg_p|OR&Luy;v$LjAta$c&Ah)9IwFiueM$g#sl~Go`veRVig+&qw zAAZohx6;3dGy9X+;nBof-@=FQ`w7Mb8d;B!H4?hw2luoZ8iJlN#@s_+7BkTLPd3Og zyt{)7Q)No3k(LxM{TJ_UAbj`@?LSLK$V$n^yNheEUtHkBueS2hWq5a!!W}hc#4{NV zAO02I-KEspz`PZpx~)OJuHLwA{_kkk3Ze`U&suR%?G9xV@5= ztbSG=4T_W#)#9Yz>_=i1QdND(sm&e0Zs!ALy50S-%pJQ(Cc%eqcQgw>%0=s*GxMbdeE4~2pELWo>+$qL@t?U~j^f?@S(;q$<>LDI3H?{R zyTNuB}d5C{inpJo4G#5 zyL$^`;zuZ*jD^?5?RB(F-V-e~@~+9=Vm8{)IZ)rdtfPKw%+7!-m z(%ad;Ut;dS41*7EQzx7ENp-!7cPGP{bqkF2eejD_CAyBh`{e(Wsv-A%h9-{&lrYQ9UQ5`6fWTw7g& zceklD9K1Odv;};lo=x_~}zTsvA$~WZbzYyWqp8H;R(2 zp%-M}WJ_IxcUM?1Ko^BpR`cnXB;ZMuJS^GTwNj`1xAOHGY_SeKi;Bz@V(nH_FhhP6{qwL{x%Q?&EXJw?_;ln%gxo%H! 
zh57$QVy#o82=6Yi#6HsqZRZ!-klNN zon4--zFmJy=D>%4H+!+%pL9j;&a~1ycz35^O#B#Yp~O2Pt`E$Xe(#cH0p4BRz5Au@ z?9zIs2WJ%C-9PZ*f82g5)r;csbR%S3`bjjEit5EV@R9SNrh%=_iM=5=;ltnYj+SQA zuS%(Zt+nyb0R0AIB4ltSeYfh2{NWTSdzZ#Zi^ZAJEe$R*=g>G8U%lpKrn}l5k`J6y z4ZV`2*{%1oH^D{in01%Utflup8EcjJ?Xn2(t`mItfm46VsP&xjoP%GC-Sx)00^U9A zo8)#+KXd2OP6#k+gsc3ArCGt*T`yt`ddXC)Cnyg%>V z@izi=zOl7_`8PuX9xj$qYnI5(7S4S)Gxg&BWTckisqz`0N%pI*FYv}N*KV)F&8d=0#a_{|4nX=qO=fH=rm_*;QCya@OZ>4tR3VDEc*Zo5p z%pX`<;5W}cUipDomwd8yQl05vLZrEW7XOH*Hb$)QUn zdgE>BJH}2k@$UBA@Y4ms6}0`Y<5CIl&VGD?xWk8UFtVE7Yz_k=qPALPmeLUT@Q0b_ z?6#*!i-;UqZRnh*iam>0-hT7@``3&a8xJzyt3LpMf5Iarr z?gAF>mfQm+wQQcZZe`w_J9?}&F;xx z!`WpQ2`lcOi_G$tx*PBAUuU`tzbrLp_C<+=4__f5MvT($%EbXyG!yUcFZl37UCZk% z_hXXE9DRNAav2LB{$ndAZTZ`~W8O|ZUQ$QGhxf<3oBod8$mMVBi*i#>e)jqwn01F4 zYQL0CvX0*^2tK@FQ~D>PJoRtp{9-00!jSsa|Q!AL6H{hw=Su7?KGGtYem2N)@I~eco!lz1FmMoRb7sE=P{W6@lP{tzU;8H!n-S*lqmo1%97bdXf@nxOJ1#|H)rwe!-t>1z4ySB zwX&^4zL+%sj}PBw5nP%!MKbZ`df9|`7YrZ%+Wg}B@t~Jh#JlS~rn*`?e3H7$ljSPj zT~^ou*=a_G0q@QM?=B|OL7SX?B4+dBaEw{Ktpydb6g!Dpep0 zr>jGKTYS$nZQ^ap*V}cy|qN zSJc5*PRi+ieCFx#GO5i|X=drD#f^e>Y#-i{=S!=(;{h3ich~V*viQM=|9f?HjYdb> zxWr2{8}d28hcAVmF`&i#x(!M9p=OUA;7Z;k%;3?0t<3|#N*bTP%dD}8X8RGIo*B;Qe! zYSXQT4{x;1R2z-iC)?2X{;ZH9bK%497)4eU{qNofPmOdiQitnXtB8$a7iFM@ zLp*c^Gu6`WH8gW}uHM-`!+|?8LhpdijVX zcZMTWg>D-9Wv;EgrgSHtbvH&5G)k71xFAmgE%hbd-Tn^j{hU)--+sR&bxjt_`%CB< z+wSlg*l8zE|)C4JNv|A;yJ9my1|$ziFfzD20onJm4AxI z$jb*&GP3BRl+?-^5eyRI!i*hcU63bCA2= zK=|-QolMj#Y^M~%yBn6VMqJ^;$EVfMD)=NWuiZ6c;xDmVutA*gQFgycmF8cK^wuCR z{TuJ@WC>R^)@-@%vr-P-Vy8EJ_#7#%7lVEDJwDC(j*c1%AAUv9GP#3yx46wonGjJ? 
z-{IZm;PWi3XR8k@-ISXXmWaD&wAla3ka3M+kG~GkR9}BJ9a%}ISe=!gO(J2X#z|c1 zOqmKF{t({XmL}vg#+m7av_rD4Cw-H*iL$xXd)Ye3MF*S=Cc9Zn2i7T}^>eq&YWypU zs_Uc|eE3mQV2v^B2Ku{uKR19mdX5gSMu*j^LZi(@ZtON?>@o*#sL1eP8l4O z)i5SXnI=mZKg&jbrrnRoV!?+m!_S&OoSYgz`<;K*ivxW4PW(>Q`Q6&{JGy+#7d!ay z>-pWM^EX(+-(t}ET55a*jgG%jJN{PH_?rcfhQUgb>F;hXST zpX4)7=d+LL^-Ma#hyTKNVjtg)Gkiz1^?pe^6DGxcSISJjL(lmx<>K8u+z6n(*Np@m;jzJ2{!}X5}qEq&|H3l6+@J@!j?4Y@((Oy!B)foeaL)XDhyw zO?=mDo+F$2oE$#Sz-*p{ANbi#;_USfeE14HD+_pLw({)Q?6OcJ7!xIVrbhB?ZTopy z+Vof~AK=5ERG(IP|m_ksjO8v8xsyh$v*h-R(e;=YgW;UoS|lTcTVu(Z>~5d z4>)6=bJmVI@=ChGhkwZ#9Kcze%9-q%hpvlvx16&&i8K2PXLnCD=^OCjU+*x~uRk`) zC(d}?wpog#m{xA;sqc6Pl;d6S-1Vz`f)8JgcZBKLJ(9>fqY&@T0`D%4cS-uDmolGs z%Y-H2;s_tUKJT1l-aQR@2Q}SAh9U5dT;$!8G>+EyL_lh8d(4v&j8LkzxZM-gVkT$zfJG%gpj@uCI=V58r}W zCY+hZv0I{e*T@p{d}m$8%u|=yr#UlF_Q@Sm6YuUdv(aN_q(#h1%kY58!iPV1uSh(Y zrOGf?2%*+Eu(eZ!} z--B6t05kOnX6p&Xd^HyD?l3dA8MF6zW^l`2=+5xrJ2IR5F{9^@v-|V3HC)eYGME{@ z5wm=ok~idGnvJf+yDQJEpTx|+yU{6m6&fL{;KL_zCpgI6z>PbC>r5Z@h7Z4%yTfVj z5W~1j+)DmL7NWXZa>uyIU1JS*jwV~T%1}O+t=vT%+7`($?k3y#+`7Pr@4%g9Hg}h) z++l7U-XS6I;T^c!7;(qx$6aTQUzVKdohaS814VHcy3U=b^K4(uCa0FyxT;<$dMKN@ zI~C#G{RJQX8+R&u?pCY0V?8=;rLW<`kK^ujh&xy>?qbXL+3IDyyGPv7%5hig$DPdw z@9qYC_~G2;=5wbz!rjh1x3<294{y$$uPk@JVcY=^Jp3%v;lnrLZfM9IaSV6G7|+!I z^WoQWhqP*Gpnr3x+{p8`1U~#!?wU6|*T^;Qo-;e_;x1&Oo4J#2=5G3`y0dmEd?VfA z!%yMvn#LVAmbE=ZDR=FSG3-Ku55I~#_?z#cTiJb;a`52?a;LA#-Tp?CVtRP*e|-2A-2JVF8){dyfEApRuUEKf92!AO zv;sjhNXy4b9;a+CwZF#HPt)q$>pJI<$#x$L~tE ze~b*-8zt{@F49%9)Wv8S>(DfA9)+J-;-Uyz$7D2*Cukq}Dd-t^ccy3}*U?5sqLDPi zyDJYLz5wlH0~*S1w3M*B-ntP!{I>y5AT8YR2dEGJ2~=E`z>@&h4b>` zTco6;RXszqniy@bC-Lrvqh<9)(=tcfGR3>Ih7aE;?5#XR`>M=bP`Rw6ymI272|;2DyWG*8#0<9GaUe+FKvIJA=}$daTW7If^zn z5RGm@mFI1|NPu8lTzy6LJg9Z)7iaL*m^XMhgr@6Ksbz zxUX2W^!b`0JJ1Ygq8(O8LyXy4Nw>j=e~Y%*9*wce{715TMpa#kcefr5vNl@eDm2OL zG4NuZCra%TSuzsMaunL-{7tpAH+=YvF6;q9+iZ)*Iqg}#M8b#vt$j8^1HEztEk}||W+diW2@zh5Lqag>OCFfRMB;SpaWq!d2X^Yl;0L}Sv=~`-PYpgra 
zqQ9U?S8?JTtX?P!;ls~m)@_G&T@npD%VnLEVAky!zg*R$(s>oc$&UyjArhOcD}|kQ-;Hb?}Mfuy!wo6L}OoB+h1$o-94F!t~-sc zI9hz(m?cur^rmz`qt8dHuYzX(en~~G10TLu?+9s&rvK+1ax8y3YA)X0PBec1W@fCv-h?r+9nWMT-bn@?N{Ao%f4ke#7;mL49?J{7md>*(Xt`tIkqo>YH}1XdPI>B9`0)GA zZjs%1Ku_?3tnV0TWQ>RQK+jl)S2PgMXh7s@S%`P%nZ8R-;VIq6Tk1ADSS!MZ_s4S@ zhWAvC{@5x#Dx;TkPsf{@fJfzpSLJ{6p45X6e-RJMdiDkJ#?xy4&0qW0qi+&YSrhTR zs^Wb$H;9qXzIVj~PpqT?dou9I4va3ZGvUL}#ycB@hZc{Q_Ns!DnltOx#$!vuYio<= zcCc5N7{Q0{zMpP2o?Kh>im^7Hngt)eu>F7DU0SEjQZf&QCVY5*{`u2EZdw7a?|$1= z@)7TDG#+3YUSMN9!Efj#e(>Rk;T4A98UBfPXjJ~VEPxL`5KnR8FZLhfF^)fYhrat# zX^;2V9}jX*DEbWE-KEF=S{IM97hdHiJj>2EV`M*k_&mJKEdDOV`1|L`9P--@9r4h=o&myJG@d)yu0D>;kVy0oa$|s6;oa51TmHW2oD9Wl zPTdhJ*G4~-cs%HRe4l&ZNxx`PR#V}_H{<)h4bR#r<(>3B(Rkh@-gStLF1$lvh1xwqIVW$G2zzVP8M^E_tVsLm{zEnVIu zixD}s+6VSaUA*@_c<@E&lP_%?)eCQa8XkR%J`r-@)JeGnAN~X9z;MokfAI7_M&FQy z@Zn4QM9Wn?|I>K?SMlx|;oWT}6R?zQKuQQq%b(K)B}3AW zEJ=AXB^B`QhQWuoJ5eZ!WKQPt{>u2Hrgni3f9b+n8O8hU0vVN3Atf~H60A7he|O2S zd_R&UM^`6G_#E;dWL%n)by+^ZSC1`cUl4qFQ?fA0WMXp2#*|xnRz|{yf606IDcPCx zWN1dr$dHl`7Rzz6HDk!w#FDjX8V4f@@9qj2oKUhjH_7A-n6ympy?!Pu$m*o<-d{v^ zXXp=8HGaBJUY(+MK(?n?htD!2$WLy5V zAiRc7v;85iWQndbm#ijRG%C?V7r}?WlNhWG$R3p;gVZs6h0KHx{}0)uv1F9~BCAvk z@9sH#_`7`{NENb7=I{t6%swxNb1ZZ#-rZO-Pg)E;WBO&O4j(>*Oq74>Z7~~Xr(GTe zY8H%%&19#lkfCZ%mg@0@1Szz9DQC%8*^{;ELgvc#VJTe>AHFm5Trim|-{>4ED)t{A z-k8kRX|h{K$Z#FP-zuK4NvvHAbt)OJ!OV+|F1l&-y>GIX3|OlsVNxc1x4b)4QqADQ z4`<#iPG;<4yVv45W4SDY5C8b^QF%(X>=SdUC*Iw5`0x)K-IEtXV`MN{w6jz2>GCbL z6B)IqWYtWIEOpk*i!vQPd?;DA?PS`WidJvX$TMr0Evkyt@jK$7K?k zz`w}`Hp08>2p_&ZnZcX{c+O-9pG>?gmy0Uv1MVk2+*6dSp>5cCDFYw=T-jJzOBQiA znM9vHL0a&fJ}FtnzsM|}BD+@@mA)LY0Tn&w1|vjgA^03_-&`m7a_cOnGuDS_6ybGDj1>E17lfhi` z)l~PwhabfK?lu|CW@I(@O0@B6jgG-NB^#Rmp^~N~oRzEa;k%I?eM*M(5Lwbfyt_7oeDx?9(>i2L_mDaLnVcy7 z_hm^gS=4&mYxCjCJepZT9pS?Va_=>qzE)O}VQt&uKR$d#vaJ~{i=-l1*GqVJPmH(8 zc`~r24twca&YEwd*gItRN$QZ5{gce>YqGOR<;v)7`0y9L^Zq1T8%oA@=v;PyB*MOL zc1lvn;6C6!Uls3e+H4ygNJjS&S>5Agc2~~0CZ4~nH5~756q(+R&&*Y~pO&*vI0wo6 
zy1K!bAOpM??`|r5_|9a5t;h(UCMz6%BuO^Ihd)Y&cqV#8TQbFa`Wowf`0yprD;|`T`95SeH9 z8fIvczG{n}a+*wZ&lQ>EgySRvK70v}Gm=Mk`Z+qxs{;0=&$HHHc~{{CM3Zk{B5%TO zh{I1CJ%EnWl`Qs9GTBuxo|F%LBcwA~ZEX*4iR^Y)-i@>2!w(|UZB4fOEAP%8FJO4U zhd)pD`v5vpI9c#lT3p+}hkr{({5)B4o6I8dKky$P-kB_UG@0@dWXtRE?k)!(emI%) zK4i~(qGQ!N@2oK!-brb)>2(s(g2<|SG%TyzR{Lr{evjH|>sPAJV14+i*!IW1mIV z{v4V6EWEp6@Zl?w#qUfezXAH*Aahtd@Zl?ran%>-fBVVs$J!fdcm};K^uc3f{4Jvm zwDvF$cA?kR#&iI(Lv!T>oq$-pyW|7AB!;fQ3pxXa=#TfZoirOh{JlmiqzB!CMRW{i zbt$I^~pW5flkHxKUPWq zEnj6JT?=D67uV=syeKf_JlOpIPR0(p8D)1Hs;iZ!PGi=cPIsdt9S)((Va%+&zbl-1 zIv%a)dL%GQ->>APmFmBO&A43F&)wTD5+elw zk_9{R===zw``MKhBTU-N{%3}ENGw1|$YhtCT_zc(LmEvM;D z`P>c@kL~;ADV?fAbgP!((*$>L)cx?`vjW)f+YpV9E>_#9is(=KZ>r|oUhwuBzO9Oj_ah|xSz6@PTKEo+7EAcedt*z z2jIj1i~seU4x0~MwqvDgX<58GJ34Ny=(;^LNtVxecYh5e7fcuK7@fExG=~Gj9dyT@ z$8wj>+?ZpWWpwDyC!tZ`-38*$tx~$iMr4r>M9WC{@J^+#OWnpc`WKx%L%h3jz0Gx~ z$7zW>62UH~c$w+{L_FZbAI7`8Pq%Lh9lw)>2Vm>6yRuBOjHUx<9?TBDN5N_fAKrqF zU^}{k-u&!!rmPq9eT5Q3m(Ypdt@PxY>hmsNGT_5M;CHW+zg-s4L7cdumR@JpwWgam zviW7>o81J00G>bb0dze3y3c;jeMlTy)(ncj^4b z-1b&qygL)Rz`yRkmeq8FKj7Upg%4kGbvgZpcXz-W9cnGyoVN5V=oUYvW1Ro%f^<1- zsYBtzKcRzcRll-srjtBpf0Q`Ghfk-goJMDPF5aC?Wu}A=zn)IB6W-l+I?hegU%~ou zQn$gu8dSBm9-<4~<~#E@n)69I(u?Ryx1=*Y7EO9ReE1LB40ZhXP5QZcm+D#AJ@HYHt)`pbk{@au;+H!CH?U32GVVJ zU%)Pwo;5TAFX0G$_;29`noJkon4ZvFw^WIR4{!O&Q&$(_0nwfB{o=Dsg%9tto^CbW z`l>rjHTYX??Sgj~Mfd*u#OLC%f0?W*9wDXS!w1mO-%nTn1fBgi2W@q9)mw6qE`L2b z{iT1h@9u<^&N>pHedzqx^Rmzn<ts*%K!@wPn z$aW+9)!@Tt!Z;Yj%ykdu!Nm2Sq&Ix{T)ew>FcC(=M(8oaN3Y>QEryxU1$IK_v3LoZ z5h2at!#BNKk=||{UEQ0#Imd0ZHGKH?Fc=#5j+UD+89t<2>r=eD;g#7H53`{H?1oGO zxc>0reb!`(&5x@3m|4@OxUbfP5C1pphtha=Nw6R;4Ei8{!-qG65%Djqh|Vx0%KDH^ zVAeGcStoVM7RY|s62F<5S@7XsMi)r#1^Dp%^QpDA%S8C_(Xc5TnU#;hs+cwGgM>^; zl9zaQ!(dtb1=FIdk*@~f-8sX$Xan=&KI{tve!gdpXTN17VpjvMv4=x zl!3+(vT*M*3Cw*a=BFLC4Bz)^-`SG^AKn?(N+Qga8Ru8R&iX8m;KTRlZuk^7%On^r zg*8*flIO`HXrokY1s@)k%L<;i#=Y=UO8<~$uwHC=9$R*f7)c z-bg5X_^L2t9>9*70z>8v-rYr>_m;3_-rl<}sjz13?V=>^*LgAgX`xZDXc9OtUZ+=9 
zqoPZa2&?8k%$hH-YYeyBX*bT9BQR|uzn0g2FmB#)4w>&wka4hY`oh3jz&Z7DNNpVg zAO2&Fy;6=l`^lmd32^u-!{Ecu;7Xi(tM|J|%&v%$VlVJZ$p5ljJ3`gki zDr$6u4hf8)kbP0oEBB(DgdOA!L&ym4uJjR0ZDe#&JR|V+U=2mU99p%yinj3%(qLFb zpP!V|=CFz4YLd@LTP`=dGV8)F`ihpa@Q%0com5h*!ZuogcNbW1wFLM3E_Qit8if|~ z>$r#ha4^(%jW@|#-e2V{w#ZAENh4t=4dVS~c#~OoL5^I4tyGiuU)O}aVhSI=7wn~3 zcz3a|n7S{0A#s5T@&@nj2CSwfFq>ZD-4(~X3y0;j9HvteY$uCui{;VR%aUzOUKj7K z4c=YKUUq<#JukWJb!iAADt^cVv0Q4e?!4DyU`S1aC6)U&O$MJ|Bn6lFZbZJ7zk0)3 zVJ;}w$XkEGhtGJsQv{9d9*nAfYktUwgEcf3J;Mlw)r-Pk(*MB*$;P{T594YWtgB`X zytFQJNg{msTP;7!wka#+9^T!wX8XkuKKvY*S>J4!rD13l4R|JH>MxT8&iFVmwzk9C z@?ah+U@j^JgX`tun^LrDiM%$7mRGqMG8$G_MVMU^VRxl(sid9Z!+#wRDNkT~-G%Yh zW@c6Wig$PRZ(scb2G|mp!_pJ)uItN0F+Yj4{rfWEbj~ZOJIwQ zdX+CX{^351_Gt`z?Eb+*nKEs?lxAK`f=w0zqpaL>FE|gscWzhL;V{h3!7^KeclRp) zfUJQJzhJeG{)Kn94fdH;@DsTLAHHKsgfuyNN_N0VYk#4R-eH~%n~sJ%>$z$9qf0%=AWRAY`gndh}+v>n@8;f`M z)i_y37<`n&Fy3CEIiEMJrRLVgTI}35Ifr*Q877=X%|bZ`AAUAxyxF>%Y5_ZLh0{6- zX4b8+e7h9FmOBAs?j-ksQ26krV9z;tB*~&4@OAecl53;5H^8V{2di#5%(_<m~s?39d*L>U(xd9*Eq!^3|7b}L zWIyb{B`^eQOi7aw@Znw7`|1-IgFdhZSATjS?(pF!!5}QT^qhPi4>OZ{V?DgPk29>) z6lUQj*oF7&ER`+}x8+N|t%l*<+2h?!o>@Wfz=yAZcQ-z7nIyqNyy06-@705^3nMWC zR$^(ReUihy_GO0o!g^f#iwpza-I+#5r6YX!qc9;$Gzdbs zsG{?(-jlQN;cvl?oICG=8xYdTb7e0Jd{`o@Kn$9pbceh?8ZO}vhfW5g52B*N{+=gDV z6Fz(_jLtpZ62ufe#m}6MH+*;>Se})B1?n!?o)`Dh1&0qm6z1ps&`UBE2I#!@0qTl( zH})S({XFf0tb`Ssaf=K`$NQ2DL$qr{c+oIL+jT3a3*f`AFBdM2V2(B#`9?;6aw6*$ ztZ6Vwx5FlF1EcgYI?@{W@F}oMd%!TA#^=&`xQFJ!hd%)0^eUfQ<;!@lc1D^4AO0;Y z)Xzz->IoZlkVT5zU5g%Hh8-}lQ)j|Zt%G;h5I+3fg6C2N#_G&B%<=EiCE^6^RT!+L z@$S;T!I)TiORgI)l>k_+4`8--pJSzl=xf7guxodeg~lv6C!=7z`tE{_GXA0X!+y1! 
z0as*=uNF2ct8t~%WaExSvJ_VAGnlb=&N{0rv#zT(dOS?oov>x6p#L?14{rtmuIh&x z|7Xzteb8MW!-xMoZG#w{Tu2vks~Agh4S^585$|puEZf`O+0yn^vdln#bUnOZ+~&hZ z=;foE(I-2>haUhFHzPk@s-j>1I(t&q!H3@hJNFF?-4g}e?;~%B!iR4FW49Zu-G5>3 z_CXhovSxo~|4LdPChr(W&iJ@EIR_tp5zO98cz3BzzFJh1J2-s!OWk;ueUjK=_+DZX zU35CS>_iyA@3V|`4@}@1cy~W1u9FtCe#&>4!3#MD#~8Wmhol1WhAG?%@2=I=ZE~}> zu{Ns>e~5FmlTmdY0E>9cu_S4TZaf=C@vv3oGhh~%!Mn?c4?h`}@kN-%S&!LinhN*r ziMj52RYx~F1?mkL$i}mn-QmM8fQ{@Fab51hN?wU~7YQG}7z|}|SjwxDBIM5OczJp3 ziA2C!9)fqb5BBo-3S~4MK72X6yP{6XGS%vnB#x}EC!fP?e(a?~VK@&hxlKNxw_k=2 zU;a#?+=uo2oB2GyWlbFjA3hWo^ai}UdA)W>(TozBce$1p=lwUx+eIx7Wy!UaL~%mz zH>q`4ybhV^l!3n56Ys7GeE2^@A4xe_)URPu4~t_D3w-!kSk>+D?o5kW>zk<=@&G=3 z#r0$i6K_iM>9%?b?`{};_^-_?Y6BS9MtFCXr!13o@ZoE~$o`CXw*qGNwC|>Bk-1OC zoq~BebEUk8t^G9ERg3WMuE5;x2z&e4yi_@ZPqG0%yqU3qo`BIknz>`!q8b`j`G>rj zx<;(5!el5+@4u5xG#Wm9aw54>nBTQ#y_Jb!E2KI;O*BmKF0jG9p8IM%K2D=<_Ua5f zJPC%l#g9n2KkvNE`(~kk!5F_coqWFmdd8H?Qld8V6fE+4Fv$;&x6=%~J14xmOEAk{ z|2QsDFw9f_fiVFe{!iHEL-6kUz&a1OUrGhPoNHdPnBP_vhVo^{sC*p$=zElQ4M(i~UhrNi72(-ko`KPNbdLhW}1F|J}X(JJ!RRuLX0y3IFb~{BJnIhtGydpYhwf<7YU_ z&yvp1G!O6YHVpf#{H#CunLF{bKZ9|fJIGK6@;m+h-Tvfv{0a+yAxwNve)m848|1{~ zNKKge4W@+2UH(RG_*)I-Z?>_0ux^2|zlOhMIDgY+{B4^=AD6%K%d0zo^9%g#*Yg>q zy4;b)F#FBEzPd!-@=R5jmLc{$W4G|`cC(YA2G3p- zp23Gai!VOL$_jQj{KYfck7so>&+MICGj(RiLlvIo*F4h=dA5hM17a-R-Fu$-H$3}; zIRgsvIp^3J5zW~U#~Jbathl|vT^}_qlAW9(pEyhAbEc%Td%`Kri#@Y=NSrxsID2Zb zqv8tQ-E+>Qg`7>tIHShl-Hm0Z#UakFQ=DO`oMlT}o{~xIzKG|HTh3W`iZd_6;D*Gp zBjYD$p%-W3YtF{X?9e#CE{#ym%#T}6%TvzK%k0{C>khZahYSN}Yz$}Zu*YT8oZTFC zIfLhO7DsU=yR*Av8+t@AXY~=zY!A-vdk+_i-}QB3ms}t-INN{o;cu`D#E_jJPdWSD zc?aC!T`+{5Avb!|(y6>7=JKw13?F{s*d#fQcXyI^NhNnP-Osz_=!qcpWEV*y@0=^V zd#3UZ>cOs(OYAI(=iStechpGURZqTMl@NBDwB;QZ%De0q@3i{tK3TyIl*PR3?(@#G zgm$zz)qFPygLW-4lNEJzBW5o9-||T;T`+2esN9Z zoqGZAu9nvZF?&`dV|XY3=EL8!M2|X}C)IgpPv_lzjdyrocD)Q|=Zg#P_A|WW_w%lI z>YOOo@a_&X1DH21t6^(=^=nuVyu7N~hgrdqnPCdELpOHJ-14>1@yrxI$ptlM#_(Ym z%^G&nJZ1K{!3=VhS)|2gdNwX~bP_X)C$q{VW|qlm%j5t%ZaOf_1Txe7!))Wi4xFLv 
z!nwJY=ZV>;J2TK&cI6Z=Vpe4~N@Pa*c?mu|-kr~qjZ%K8f%a#X`aM&9=~6>~P0N)q zX07MUTqBvi7O^%#eZEf%<)ypLSrjEX#~p zj9Ig8T7smro9F~H=x%0FKW5T!b{CaQ-ycWis zdl!;V(I%L2hT4G{cp$T`3pE0(?CNXn=WcEJ63|;G!Co=n&_hD~ZJwL6_onRGrgFW04nzKvlDLbXsad)tr%`PPF64%%{Rgv9O zx4C1q}aMwVCxtmnNyQ?~Gi(KT+63E@933r$&S6tPComZE* z+x*~;7z+^H^b zx7x=YtAJfxw#}@x33o47?qCnNi^Uq8mkI3ds>dDeGIzCR+}W<--5p8y)xWsQ8FQyw z#N94|9bgY^-pNPqe3QBRwdW3K!LG2i>! zUH-gX0=d)gu!C*`yXe-UIXI&|e1H!h&^TT?9FLGSXcNh36m!rjcKD;m*SApz zG>kWB89UH4cCq^|r-_YbqII-H^H_oQVaJZVJ?zSRf+o@!ZR8jlNu|d@`sJ99MxdRX zMng$LOED>%ES)1iN(dT@30jL0nu~n}n3;pNNhyBz7&IA^LxpmK9e%%e`JG0yX@GX~ zFB(qy_Y36%JO4t_cIKe*tW2Z7%uc`vb^|s<1L}_!vhgP*5%_^-K zy8tvo9MH0A4tyq~(6$or?(VP)@m+9f?S}Ss6AjERV5PLbm@TPjWA)Lq{+rPLr{e(}$Gbbq&e%nG1MYYPmGKIa zp05_$@LcJDhmeSuunLq#wJ3Tj3EM z$16ICXOz!w;ReiHG=vnQ;T&wQ@!@MeeL z(bi>`cLLtsB)r?_9$E4gKK#P@obkzrWfdN8IlSKLd~c4k8@zk&Ly5r){(vW32|j#W zP9>c-$X}=78TZ0Fex83ttk^X^@b(f(z+3)_$9$geYd3b1cV##EtDYxh8D6yFNf-b5 zOp@7Q-h=PE7oN2#-nHNLed2+4w**gn5#F}qanH-+d1EJfDBkyAJn-Vby>ND>muX|5 zm+;7cd*wMjYG{JV59!5@^?^K(mAmeeSiJT9>|po7yUTWR)>U}#Yw_Su z`C8V`rQ>@+v!wg{P-0J1aP$^vm9lOO$7GWs}cICDV>vOT6`d`w=$Q?rC; zJ!g${AOlpFEYK?Sj7#OnBIo^*+`Aj36tnKU=v3*y%SZ>2C9-BN2_##T)$}v@ftAvR z%+V;aN1woSqPNyJS_CF^8G z=4k}kC(lyWx*G5944J6xO_eksKD;5`-DuZ1i6uMrmJC%VvQ%&S`D(viW;%zA)j6_O z&BwoNb?dN0j(=GwZ^&%*Cc9-rhASRD<6IASeYLkxY{+=| zFfTSS-zF1p6o>D_PSZ!hT8vEC+>)Q^w--UwIx&5nQWOS z88e?%5fYt#O6HS2D@6va9rNsssWv))#&zlQJenMBwA>-H_T0oqAG@G+kY(FLrmYs) zwgwL)WMKJtc|hjvBH6cMnL(QU*hixi%BUaNxGXYqIb`M975^x1rP$9zhHe*GI$JVz zolkF*WW2i@WbI763dICI{48eOb8Bj9H8Odv$mWe^&Oej5o$rS+T%}sNgnK|)vV7Gq zzZZ|1Nn%UJZxLC)LuCG1xzbI;yE7yUSc6PpQ?h|UE|KzP;~8l~X0X_A?g#gf!6D>K zCR~;FWD8@-7>*)qm}X?F2IJvDlR*q5i@1$U;^x%HDgRQa{@9qj2Nt1EE2yNe;i znUzJBlT7Ehoslv;?wlN(2s4v=<2tgRo+0$^#$1uvWJ3J|mdXz@qN@vR)i{kap6uv~ z85MLTS<)6SVNCp7CL_t1_9ScSTpvzrMr}>*Z>mlP`y`BetpR-aI-y@Aat-+m?!6_* zuBMS;jXtqO;_-m4kZmpVz(BLfy4FR{`083i=aPXfSV&HaoZ6)`Fb{l6>SMC9_sPs! 
zke!{~>y4xpE|;NXYTuHrtwY9kNi{!Rrm*kH-rl}+Pui2k&1@PapBKW153$t9?=X_M z_Xkg9Kit?$l1-M^mP~JBvb{6E*y$I%yZ2;%e+)0L)qiJz@$L)?6WFowO0viZC!#k5 zHYBIktdz!+Aufg<@fVq5zhhse^lu&GM6M@?$c}CHd#I z2e_#}+2sf_%&F)h)d%m9ly;>wn2hs~o-jGdJRhq;7C9$DPNS#vCllSAY;+OcT}n}) znvj`xAv@ie4E3yMQPOVqC22smx*8el*<`KPPq5UvlP|#SrBg^2+osEXY5cN^p2558 zK~{SNneD-3xAV$|i?zuLu^anF-jVG#X4XBl&0EvlO6m-<-%HSu9*_l(GRT#!UN!!o z5wAv8+oM`&6>;z2|er&qB{26Ca0n zcPzwT!$(-aB|9hM$j%=oL*JMz{jHn!Ivek9Eg5@Dvi7sc+?T?;n_>+Qe8W3wO(wq& z`ktK?T39I){Zw_Q1f&1WBg0?RC{Kodtf5gnPi|!V-#sdl1yks^;@u6W15ij8Ac0Q6 zEWA61Yt{9mN47MfGmwY=Xn%CSSS~EBQFIDAqfefqV~}+5sW|7wOD_6lGdc(}e)Hj{ z*U^D^cV<6r^cY=*Z;hA*{i5aBysIMftn~xB=zY2k#s99Puj0?hEQd(>hwj6CIuOq3 zRn>hzS>$TIYC%ULo~}e)yt^uXN%Ec!Me$@8-GVMVFr=1#e`~D2BDYH}or`vKFY3UM1t@+7r%cx+6oMnd^|(?DxRCv;JkRljxZ2q-%2NZM5uL zd|jgGpj4-ea)VAv&Ccd}cE>3>MOS4yot4dWR~$C8Ll5unQnHWEr`vLhj!UHJA~EQa zEPd&|7>};5S|!|44MK8K}pnwfSPoh&IwCvT0UPi!x4# zFCC)`bdAQ*IoccLs15DdLr51Xf=-eP-K3(EVd6MzrQD&j)P(L*Rc77ob>UKPG}1rl zHtobGv4Rh8Vq%~_EIj_-f$B>aDuGVarparhH{P8OU8%q5Of923H6Srq+jMo-&NJRh z9Nnr*zdKgn)1=-FG`$zT>WPn&LKkavH+pv(D^_&08q(G3L}%+4-ksvz9W1J>`{;DN zrrRYWmrBU)+cJpGSBp%}csgJo{VM3rZ^z{_-LQUi#8%Q3yFIoVdnNHi>5w&}OLmJ+ zSs32kp>^4SKSyXap|t4)7A5F4APf$_f9*O zSL15OWHFt-hIIRW(D57F&PgkbW#*y#casj_d-(9R@a~L0uaaYQ1l!RS9Kz4O=dFhp z4lvZ$%QpSLQ~03V7Wwc;G2J!RQ_cC^KhixcK?kwa_Y_%!cb7&tQ691zkgj5%_QC3s z!hL}b<3+lR-RU$=ia9QI3d-wOyt@u~cX{yPjjP=ekNBnHLnrbk-N;VGthC%PH0x8G z@i`XSdiHrSp-Xw9LadZ|dmomNy_V<;H~$&hXPI(p-I>hd<3%!rPG%6@%qi~9+8^(3 z2A$0=bT|K{!};DJOb+_3k&Sda%S8Oghu;{?@79Q&rgT6b@||2xCv?FGBMr4?9>lvF zO=t8LeE6c{D`nU|-d}V|XVNW=r(=4?h5Or4{2IEa?dYIB!n+$XlD+3<71fW9s^Z&rjc^!A47jYLl>3$n|dW%0Z+>yS-UFlWana*Z*7j}0q?^hdfmOIt|?N&$K zbP?U=2846xdK9y}&2fh?Hb<53;q1HSXM_=Vv>WNe$IRJ*emz5Rojcsk+~rPkph?2) zZrfKIc^X@e0PcKG=t?T=R^e`X_yuioGCPIlx12@h?s_G6*b_4x zMfD?RN4VQw*Fi;exa*$9>~5P!CKTQ_V?TG{|J#X=GIo)JdG&b7UHN=|cE@seeps2B zEcxj!Uk(JxK~1bif<3eere4iPA+x)T zxP8#trXcY&3}*7IyL-5^=>N<6L>dPk?L*L!=D~{3>hd|#LpCwHdqEQcv=JO61v$*_ zuG38DMLR)@6qj?cY`{IxuvknKITWMnWDa65* 
zMn;LobvlctGPR?>bYgaAxr}paU#}u|awPigiNm($wOX|Q~8d@AHr4Z`O z?kp}PXj&hoznuik?$*!%QKkiQfhLFz zv%5nJvvHAD$Y`1&j(mr)Dy1P(SA!!oMJB|Dz=S^hEho;Y9rctg9r>O>gJcgal2Z31 zq?U%kmqy9VbuHyHv%8e50dm6KO2TNF%%*Abl(xw-wK6>GVJ456-L2T>C%QCHyqd0H z(Txb4TS@DcM#>vnDM53&Pk+u^=F(89T66}LG$xkcPr+&HkC;Mh&C=jkj}_;frK3H+Z?s#!P5+G-v|Rq-eMV^Y0l3q6aijI}jc46>rM5Ej zWHqvB!Pq_EeF%N{g09+<80;lAdfX4A9n+eIOt?=XhWrtZk+fwLW9g~Un3&D%ZnnQA zKL_vPdFFV=XNr4ZOe*+C;ON-Dxqq+fB2mn0Ao`vpcmGsR)Xx!h70AUd--3 z(mHCZuPO}_b5KD8sfHF(JxwG7zjk8DuWj`r7wJbc$%l4QH)eNxe{Y0TH z<1Z~>E`Bth_Wfr+(ST~Oxr=L^EoB|EyGb;n&eDon`o{(IL^y8JkorbTs)44|CuVo= z{Jo?xKwGBLoEppQ&N(m*9d1;k1x>1Pyd$%qQRPv^InNgdaG%-TCmL2t^x;2#Y$uI& ze0QXA)g|pG*3i7V&Fro)<_NS#s!MB{SjN1gTNuVUHT92Bp_$c^c9t&p0O zC$zOD(%3ppYiskuZP<8^b80P1u$~rI6SKQNnBBDvri)Cg>kZ8=-RZH|=fzp%Kr30# z?9Q0kT`G;QgsJE8;Yt{$(*7Dw1I&#U*q_Yq_8jmOrB+%ph*sFSO(EDkV<)urXrj{+ zTd&4@TiRmht}BVz<6Vbt+RDA0#D?)?zD?(E2G8OW|wA6ZSCt$z&rl}2)g0m-PQ z-S&)z+tI1)W@+Uq_i4MO(0B{towI5BE==wJ9d$I|w)0N94^6mVS&B0E>weVp8UK)G z+y~lmaXs71PG)xsH03OG zXys{5c?@+sYw0kE&kAODt7+<;y}()Ix=_sNoq`QC_kPmeYx?g#V=FsAX!4Dt%~#4f zwR>-NLpSONcGK?5VwNzBmY?EQ&LVHz2Pb9?N7^62CYpbvzH+y^iF*sQ0JlE4c^DnB9ZM z?-)cKiGscoyO4^_WFNCTRc3cPT#UtoreO_j!+aWtgEl?IHGS@6FoWr{p5AwZu4p&r zBR`3E)?>I!ODl1HGmQymccwM$@1&)en|~NfnDOMfr{nnTdQ7Fc_@BKviUwnKDcy0q zOw43PG?7MQ-ewgsj^~{~7Jk({Q2DX{YKfE1BK>aJH3eX3vpH%k#Ek1VU(g-uhrDLlpgF0JA&mbLL`4 z1N7L2XqcGAK-2IJnrVc#9~&Sq4siCyz)&925FNUb_B~C}FSlP{c7?69qcu8-=I9LC zqes@BLQiIQ%W0B^&?X&3qcoD)-KG<6a)x&4h`k5t%d&^?Nd|t7Xhtgy5jsJ-sE%OqxZ$~03|oj*Z)#CF(i2}SX)d~8+Jlh4fVF3^~8 z5F6Q5`xKfqSl`fMollc>>USe?p6Vwb&ivP#W+vl)l;PtY&T>77fgerR1NoNHgT_RH z&;e93kbNVHa7{TJb*B<>vzfEVSL`I4*OK9KDstASYv;y`w>Pswb+*;15sSe^@-)$N*v~yi(=+02Pij-z!dB*H+ z60^H8h3rD2xjUv^6n3Y@q6017KWXxI`w}4SnBCp1ZXu0p&S4hq-tDK7VC?n=v-Y!l zp0;ldjo(eQew`hTqc^iV4dq>E>GTcOG=X#ZPQQ0Xd*Mc}*ym*Zza4C{Z$DJ$DauQl z!vERA6$$LgIR72zE4a%+d$W?vyhT|+G9EFz>+O3MfwYWG zmive1(pr`)*@oxPW)hY*P3?p zM;g-oqCBOlJAbBWOXty;w(#Yi%Tf02X`V;_PAz32E$SA`?!0DN$&elQQBJG+?~ze> 
zK)ZUerkT88cGqygPt=YY%O)DvH8m0F(sLV>=03r28rY4@?kdW?u>&hb9=042s{6B2R-BNnE{%u6PfK~x5^c6JG(+05f zlG&Xft@J@O)5k4!krdu%g#O)5YK}A^pSJp6JnMck&cQ$TR5%ym#vLH$FaeITQ{@XT z_D;n;u~d&}Lt&$xIIqsJLa5wp9?G$t&UTFAF$ zw_)`v8jESfPoodNL(@WLI0lFwv%7Ly@-OJa|NSH!$_BK9Pd&$2n)9QW-Dxws%Sg}> zFPij|ncdaUsQ1yRLadgfbbQFY1{(Gb^x-p$algt!a0xcF`g57xEu#;gz1&;RZSAO8Nxa12tWFT#$1$LtFD!tCz-U-b1?YSY^}iDUO@4X|6_bk}O6KjIDyv%4$o z9`K+K|4;BidN1_U*iF#DjskD`@PG3@Bjl~K)VtJU5W5UidDcDSaRf(<)ny*L4pjO6 zRz)A)BkChQ>u$$QW_Kf2pFse9_@~V744K`1ziKVM@1LOc(ruW1Gy>O>uF#vHnaQpN zeP(yFmYcDcpbVqeL?MQq3`z9i2Q^#DT4r}K>})7tcf(Zr@T*^k;eBHwg4ylxh8+)c zgL&31;||$)EeT`?ge|i>3;OW3>uc~iy{)X}d+$qjMqHo||NCqyPsxWW#K70m8ZN{%AUM?)&uQIX8-E`dIL)`<6b+mH+s!!YRfDufw* z_{pt(#D&>iXLeq+W%tEX`tTpRM`BDkefZTT;`zf*HqnP)_^&BD6tCmqlSpi0m&PYN zM7wWRGDR^^Y?$4dvUB5C%2^Cv6o!)KB$#x5i`nevc)|OI;7*)V>#8aF!;T??9Ug1w z!`ol@hDo!WB#!qLpV{@%Odozu@Bd|YeMe>C8#_TX>BCpIptUoB9nkCyna1uAZO*AF z&p3vd!J6X7ZV@})mt@k1uWa)ct!t97b{Ws{>>_DPAO2c0zcyxfqu5ollAR?_>BA>i zMB>t>>rmThDuvAMo;~xEr}It3e9={eNhHp*1LfM^kMZtpEAj3^E3Or-ICiCUD=k1@ zW_MpTQc%Y(m8tAh>B6(_$%7tp#Zf~J9Y2b$%>~Lf#2@V|KH70&5jl|cC}o4 zx(}cEeG9(Ly-9Yty!}iAmD!zFN?ZApT`zv@e92{YSKjgjrq9umY3zhaVs_{H%Z^Ta z1ud{d7=Gm6Ejwho(TCU8C37jb(Vc*-YB8lg5bIUBL`v zxxMxZmSjiZ4|dlyu)}8IeQWv1?C$C=Z|>pf$v^brXB=WbvE_E`WCup0q28g`x`cCT zd+E`$gJ~GEyHfh_H9MXoS}g&mUWVf&JDY-E8;b3FUx~L5ka11s@^RTMETRwJ;!`ww z#ob2tjTUl%*_}DFyP-o2W%lMWHCR#es*_}HG_ZQ?S*uh zexofs;V?d~W4AMX`0dQ@*0gUU!;XH!&+XgU%@GO*-+cTo&=tR_-V)F3PMcj?_m@0F zZL4_PoE`!D9hdR^i;;Y1cGsTSU05^E7z=J-&4VZ$-^ZQFJ$I44+EOksyL(GxLb@7A z;q@X+v<=6~6^Q_S_&=iUB-V=EmFxgZWfzz~efSV&cb*&4aBlQhWUw==GtasaLF|{w zQ06(GJqyf9w$O)H)TBvk>MFx|?r6advihXUGO$oup4b3JJM|F!)G30{3dwH}$Ni5g}_g^R6s+~ORwo??1%dXF&e{cR!vE&N4^+1-(!CgRQxyxtL!xYjWaM{hjD zG)Ci>K_;Yq=&FH|WFH3~DXI{>$#zx%cvvhL~*4#e@DK@TlE| zgU`NWFFX2{^UuENJJ{{Qdtn`)4acYT%Q@%;Ax-}M*6&OdW@|Gll<3AJUM zvko{8H+BR5O&{Kr*{7QS6w0#$Yxis3e#t$d6`b{o!RcDLxIx7^p#r`cS9 zJLRG19+3hg`taXe9VC_6-M{QgoS3XGr|84)VRrZ3Zx3fLe_{l?6}RwbZb2aLtUD|4 
zy?GyM_%rDpb^vQ;^Ulq}P4a&7o{3$JpZk5q$HFu`VRmQo`UGaP({Z!8mn>m+=T%@Q zuPQjJxhN4it(oJ@Ey6y11Mc$q$~ksK-uTy2;x+G~XITuoK8=F;x*JGQHJ1ivcf%+6 z$!2ym4A&S6T2K_BOQiW3V8c&%v#?FjB=r+zkVyf1|J5V@GHoc7>WSyHj`+kD1=j z5y&plE6nb$(1+jlL`O7APs5qn-AZJQt9*6ko^!EVwU>?plKAAS|HyNdgJ zp_I`G1+{kn@5A@I;X-4*0fFo~jbL`yfj<1O!>Uq0+FdO88IjIzRC9Kuru?%5Png}^ z9hirAZn~U*ps&m9uIY=7ShHhw54%=}wvK?Z-z6+`W%nSnyI^K_-`AMaxVj15zj)SV zXY0XjcK`*x^N$Y@duDg#>~`(Tj@Kz;=!#rn_w}fk*vt;tKbhU-(}&-AUR&IaPhlOi zyMI5X;%-1SerGz0E3>-*cFHyfs>rNv2NBBbuE8r4wI7;M!4BI0x@dbCxk&zrdZgH- zqk-AoQ2OwFZmP+;dUrX+?C#M;2cFkHV$h)N&`9Mw3OjFKbl0OFNSle--Eb8f`7-k< z4B3%8mp;6i{S_?qH2XIkDJa2;hz^gwU3+qvKY%;>|n`5GW&kN8LoyOa;JQ#q7A{H3!=?D2YotN(Q_ zkLF#?AD)u6m0isF#}LBo&TP;wxSafkgpTy#ncWRxhx3m*MX_L~^Gn|A`YLlyja|?C z+4=0xv+$A_C)wil9YySfHXp;g&+*6D^F<%t!BehH=6pOmq!a1GZ}m*(EKwM4?K_J@ z?3x}$AHD;#yI!qZNh^-RY?Xvum8))dT-Ek@Zzqv3XJ?E~Ljn z@8Ki7p$}ih?Cw&Kk1R>lmk}Kc@pp?bR9#L+OyxW5Vpn#`diw43;XlpPkU8eL@MU&) zl-=6v=)>D7I?DiNcLUhHt;r7Ve)Qp8nBB>eY!tAgdmX#FHTe!xc&)AQvH|hz^1jaO zPJuprkqhV4a(QRnF-Q)x`};0^czw4dOezb*EOvwU+|W{vYWs>EvpWqpEBR!09|r6a zSEdjDQ?m?dz071DyT)&^b6n`dm(^dv;xg`5vy(jN#}jOz55Jq)offmZ5O$Y8V261> zW_P0>rXb$_Bc8G2`~%!&W}tB;^gCj<*0?!l*^dR#g0%$`-+Ag|hqBD>R< z(1+i|`wZvW18{0omOkuQFXLI)Q?aeA%dJM5OB#}y-RaSX&+e`*Tj0f=Ztl{svwb9e z_;RmAICTw2Dm&dD#~H|LW_O?0vIE4|QkJUS#ddbUH?a#oK=&r5_BNM7W_NGc6@QLC zyrK4GBzK}wK7mHBMLAy3hnFnwn{@VOm#nUIKb(h&%_V0Ms;2|TKA)^-1!CG(ucpy z?9RT>OHK^ZkxSoBA(6&}ssVdQ;;S$Ojxv(j-R&1{;;u_Gb9@fYFuNPHCW8)L69)RX z6El8ofeT&u@BP9%`tZw{-Hlc{jG4h|lAh%*foJH;k82}WJ5*x)%k2oA7mC~M@-b$a zu5|9nSuSRGsw-{8pFaFw!{XuFGXn08m$B@SkvwB|=fmu7a7S}FRDA!)+^RYV5|O4}bY8 z-GvF+xXbKrLeKqBqz_-S-Br#ryPHx+i(*6_3hBcaGrQ~Rk&C0tH6;EKtvF_P;YWDK zTKbOpQ8IoryE~mcIknCiyD)t4cl6)kB!_t?ZA~9ualfK`soIYwKI6xq%Hrq0k`(r8 zFPoX&Wq3M?huJqwp${L<>~8bzV;G;LDX(XE$~T^MH{7+Cg~@NQ=tvUgYzxE3@N>w| zZy_Zo+4IEgEb<7WxfEZF5oAO6hr z$H=m`mP}@MUcbF1g+6@hxdJqO4@IxODHx;p0rTm@cL)!XV_Vn(@?2fw1{}qDW_P;p zccU@(2WrMT%SmQ;w)ElEH!Dcj_509)*CzM=+SA+0&WB$SOdozW 
zv%7#5C-C{WmKgl>q|Fs1Uux|nd`1N{k0wGxJsg+l!(T6LDM~KB(wo`cyX8RyjG`>Dg|!0b-Ji|)+*dK|WPkz>q&Y(qG& zThh$?giM4oBT_$n5J&05dnUL^?R(zQZFiLO$zRZyKKzW2+@)!M3O={ArPW$5Ddn8n zbA3Be#7n%(OTe`?;jpI&Zb6uy|5S6{O6JWhcJRZe3`$y zBrv;MbiIu{9P=58^x@a}hGNB=(=h&|BdTiNV#DliypyeH8PmMD9*;9h5qRG261^Nl z(NLnxr57L*3(aL){Y@-gAC1xGF-SGJgWL-ia+KMf1G76Wic5s=MJ!vv&xZ>MIC1|4 z6yDg%I%aqOuJDqT^x==MI)$r0La=ssD!=wGI7J`+B=0k39e0z9t5xL0?t}Qmv#x)6 z2D^Tn@mEwkiRSmsn?8IC<`eAc!{;%( zdzhY&xypJ{vdLSr?jIFKm7U$ z^kH_Fe@h=wa}O8%09B0+1`;uL*&OLbGx zYfI;p+1=rjG==EHH*TdJJTDG$!yX|fvz4SUyDRzAM|wZempj>o?8XSg82a#kO`BRI$2$ogxpeq+ z&cn4!OX*Ne`<}+c#_3jaDCs^{O^n5gQBfH7yA*-iX0-eKWeBsofWyXOuTp~QuMxP= zYa323c!IhFYne1BP;{8xT`ks=%ok^{_Hrn8(udDY`hfEj97G{CNT~CP-4JzI{PGB9 znQ(S;%O3a~Vy9vOf7W=PF;17~_?q9azMF|4o^|u$4`ApCW!d|Oo7`e{H-|pFDZXOm zf;9MF=DX#9lekJBet0V{QDSzNyTMLUN4&ys|3ow}yKDKb2pGb?yK(ebnBCRPvlQb^ zccGmcgJtT`SWX{4OUGP_ncYq4^_8- zKUC%W26ypbcGr$R{P^NZbaUK+;L)K-Kbwbe`tbVB-m;n5UDXxFmkmeY(yN#ioMWhe^`)@iz zDv4O|_9dndvy(2IQ)~FsOJ**oBeee{f|=d@@JYkYp4HHL;wW30-DOR7lYkm!xgK;7 zBbeRo3duy((%+a#AAS(;Gq@bfz5ROVm+nDNW_PQ+k08TTU6$K;$n|tuKJ?*-gngu` zza4Xqg<{^iGq9!)e`24v99YgB-4bhA`T7ac=)=#*jey&ZD;TwuW@aLL(&@uzFEx|U z>1FU=9mRPu&M?r2S5)9xm)YHQb>{^qdv(EF8)3(W4k`WB*bm%ao#&{G4g zoky)?Y4IaWr4L`zgY&vkSMk?c<6rdrxB`XR)ai&$y0rkGYel8pn?Khq(LQN}e#g z8yx2&e$!e=Y2sNVEDS@mLNY#feT%QAt)&gmx}Ak{YF#v?eDE>!W_GuqK75PI-*Dz1 zC&}V{#d+iQqIyVCKK97MQJ!^gj>$rmf|7XChkwlMZuNL48M)^hx^&-#8O-ip&N_zm zLp8;#r>8s_Nekf-=RoY=VpMe!IxY{x1*>yVqYs~u;v*-T0wnjXmE6yIfJOA-kG_sX z$B^r|w#iif9^x+n%Q^6pR4f$gFKxjo8Om$sji|Et{_7n z??VH>Z#VB|V~4%6Xntugk<9LHC$*JX6Ki0rseumKrjF_h-WA1xOA5VG#^xBv$t~ZZfhBKY&DZ}%BDPXd`WLV0htrRar@Qv4Ndex7=lVKKJ=u+sJ~PPtZx(j*ralyaV#_?yRmDPxBUUW_NSx z!&@$VhDlcOxHuyMt9M?;Vfygt1N&U(~P?qz1C9xNaQ)5 z+1=zW2C}N82r)L{G&B;SNgw`ZjGer;;{1gXJ7KG}qz8R?6J~c^Hl<+Ai60Nw{NQ7^`h8#sxK4aONW=A8b&P2}6zlNZ2 zJ_iwp(RUuA#XqgY*q%=6-#(&b-9nC7o`n;$yO(Q|kwG7R!I0LHFpsm1Kl%52LW46` zxmd&OZp`;xNPbB(v(!l<_-7yS&Q-L=D@tYTeq7<#R%?`v=lzvLX<>VLrrAOK6nIy& 
zwH6n@?!+`^cWXNwhs_I38B8Dk#z4-X@30p){Ws`2FA2|?-91e{hnn9lc*dr0#O!Y8 zY%3|Scz`K!>;UnJ!u9o~cuXI@>rCz~T;NVdwXv*PQ39p!5hyU?-hB0A1dXwl$IR|h zUwBK6hQ2)SU%3qPkSmhabW0&NFZiLR$aCzDQ?@ z?j}YDVal42)oQ zx3WbJ_D8G8+_P@dgW27{>6~Gp4{sBdioT7s^OH`Yzp0L_qz^xb*1|L3^rXG@=!GYs_Mvmfb*%GJ|I^9aSi#XHcEKK!CbZN#71-LLlU^6)QJ zvA>doLCo$tJlc!e{f+3P-cB~~8LwUD!abk{yq&!l=a}79CmllHqpG4k)?Gf81c`4V z|2wxTkv?+=ru7QNvmx~1J#=M^BWPke|R*F4R)L)-9fD)JK>n!O^)W*Uuqy~g%=Pxn)A9P37p@12{UWX>#EQO zrVpQcR$KfGPC=d7-MY$DoS_dtb-$wwVRmN~PM<$OMXq-_h*`YP81I{jcb_?nT<||1 zzRAEvt{$z&uvX~^Qe|G;?g%>FR+ETd?y{EI-NXwH(kk&IrVrSTbY^$IjLzV4Pd%~9 z@Rs7O%=wjgXFcsH!VYf3O#1Lk+FZdOfyR7yqz}*RPI)zZ#@>{{abFZFHDhrk>mJJK z!=Gh#r^oE>)tQ#E;>CGbGP~PxJ`r}|6{u0P7d!7DNu&?|XS$Z`9e)C0%IGX{fWf&;5alJG*g>+1(TR z@RPo(OIcIPadQ~RbU@dL6@ z&a>_bRnDp9Dat*o|M~E?(e#Ggz9TVa7Zx(RbEXf!O1?tzR;a> z!sElxoP8FN3tEU1jfpF|+#9!RB@102Vti99M$v~qqR;)o9;ULJ*QFWkwv%K6%7G@K!R~o4b~QJ`x+RFMffAI7lD9s3aLPtKY%;Y-@SK?CuJE_@C1? zWQR#EUi*dMZOm@WYplaUC1)AW?5r`#jIL?@JI-SJo3 z%Al$m&av)G~Q=<~_hdX(rv5!f{`9=)y0@h^S&G-h`_J9&%ZbX^HLn1|amCKPMg z^E8%c-GAE1h>Jn8jz0Xhc50j(K7D-5uZx!>7^lpD>iwrET;=Uj4c3ub!!Y-4t3N*~_crWkt6 z?k@IEKy|U^!rhvVv130Jl^%P8)-KiL*LM^!p1>z{1nBC<)=R3bPXOYL` zAfDOX;dL2!S=@w(fOcZcudQ~zi%fs~3n}#B4>P+nRy~aI!rhx}cTr<@H*{TK;Adl$g-7IATv%89szM|EW9UvVqU}wj0xOYiJ$lO;jp${L$>`sf> zovp97tP4JgCCu)+2B$&$0Xsl+xs$={?g@Q(`N;j_CkK$h?5_A#ChAlaWW;A0r_Ant z9(R@r2|uB{d=Ex2yX&2J1hr$->GpcaPG)x-w>rq8-yhKVatelD3&kw@@TIYOQgqo{ zs%8a>T?F3)M?b;pdE2nzK?HW2m7rC=vGhOeCnn79b_beC$B(5*8y1C9`tX&3_t8Jt zN|uyyKk5@_x#+{U9e*BUF0(HvI0<#BuhEe{d=<016Iq_p+FVOEMjgkujob;Gy%R5u zYvKH`t>mk9kWcjC%dRNNud&&<%Ixl0@BO^zQ^bWU{9VPED>|`19>*L`hU5&os5Bl(x%_Vc5_if+sw>&6z28b6(1$N6j>SuzD8y-$qSs~an@snYl*gRc zbvF^8@9ecQkAydU_<#O+%>Ei{F&xbIn`Uo0K_7l)K>>y^yKB>*#)RqzRMLn4m)YH+ z&FlbqrYQI+rmUB)|0F|&kE^x^e3D#)!h`_PZs-7~KP_?JHX?%MWZ z-^4q(scq%t-LG(=5C4SOU7OV>@bsjXsMK@+h}qrb8ap{Zr2<0^C*mHnJ7xOt`);(9 zOU&-BXYlOG?w|wSD?)7?lyh+qVT6s zH?)z$@IZ-L>n)$@!@vAaV-P>Q2Gj{kxl^x@w!yYus4 z2j?Aa(OBmti<#Z^*RzwrpqG%-yw6x0j#>2KGoKsCU1oQYqc}r;+Cu8-!@G2dL7R!u 
zP}aYNhgZy{C5?$4M(hBwG?KHXmk`(#j??$z;dzwL_+}d#Y25!+@RqgDb)<`L9v=O{ zz46sMFk*C;Ri#(kLwLfo?&M8-@w-(c&OfFH%IDxK`tTO(8u`w@ z7uMxl(JAN<2GWOr9l(7(W_Rk>+DPr+pRtQRypCTe*1kWD-<3M@f!SRV=hUt`+Dh-1 z&)IDmj|^q@uh55I_SsMhncev_yURRdF4}c;k=C)R)qhZYNv$efvNko}WB0nw^e}M*12hhtZ2Z{G@yC@++5j zpFbR=s~zw6hi&H!Stvec=Ho9_J;~TiM~HK3pE|HFvGOTuE4JZYcmzD@!)w$V$;N5y z-pXeVRA45fs>=|4i)QBI7|e6I2i>!l(m9KHEVH{y%`L@sND;i0!ZGPgBBrWUpvyHo z>FGq{yv9>H(ubc~asp3|hH$Sv4SlM=Vmy8L54_J9*}_fYMllz$J@EfN{DUt0P(N5f zrgwFf3H*Ii`pa1^H~hc>`#t!hbqH3BJBoSq;YL z;U}}ZU(LD5qz`YXL~nU^kaX?JIkn?&;YT0-kDg&j?Qj+i9xWtyg%5Y_17+-}R`i`8 z;;MTbZsbM6RrNY5G)(0nv%3{q?D9KqB3r*-Mdg-A7+v0Ecuac-hmh7%&+KmF9a={8;Y*g~q8GEf#;&_jH@6Nmo0&KAKBLPS zS7~XkC{qL3)5^1MXB&Qw#Vd(gD!*n`?g`O{@4vJbgVT5N{w@Sx(vRaRefW+sp7J-d zyCsu(XT9My(md%RGrP;uKM$?0Ek*e&zi-U$9!|9q-T3>knGlPeqode|SB54XGcj1p z9@=a_nVDlOg$gC8s*XU{K77V6d4hx_Ybj-R=kv^4-dxa=uxDrR{1S71`tVUZJ|Jk4 zg9I_Vs|xlISNiZd)#)b?XA8(_GG}t-FiU+qsjW5e-H9@YD6oW%zx6Io^{_ZMuaRHyv>FyFmAKt0=VVKoy z#jS_w@U8oWPxRsU@;*cVFP`JGn(%vG1~Qo4b+F9As7Jidc;_ago&`w}efZmrRXE>0 z6*>Q9cWS3Gh(3J#40idwr(LzfRy5kNtCK$b%{}2*a*Z9HlMKbDl#ULKi35u)RQq7SdHYay%J@~-2huXrUIN*DU@k&nUw;{-UIdycJh*#W{iHGY`N%~<}w zMxDmr%q4NKX|>~8X6 zUpcVVKq~0NJ2AW4ua=08A77$=xSecccBjkiZsk&Kan3l2OLw_{MIU}^-)hWx>L@wP z?!qSVtXr)tt^o%T%C2q@I9C{)K)l)N$Yx)UfqLGW_O+ajv&QcT{hTy zNL)IfMfBkpZD9w9)^_L}XYb9%Gf<`vAG6UV+PU zV|mEz?$v)j{1WcnO({dmRZ-~Mfv$_&eH>D>lBER!qQ>k_d1*_jqYq#8Dh#6)lJLaz zHR=NGMT2ME)7L$vY&4(oKiKob>~1i9`0pV#$j)plXPDh37P9AlX^LE}8d+yw7NE?JOr=)M4EE-LT=DTF{N7*chN8?@xG$C9}H}`tWsw-Xq{=GUgeD zVfcVTl+%aTZQ~;YnBCPqXeC$AKf+h~@J&4<;TLxm_Ulb(Ecr5P!p}A67CzDNr`X`pEDpEhLRT{IdmN7^#$uvpwHpfq840!R)RZIH%T0QFPHa&vBA-%s zb=`#$W_Ja1j$zSoO=;iDQ{tH2rO}7~!}={^tCH}O*-jR`Mj$>aBB z>l9Zp2vv~s2XvoKZe_-wjd%zCjM9gn%lnK4iJaFRTLTSU&fqb-J7RhQJr`)n;0K=4 z>_O*&KKzc)74Z9TBR==9?}o%@y&ukS0C%r-tlMPdA#aq`CCii@Ak6Omq7Q$oe?9ut(9huS)3RgiOutP}ZGI*eF}v$G>>yru zQ;|yg@P<`E^7*=>_}f*(xtyJwG$s@-pG0|tHl1qv=FIL44e2DFdkOn*30OTb92WO3 zK%G8(`YK;}GMlddcMEA;eFr8I1LJ^btc<>i`JtTeVs>|q#zfdCBY72m2?ebp5V#{A 
z#Tn(OF1C?grvjz5t+)KuT31%lhY$Ue4tU@W#8p;e$nG{0$g{4;GIud}t;*gs`tZ!| z`qGCtjA?*c4)3h_9Q>!*MOsHR;*t?})|lNXJ;}l0DXNl0A3lrO-L9rKa_jYHY&^+( zT^bXSD^J5@zmE95^OEg1c-E#5KRf<8{!~uDh6)-=K?SZ#>2*R3)KcHz{3JRIsHUFle^^oVBx3mRE(+Z~#KcGMN?t->)4vZZjaV0q2 zXe{;2?iMZcm)Z5E(qm>RXI`Q(c3v!|72d}`%dF%uv%6**6GPlvib3T$Y|ait#>ym^ z(ube5*j}QT-L2Z}DGP7&xkVrTFJ^ZS@^|7^dM!#cog|TG-LBE?<=q-3QBlrDBH#I! z?WMWn#*VlWS5f78G5(^HyrU0)%yl=uF}n*)&c(iq8lw5zLmZghZCuK;Zty$UEJ{YJ z?qS${tq^^y^hIHukNmLaz6pK!Wgd@ka6%jk;v&%$at(`=O=WpYe<`x{7rXB!(tpl1 zIMavUFLBU#@DL|wvj>&g-2(dXwpJ}J)|}>yxpPKK%Egyw_!Rr}D!?8joqn zjP<#2Vs;nyV;4f+(9FEyB)j-$Z}*mG-LbsSh~CegcJ36KX5-O7C2?8Qo-S_(dEDGq zI?;#U^MyMZ%g5ovMt*Y-tG;zC|F&g!ccp$9Ou{TDp!8 zl;Y>!qN}bi=lc|3%d=1@$E2V%`U5_?JJ8Dsk{J^`WLdnrIMIi1!R+pz4tub)?N8K2 z{m+Nj@^_UM!R#n#yAQ3-@MkibKb!NFrNW9mo9ZaJ8ed=;k&0`~?(S|sg&=bsnL;1ljM-h=d$zLc@C$ga zWXHv>aO4fYi2L@2vNo4pcD(}R-7*W#Y~RKz9nK<0Mq_f+EtIKSNOvRNeg5(l*GGo3 zFO_>7Ps8E!G#;OWo?}XYt*l{o*Fa<9+yfnHzQkuQv%7_tcfd}Ccg&C5$RuWWZ(Z5f zG+b2{73aW#_Ze|d_Tv4)My%8LpATPh-9`Ls8W25eFTOIn%b*Y6<(R5ik8_tiW_QK$ z+&?a@#KGx1P{-`<-S9l*d+AE2w)Ekd-IZ3_Na~hyEQyMTJAL>A9WTMc!$>As`H3B~ zyOk@=CHLJ;qz;S5zrAAE1%C&b%9b)<0`1@^Uul2MK+fe|z{yeJ{N5(Oaneg<+SrML zDxXvI;V%_vOR)YaguMwtXcap^I#r`A%TbmxyHf~r6J0+QaqV;veR-d8**_DHzc%B~ z!gex|pUt@~UF2~N=hQ6Iu}qbjDt&n0yK0iuKo6AJUFCTPLEJ~I>9-w{#(c}{403ww zNgcDham?=iucUMF>-q5hc=y!0m!hj~RO^00rPpzpZOnbSjGvIp%q4S~jah_Y>87HR zgeWSKicz_SD9ttLCb=u=s=37X^?U3e=<(?G{+#!DzRvSyrf4Oh^J<}*%~>}$3Y89} zF!b*&c453fPG>jfAAQ8Rk?)h~G5Gq6I7~ct4^i~P2V@VH(Kh6gh59nA?gA##4bSv z#XX17y<0(CfBw%8pF7D>u!LUnTL;kBHJr}S6PVheF7IQR!3+13#V>6omwxzNzRAcU zyYpx$M%Qe17Sj*Efb7o3z*5$JRgLO@;&F$5cx|)mNE=}+m&oq4$?nFlH4&4>awICm zVCua%#FgB~68how+Xu_F9B;XESWhOXorf{m-NZ{t(7x~#w;$U`*-Xy5^uu5MQ$w~F z(0CQ>`iefM}6SZV_zP*@JD^e6iOWvk&7A}o;kZh+e zEQ?`ZDA}DV{qS4$+3nolLxz#vjnTH17W(1aU3SAiI0DW&=dkP-?wQH%CX?NDTUkh% zLlwF|#9cUpC1r+rh$p)<+j)S!MBTWm>?kY9?wVux z&VNfmX3-Bnl>3aJby>)%SCZ>jUE~I5U8QRSWWlR;%$d3m`x3%&hkp12u9^~D;30?n z>2z_i5g+>DcRD4ZjqI*|^93kv)0Y7H;ho9uvYgE2uzn?`yT@Sy{qXI26};2xBYVm2 
z*2N8!1N6hIc9ddmc@)NO+=;`VYH*Q$_~m4GQ_j+-7RoI0k|OAn-PzP8W1`b5lqcEI zv*Ra@ecWZ(D0PWhlZzE(ckgS{P(eTZoDxTwL3VfdAsviCpK*wO_|x2H%>6eTeH2t= z%ThY8IP3oOslViAwbCn}ig2>K55=djbu>LfLz(5$X69%#eQNZ>hq)!<*2qW-f*JD*wPtDZ81?C$2=e)5QZ_>)5tPLbJGhi@>u4Cm8y%TGwaD=j=>-QS)xb#=1SoC%bF&D1=+6HnTQfvWV>N<{EMV z`r*AN?SjeZC^R}>g+(5Fw&|EKBD=dkm_02o|3UB1G5DT-_^VEL@$_rQ)5k4v_0)cSZip z$Gl(`xhfk`WOtrVGw?=DL1J3Ilv=X8tb9jV7WXeo=!Z`xyVFh1#mm|1;{Ua~{781E zwarcvKD@;2%gK00cDI#&_!B#IB)QZ}{v^8#i?Ebr`r$t=qhGcv3hL(9&{Wi0y25A?M_63pMbtPNKG5-h9+7%D`Wqc0mm*J}tV~M68{@TDn;^S>1KfPoJ-ic^@M?bv5 zLdwIxx zMn{XAY$;NcHF?LG>10M>;6a4fzrzS?C%HrhbcBBR-WNZi+%yAaWOw5Wj$(hLiY!cW zm4jq=car)`XY3mcryoAJiEa_EzoC8a3+W)c>qmBXR?Ave4tzwPN&+@+jKoR$;nzPh zkeDPN(IC4^KSfV)&uujL#v*9m4#XJTLfrMfl0tU((r}7d8%vnqCfRDjx0;TF0#8V zO{ej)h52B0FPTSn=Vjkd{?dMkn$lf}QDH`qe)#ck3}vM<^G@1>rTI)>38f!iDQpLt z%=jF%xC5)Jrcy+9my^lQ0WCxM=*kX|wULOul7Qj$4X}IBPh8*8QM;BsS@gr7{q1ku zcpJ_=*j@}h`376*hwt^2UgJDhx%<0{EK50xY|gqL8u^T${|W!Zekq&ydrPGsKHcv< z$`>DG7MGpf<;MZ~;g?s^2bxD`beEkhuxP@B@0mp=yX(m;gsAJtrfpslPj)xX&q`Lj zsKvrYcH~AyVTWZYs@@xk=TH25BfFbfNG~`2@bhlQps*$uTCVqS_B?ZHWOow+eB{+f zJ_pBN#C%2Wu;_>P(R_>>f?OHYK)~UzFG~a>_mWdG&1Oi|Ez2*H_7he z=M575(0KW!~b|q{se~^Y9?MYc5X0cHgJyIcg)V z-!rdEc6Wk)_%O2q*siA6Ut>S2ygK3R?;uKF_>1$8E_6aGNqG8ye)!m&BWPQ#C_i;N zi#eZ@b@ao3D(FH$9Xp{Xh112FkKOdc8z_57ZU{Nba9g>M`wWZdhwmo4D+Tw4kAC<}O%oaGz)l?vGS__i)Y`A3W@|L;SH7pOL>(@MV4GV|MHwT!( z<{dcw@GabD7!*2-i3zjF1CPM+|8r{AS-6y_Brd5gQbl&>Oh0_^FCFkcun%AU6^_b` z0u<5@?-S=CdSrKR=Gw@p4NvgMEeR*{Bk<7R0=?1ta*gb6|8@FG7nn24&0P5GI9v*% zM@X##=XLr>CfS`L*gU8*1y+aa5BAsbWF^s^^(sQbYyvT5dz5W z=FtzoX#Y#(&9{?Pd;R3;5a!hAhkss^i-z9e=pC5`lT-g<3H|W5xkGE$bQYUepXme6 zz!J{7Ey>v!d4~Ipk*>16gPuG3;lCc(hMCJ!(XW)OVe%99a%g5 z@UQ=E!MU&Y!d-Veo@ks#z$`5>$>3g>?5;eypNtsLKo?vBat=n~Vnqoa%{P?SH+;mE z?5=C2Dg7U}5m>(iKE*rGhkp1{9aB+r;I5;=M`rFalur8LGpiy|Yn(v$*+W>a=qHQF z?xxjx%F`WM;{3yD+%=~!Mt3h74O>v9(_hY!-Az+<6VMNzdgd6q?~xaKq~l%yUHA0E zui>nFa<7w23;qDjKJ2sRth+lS2NfSw#bm3Sl#|_kOFw){aWiJwq+oW?b`)JcgCP3h z?>c!&X&&=VC#=Lvp&mW-!?%*%jXZe;2`h}Glb`A6B`_NpyIb!+@c>o 
zQqxSde&J`^FFta6tAXsJAAaHwk(k^o5$Y}U2u-t==X&hVn!uV08y+9`}9yVLhg z#fk~7sH`I^CA$lm%UPFx_(-3luqL}(6`g^~u+ONWA0FIi9By=!{#Nf0UYf>!53)bs zT*R2EOO!Ql(+=`ZHo{JJZGHvx;;lW|UD~!Hyo#n%Fw;vOuJsj@E0&^PSA#_Q;e!jJ z(4JO`ck6n~rX=z$vO9;>eIz2d0!`~;aCk@@Lg|N}rED%;WOu{Ve5C)c`f`?jczd$D zK1%GUG<$;e{x%W_z89ByNc{|UKXex0NgVfs^usqswL|mB0I4RsTUpF!0{!r#{?5W2 zvb(IV!&J>Fa39U?U^9QowsDjzwQu1P#=9@FJI&iCu*Q$suv7GBg!zd@o~>Andx7A$ zyJ6Wo0&6Fo#YOtzACcWP1n?cH(n3tnKEOKq;d_ma#{LB66*e1-**p3(=$Nos$@`4C zWL{O#DD6eB=BO$-yfc>)vb(=yz2%pAda^3^9Bj$%>{NCme#|p=x7f-tvb#|M%&CDL zAY=0pPj=Un#(YLaCzkx|Aosbi_{orW##xHeI6M=1+-KCz%|wHWl5F{w?j*9i8$k}T zA-NN8hwXvf#`=1F@1)z<|O2=i-4`|d0e3%-aW-z%0Kds z>9M(NO}mc_`r)5Ei-yCFa!d_3k=VdN@|uo`Kb9KH{YBTYd7P5}) kPLu4eZoDpY48{0{?Cw_``r$XcKf+yDRo literal 0 HcmV?d00001 diff --git a/ngraph/test/runtime/ie/unit_test.manifest b/ngraph/test/runtime/ie/unit_test.manifest index b248835517a23e..f2ae030e8c3a67 100644 --- a/ngraph/test/runtime/ie/unit_test.manifest +++ b/ngraph/test/runtime/ie/unit_test.manifest @@ -1466,6 +1466,8 @@ IE_GPU.matmul_2x2_2x2 IE_GPU.matmul_2x3_3x3 IE_GPU.matmul_3x2_3x3_transpose IE_GPU.matmul_3x2_2x3_transpose +IE_GPU.region_yolo_v2_caffe +IE_GPU.region_yolo_v3_mxnet # Unsupported collapse op with dynamic shape IE_GPU.builder_opset1_collapse_dyn_shape diff --git a/ngraph/test/runtime/interpreter/int_executable.hpp b/ngraph/test/runtime/interpreter/int_executable.hpp index 0070aaab1dd75f..d78518810df042 100644 --- a/ngraph/test/runtime/interpreter/int_executable.hpp +++ b/ngraph/test/runtime/interpreter/int_executable.hpp @@ -77,6 +77,7 @@ #include "ngraph/runtime/reference/prior_box.hpp" #include "ngraph/runtime/reference/product.hpp" #include "ngraph/runtime/reference/quantize.hpp" +#include "ngraph/runtime/reference/region_yolo.hpp" #include "ngraph/runtime/reference/relu.hpp" #include "ngraph/runtime/reference/reorg_yolo.hpp" #include "ngraph/runtime/reference/replace_slice.hpp" @@ -1187,6 +1188,19 @@ class INTERPRETER_BACKEND_API 
ngraph::runtime::interpreter::INTExecutable : publ break; } + case OP_TYPEID::RegionYolo_v0: + { + const op::RegionYolo* region_yolo = static_cast(&node); + reference::region_yolo(args[0]->get_data_ptr(), + out[0]->get_data_ptr(), + args[0]->get_shape(), + region_yolo->get_num_coords(), + region_yolo->get_num_classes(), + region_yolo->get_num_regions(), + region_yolo->get_do_softmax(), + region_yolo->get_mask()); + break; + } case OP_TYPEID::Relu: { size_t element_count = shape_size(node.get_output_shape(0)); diff --git a/ngraph/test/runtime/interpreter/opset_int_tbl.hpp b/ngraph/test/runtime/interpreter/opset_int_tbl.hpp index de33cda40beaa4..4cfe6693f17e4b 100644 --- a/ngraph/test/runtime/interpreter/opset_int_tbl.hpp +++ b/ngraph/test/runtime/interpreter/opset_int_tbl.hpp @@ -21,6 +21,7 @@ #define ID_SUFFIX(NAME) NAME##_v0 NGRAPH_OP(CTCGreedyDecoder, ngraph::op::v0) NGRAPH_OP(DetectionOutput, op::v0) +NGRAPH_OP(RegionYolo, op::v0) NGRAPH_OP(ReorgYolo, op::v0) NGRAPH_OP(RNNCell, op::v0) #undef ID_SUFFIX From c5e9ebc05c9362182388223ea84465cd47e5a5ad Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Fri, 16 Oct 2020 06:57:30 +0300 Subject: [PATCH 07/35] [NGRAPH] Fix UNITY build under Windows (#2678) --- .../frontend/onnx_import/include/onnx_import/core/attribute.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ngraph/frontend/onnx_import/include/onnx_import/core/attribute.hpp b/ngraph/frontend/onnx_import/include/onnx_import/core/attribute.hpp index d9151cfd22a698..632a829fb64079 100644 --- a/ngraph/frontend/onnx_import/include/onnx_import/core/attribute.hpp +++ b/ngraph/frontend/onnx_import/include/onnx_import/core/attribute.hpp @@ -88,7 +88,7 @@ namespace ngraph template inline T get_value(const ONNX_NAMESPACE::AttributeProto& attribute) { - throw error::attribute::UnsupportedType{attribute.type()}; + throw ngraph::onnx_import::error::attribute::UnsupportedType{attribute.type()}; } template <> From 
5501f2c66d493b25b416c29492a7df05beb0ebfe Mon Sep 17 00:00:00 2001 From: azhogov Date: Fri, 16 Oct 2020 08:19:46 +0300 Subject: [PATCH 08/35] Set chmod +x for model_zoo_preprocess.sh --- ngraph/python/tests/test_onnx/model_zoo_preprocess.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 ngraph/python/tests/test_onnx/model_zoo_preprocess.sh diff --git a/ngraph/python/tests/test_onnx/model_zoo_preprocess.sh b/ngraph/python/tests/test_onnx/model_zoo_preprocess.sh old mode 100644 new mode 100755 From ddd9cf6bf56c6f2808e418fc2b9b6629aead4689 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Karzy=C5=84ski?= <4430709+postrational@users.noreply.github.com> Date: Fri, 16 Oct 2020 08:44:03 +0200 Subject: [PATCH 09/35] Workaround for Cython issue on Python 3.8 (#2684) --- .../python/src/openvino/inference_engine/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt b/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt index 9ce70b546629d3..2211d632d185a9 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt @@ -32,7 +32,7 @@ endforeach() function(python_disable_deprecated_warnings) disable_deprecated_warnings() - set(pyx_file "${CMAKE_CURRENT_BINARY_DIR}/ie_api.cxx") + set(pyx_file "${CMAKE_CURRENT_BINARY_DIR}/ie_api.cxx" "${CMAKE_CURRENT_BINARY_DIR}/constants.cxx") set_source_files_properties(${pyx_file} PROPERTIES COMPILE_FLAGS ${ie_c_cxx_deprecated}) endfunction() From 73fceedf1d4dc3325d01f5bdeb78ee2444d6537a Mon Sep 17 00:00:00 2001 From: Egor Churaev Date: Fri, 16 Oct 2020 10:14:22 +0300 Subject: [PATCH 10/35] [IE CLDNN] Add FP16 axes precision to Interpolate-4 (#2681) JIRA: 40805 --- inference-engine/src/cldnn_engine/cldnn_program.cpp | 4 ++++ 1 file changed, 4 insertions(+) 
diff --git a/inference-engine/src/cldnn_engine/cldnn_program.cpp b/inference-engine/src/cldnn_engine/cldnn_program.cpp index b78288bbfda1b1..cda4ab7af4c72d 100644 --- a/inference-engine/src/cldnn_engine/cldnn_program.cpp +++ b/inference-engine/src/cldnn_engine/cldnn_program.cpp @@ -3487,6 +3487,10 @@ void Program::CreateInterpolatePrimitive(cldnn::topology& topology, InferenceEng auto data = constantBlob->buffer().as(); for (size_t i = 0; i < constantBlob->size(); ++i) scales.push_back(data[i]); + } else if (axesPrecision == InferenceEngine::Precision::FP16) { + auto data = static_cast(constantBlob->buffer()); + for (size_t i = 0; i < constantBlob->size(); ++i) + scales.push_back(cldnn::half_to_float(data[i])); } else { THROW_IE_EXCEPTION << layer->name << " Incorrect scales input precision"; } From a4584d77a29f3fa16466207b856bcb41448b7334 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Fri, 16 Oct 2020 10:58:38 +0300 Subject: [PATCH 11/35] Added apiValidator post-build checks for WCOS compliance (#2657) * Added apiValidator post-build checks for WCOS compiance * WA for cmake * Additional checks for old WDK version --- cmake/api_validator.cmake | 117 ++++++++++++++++++ cmake/developer_package.cmake | 1 + cmake/uwp.toolchain.cmake | 4 +- inference-engine/src/CMakeLists.txt | 2 +- .../src/gna_plugin/CMakeLists.txt | 26 ++-- .../src/hetero_plugin/CMakeLists.txt | 2 + .../src/inference_engine/CMakeLists.txt | 2 + .../src/legacy_api/CMakeLists.txt | 2 + .../CMakeLists.txt | 2 + .../src/mkldnn_plugin/CMakeLists.txt | 2 + .../src/multi_device/CMakeLists.txt | 4 +- .../src/preprocessing/CMakeLists.txt | 2 + .../src/readers/ir_reader/CMakeLists.txt | 2 + .../src/readers/ir_reader_v7/CMakeLists.txt | 2 + .../src/readers/onnx_reader/CMakeLists.txt | 2 + .../src/transformations/CMakeLists.txt | 2 + ngraph/core/CMakeLists.txt | 4 + 17 files changed, 167 insertions(+), 11 deletions(-) create mode 100644 cmake/api_validator.cmake diff --git a/cmake/api_validator.cmake 
b/cmake/api_validator.cmake new file mode 100644 index 00000000000000..b465ad17fc0d12 --- /dev/null +++ b/cmake/api_validator.cmake @@ -0,0 +1,117 @@ +# Copyright (C) 2020 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +if(WIN32) + set(PROGRAMFILES_ENV "ProgramFiles(X86)") + file(TO_CMAKE_PATH $ENV{${PROGRAMFILES_ENV}} PROGRAMFILES) + set(UWP_SDK_PATH "${PROGRAMFILES}/Windows Kits/10/bin/${CMAKE_VS_WINDOWS_TARGET_PLATFORM_VERSION}/x64") + + message(STATUS "Trying to find apivalidator in: ${UWP_SDK_PATH}") + find_host_program(UWP_API_VALIDATOR + NAMES apivalidator + PATHS "${UWP_SDK_PATH}" + DOC "ApiValidator for UWP compliance") + + if(UWP_API_VALIDATOR) + message(STATUS "Found apivalidator: ${UWP_API_VALIDATOR}") + endif() +endif() + +function(_ie_add_api_validator_post_build_step_recursive) + cmake_parse_arguments(API_VALIDATOR "" "TARGET" "" ${ARGN}) + + list(APPEND API_VALIDATOR_TARGETS ${API_VALIDATOR_TARGET}) + set(API_VALIDATOR_TARGETS ${API_VALIDATOR_TARGETS} PARENT_SCOPE) + + get_target_property(IS_IMPORTED ${API_VALIDATOR_TARGET} IMPORTED) + if(IS_IMPORTED) + return() + endif() + + get_target_property(LIBRARY_TYPE ${API_VALIDATOR_TARGET} TYPE) + if(LIBRARY_TYPE STREQUAL "EXECUTABLE" OR LIBRARY_TYPE STREQUAL "SHARED_LIBRARY") + get_target_property(LINKED_LIBRARIES ${API_VALIDATOR_TARGET} LINK_LIBRARIES) + if(LINKED_LIBRARIES) + foreach(ITEM IN LISTS LINKED_LIBRARIES) + if(NOT TARGET ${ITEM}) + continue() + endif() + get_target_property(LIBRARY_TYPE_DEPENDENCY ${ITEM} TYPE) + if(LIBRARY_TYPE_DEPENDENCY STREQUAL "SHARED_LIBRARY") + _ie_add_api_validator_post_build_step_recursive(TARGET ${ITEM}) + endif() + endforeach() + endif() + endif() + + set(API_VALIDATOR_TARGETS ${API_VALIDATOR_TARGETS} PARENT_SCOPE) +endfunction() + +set(VALIDATED_LIBRARIES "" CACHE INTERNAL "") + +function(_ie_add_api_validator_post_build_step) + set(UWP_API_VALIDATOR_APIS "${PROGRAMFILES}/Windows Kits/10/build/universalDDIs/x64/UniversalDDIs.xml") + 
set(UWP_API_VALIDATOR_EXCLUSION "${UWP_SDK_PATH}/BinaryExclusionlist.xml") + + if(NOT UWP_API_VALIDATOR OR (WINDOWS_STORE OR WINDOWS_PHONE) OR + NOT EXISTS UWP_API_VALIDATOR_APIS OR NOT EXISTS UWP_API_VALIDATOR_EXCLUSION) + return() + endif() + + cmake_parse_arguments(API_VALIDATOR "" "TARGET" "" ${ARGN}) + + if(NOT API_VALIDATOR_TARGET) + message(FATAL_ERROR "RunApiValidator requires TARGET to validate!") + endif() + + if(NOT TARGET ${API_VALIDATOR_TARGET}) + message(FATAL_ERROR "${API_VALIDATOR_TARGET} is not a TARGET in the project tree.") + endif() + + # collect targets + + _ie_add_api_validator_post_build_step_recursive(TARGET ${API_VALIDATOR_TARGET}) + + # remove targets which were tested before + + foreach(item IN LISTS VALIDATED_LIBRARIES) + list(REMOVE_ITEM API_VALIDATOR_TARGETS ${item}) + endforeach() + + list(REMOVE_DUPLICATES API_VALIDATOR_TARGETS) + + if(NOT API_VALIDATOR_TARGETS) + return() + endif() + + # generate rules + + foreach(target IN LISTS API_VALIDATOR_TARGETS) + list(APPEND commands + COMMAND "${UWP_API_VALIDATOR}" + -SupportedApiXmlFiles:${UWP_API_VALIDATOR_APIS} + -BinaryExclusionListXmlFile:${UWP_API_VALIDATOR_EXCLUSION} + -StrictCompliance:TRUE + -DriverPackagePath:$) + endforeach() + + # apply rules + + add_custom_command(TARGET ${API_VALIDATOR_TARGET} POST_BUILD + ${commands} + COMMENT "[apiValidator] Check ${API_VALIDATOR_TARGET} and its dependencies for WCOS compatibility" + VERBATIM) + + # update list of validated libraries + + list(APPEND VALIDATED_LIBRARIES ${API_VALIDATOR_TARGETS}) + set(VALIDATED_LIBRARIES "${VALIDATED_LIBRARIES}" CACHE INTERNAL "" FORCE) +endfunction() + +# +# ie_add_api_validator_post_build_step(TARGET ) +# +macro(ie_add_api_validator_post_build_step) + _ie_add_api_validator_post_build_step(${ARGV}) +endmacro() diff --git a/cmake/developer_package.cmake b/cmake/developer_package.cmake index 8a2178659cbc9f..03e8e11c03ae82 100644 --- a/cmake/developer_package.cmake +++ b/cmake/developer_package.cmake @@ -237,6 
+237,7 @@ include(os_flags) include(sanitizer) include(cross_compiled_func) include(faster_build) +include(api_validator) function(set_ci_build_number) set(OpenVINO_MAIN_SOURCE_DIR "${CMAKE_SOURCE_DIR}") diff --git a/cmake/uwp.toolchain.cmake b/cmake/uwp.toolchain.cmake index 3c11d0311dcdc9..26f9b61a488ea9 100644 --- a/cmake/uwp.toolchain.cmake +++ b/cmake/uwp.toolchain.cmake @@ -4,11 +4,11 @@ set(CMAKE_SYSTEM_NAME WindowsStore) -if (NOT DEFINED CMAKE_SYSTEM_VERSION) +if(NOT DEFINED CMAKE_SYSTEM_VERSION) set(CMAKE_SYSTEM_VERSION 10.0) endif() -if (NOT DEFINED CMAKE_SYSTEM_PROCESSOR) +if(NOT DEFINED CMAKE_SYSTEM_PROCESSOR) set(CMAKE_SYSTEM_PROCESSOR ${CMAKE_HOST_SYSTEM_PROCESSOR}) endif() diff --git a/inference-engine/src/CMakeLists.txt b/inference-engine/src/CMakeLists.txt index e6f827715d60f6..65cbcf626f0c78 100644 --- a/inference-engine/src/CMakeLists.txt +++ b/inference-engine/src/CMakeLists.txt @@ -20,7 +20,7 @@ if(ENABLE_VPU) add_subdirectory(vpu) endif() -if (ENABLE_GNA) +if(ENABLE_GNA) add_subdirectory(gna_plugin) endif() diff --git a/inference-engine/src/gna_plugin/CMakeLists.txt b/inference-engine/src/gna_plugin/CMakeLists.txt index ae0cbbd287671a..36fe16a2eacd78 100644 --- a/inference-engine/src/gna_plugin/CMakeLists.txt +++ b/inference-engine/src/gna_plugin/CMakeLists.txt @@ -14,29 +14,40 @@ addVersionDefines(gna_plugin_entry_points.cpp CI_BUILD_NUMBER) find_package(libGNA) -ie_add_plugin(NAME ${TARGET_NAME} - DEVICE_NAME "GNA" - SOURCES ${SOURCES} ${HEADERS}) - if(GNA_LIBRARY_VERSION STREQUAL "GNA2") - SET(GNA_LIBRARY_VERSION_NUMBER 2) + set(GNA_LIBRARY_VERSION_NUMBER 2) else() - SET(GNA_LIBRARY_VERSION_NUMBER 1) + set(GNA_LIBRARY_VERSION_NUMBER 1) endif() -#saving rpath to GNA shared library be used by CI +# +# Shared plugin library +# + +ie_add_plugin(NAME ${TARGET_NAME} + DEVICE_NAME "GNA" + SOURCES ${SOURCES} ${HEADERS}) + +# saving rpath to GNA shared library be used by CI log_rpath_from_dir(GNA ${libGNA_LIBRARIES_BASE_PATH}) 
target_link_libraries(${TARGET_NAME} PRIVATE inference_engine Threads::Threads libGNA) target_include_directories(${TARGET_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) + target_compile_definitions(${TARGET_NAME} PRIVATE _NO_MKL_ PUBLIC GNA_LIB_VER=${GNA_LIBRARY_VERSION_NUMBER}) +ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) + +# +# Static version for tests +# add_library(${TARGET_NAME}_test_static STATIC ${SOURCES} ${HEADERS}) + target_compile_definitions(${TARGET_NAME}_test_static PRIVATE _NO_MKL_ @@ -45,6 +56,7 @@ target_compile_definitions(${TARGET_NAME}_test_static GNA_LIB_VER=${GNA_LIBRARY_VERSION_NUMBER} INTEGER_LOW_P USE_STATIC_IE) + target_link_libraries(${TARGET_NAME}_test_static PUBLIC inference_engine_preproc_s inference_engine_lp_transformations libGNA::API) target_include_directories(${TARGET_NAME}_test_static PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) set_target_properties(${TARGET_NAME}_test_static PROPERTIES COMPILE_PDB_NAME ${TARGET_NAME}_test_static) diff --git a/inference-engine/src/hetero_plugin/CMakeLists.txt b/inference-engine/src/hetero_plugin/CMakeLists.txt index 3777a0ece9b751..7894e951e25e19 100644 --- a/inference-engine/src/hetero_plugin/CMakeLists.txt +++ b/inference-engine/src/hetero_plugin/CMakeLists.txt @@ -18,4 +18,6 @@ ie_faster_build(${TARGET_NAME} target_link_libraries(${TARGET_NAME} PRIVATE inference_engine ade pugixml ${NGRAPH_LIBRARIES} inference_engine_transformations) +ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) + set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION ${ENABLE_LTO}) diff --git a/inference-engine/src/inference_engine/CMakeLists.txt b/inference-engine/src/inference_engine/CMakeLists.txt index ae0e2b0d7cbeb7..0ba769e5813339 100644 --- a/inference-engine/src/inference_engine/CMakeLists.txt +++ b/inference-engine/src/inference_engine/CMakeLists.txt @@ -187,6 +187,8 @@ target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_INFERENCE_ENGINE_API 
ie_register_plugins(MAIN_TARGET ${TARGET_NAME} POSSIBLE_PLUGINS MultiDevicePlugin HeteroPlugin clDNNPlugin GNAPlugin MKLDNNPlugin myriadPlugin) +ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) + # Static library used for unit tests which are always built add_library(${TARGET_NAME}_s STATIC diff --git a/inference-engine/src/legacy_api/CMakeLists.txt b/inference-engine/src/legacy_api/CMakeLists.txt index 6ae6289969a09a..a7fb77285886d1 100644 --- a/inference-engine/src/legacy_api/CMakeLists.txt +++ b/inference-engine/src/legacy_api/CMakeLists.txt @@ -54,6 +54,8 @@ target_link_libraries(${TARGET_NAME} PRIVATE ${CMAKE_DL_LIBS} ${NGRAPH_LIBRARIES add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}) +ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) + # LTO set_target_properties(${TARGET_NAME} ${TARGET_NAME}_obj diff --git a/inference-engine/src/low_precision_transformations/CMakeLists.txt b/inference-engine/src/low_precision_transformations/CMakeLists.txt index fac16665b65e6a..adb0ea2a6ff697 100644 --- a/inference-engine/src/low_precision_transformations/CMakeLists.txt +++ b/inference-engine/src/low_precision_transformations/CMakeLists.txt @@ -35,6 +35,8 @@ target_include_directories(${TARGET_NAME} PUBLIC ${PUBLIC_HEADERS_DIR} add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}) +ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) + # LTO set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION ${ENABLE_LTO}) diff --git a/inference-engine/src/mkldnn_plugin/CMakeLists.txt b/inference-engine/src/mkldnn_plugin/CMakeLists.txt index 5f19256dd46925..4e87370cc89219 100644 --- a/inference-engine/src/mkldnn_plugin/CMakeLists.txt +++ b/inference-engine/src/mkldnn_plugin/CMakeLists.txt @@ -187,6 +187,8 @@ cross_compiled_file(${TARGET_NAME} NAMESPACE InferenceEngine::Extensions::Cpu::XARCH ) +ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) + # add test object library 
add_library(${TARGET_NAME}_obj OBJECT ${SOURCES} ${HEADERS}) diff --git a/inference-engine/src/multi_device/CMakeLists.txt b/inference-engine/src/multi_device/CMakeLists.txt index 65fc8084190bfc..92a0a71edf37fc 100644 --- a/inference-engine/src/multi_device/CMakeLists.txt +++ b/inference-engine/src/multi_device/CMakeLists.txt @@ -16,4 +16,6 @@ target_link_libraries(${TARGET_NAME} PRIVATE inference_engine) set_ie_threading_interface_for(${TARGET_NAME}) -set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION ${ENABLE_LTO}) \ No newline at end of file +ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) + +set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION ${ENABLE_LTO}) diff --git a/inference-engine/src/preprocessing/CMakeLists.txt b/inference-engine/src/preprocessing/CMakeLists.txt index b39bf1bd4cd7d1..70bab36cb869bd 100644 --- a/inference-engine/src/preprocessing/CMakeLists.txt +++ b/inference-engine/src/preprocessing/CMakeLists.txt @@ -165,6 +165,8 @@ endif() add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}) +ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) + # Static library used for unit tests which are always built add_library(${TARGET_NAME}_s STATIC diff --git a/inference-engine/src/readers/ir_reader/CMakeLists.txt b/inference-engine/src/readers/ir_reader/CMakeLists.txt index ff8ac49a8a0f20..51ef53001120a7 100644 --- a/inference-engine/src/readers/ir_reader/CMakeLists.txt +++ b/inference-engine/src/readers/ir_reader/CMakeLists.txt @@ -33,6 +33,8 @@ target_link_libraries(${TARGET_NAME} PRIVATE ${NGRAPH_LIBRARIES} pugixml openvino::itt) +ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) + set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION ${ENABLE_LTO}) # code style diff --git a/inference-engine/src/readers/ir_reader_v7/CMakeLists.txt b/inference-engine/src/readers/ir_reader_v7/CMakeLists.txt index f3e281ec2c6150..639c79d8370d95 100644 --- 
a/inference-engine/src/readers/ir_reader_v7/CMakeLists.txt +++ b/inference-engine/src/readers/ir_reader_v7/CMakeLists.txt @@ -31,6 +31,8 @@ target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/" target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_reader_api inference_engine_plugin_api inference_engine pugixml openvino::itt) +ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) + set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION ${ENABLE_LTO}) # code style diff --git a/inference-engine/src/readers/onnx_reader/CMakeLists.txt b/inference-engine/src/readers/onnx_reader/CMakeLists.txt index 5aaffa9b3e9ba2..b2f6d9e3af6b57 100644 --- a/inference-engine/src/readers/onnx_reader/CMakeLists.txt +++ b/inference-engine/src/readers/onnx_reader/CMakeLists.txt @@ -22,6 +22,8 @@ target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_INFERENCE_ENGINE_PLU target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_reader_api onnx_importer inference_engine) +ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) + set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION ${ENABLE_LTO}) # code style diff --git a/inference-engine/src/transformations/CMakeLists.txt b/inference-engine/src/transformations/CMakeLists.txt index f855db9db1d710..ecb265d304890d 100644 --- a/inference-engine/src/transformations/CMakeLists.txt +++ b/inference-engine/src/transformations/CMakeLists.txt @@ -32,6 +32,8 @@ target_include_directories(${TARGET_NAME} PUBLIC ${PUBLIC_HEADERS_DIR} add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}) +ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) + # LTO set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION ${ENABLE_LTO}) diff --git a/ngraph/core/CMakeLists.txt b/ngraph/core/CMakeLists.txt index c1e3f699a8b2a6..018cb2bbd992b6 100644 --- a/ngraph/core/CMakeLists.txt +++ b/ngraph/core/CMakeLists.txt @@ -43,6 +43,10 @@ 
if(COMMAND ie_faster_build) ) endif() +if(COMMAND ie_add_api_validator_post_build_step) + ie_add_api_validator_post_build_step(TARGET ngraph) +endif() + set_target_properties(ngraph PROPERTIES CXX_VISIBILITY_PRESET hidden C_VISIBILITY_PRESET hidden From 0dde02e44f128a52cc47bd4587d1eeeffc82e15d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Do=C5=82bniak?= Date: Fri, 16 Oct 2020 11:30:00 +0200 Subject: [PATCH 12/35] Use GatherND-5 in the onnx_importer (#2634) --- ngraph/frontend/onnx_import/CMakeLists.txt | 2 -- ngraph/frontend/onnx_import/src/op/gather_nd.cpp | 11 ++++++----- ngraph/frontend/onnx_import/src/ops_bridge.cpp | 4 ++-- ngraph/test/runtime/ie/unit_test.manifest | 2 +- 4 files changed, 9 insertions(+), 10 deletions(-) diff --git a/ngraph/frontend/onnx_import/CMakeLists.txt b/ngraph/frontend/onnx_import/CMakeLists.txt index 6594408986dc5b..906bfa0889ae40 100644 --- a/ngraph/frontend/onnx_import/CMakeLists.txt +++ b/ngraph/frontend/onnx_import/CMakeLists.txt @@ -22,12 +22,10 @@ file(GLOB_RECURSE PUBLIC_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp) # Remove disabled ops list(REMOVE_ITEM LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/op/conv_integer.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/src/op/gather_nd.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/op/quant_conv.cpp ) list(REMOVE_ITEM PUBLIC_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/onnx_import/op/conv_integer.hpp - ${CMAKE_CURRENT_SOURCE_DIR}/include/onnx_import/op/gather_nd.hpp ${CMAKE_CURRENT_SOURCE_DIR}/include/onnx_import/op/quant_conv.hpp ) diff --git a/ngraph/frontend/onnx_import/src/op/gather_nd.cpp b/ngraph/frontend/onnx_import/src/op/gather_nd.cpp index 3fdc6894940948..0d187f0c30060f 100644 --- a/ngraph/frontend/onnx_import/src/op/gather_nd.cpp +++ b/ngraph/frontend/onnx_import/src/op/gather_nd.cpp @@ -17,7 +17,7 @@ // Disabled in CMakeList // Update to higher opset required -#include "ngraph/opsets/opset0.hpp" +#include "onnx_import/default_opset.hpp" #include "onnx_import/utils/common.hpp" namespace 
ngraph @@ -30,11 +30,12 @@ namespace ngraph { OutputVector gather_nd(const Node& node) { - OutputVector ng_inputs{node.get_ng_inputs()}; - auto data = ng_inputs.at(0); - auto indices = ng_inputs.at(1); + const OutputVector ng_inputs{node.get_ng_inputs()}; + const auto data = ng_inputs.at(0); + const auto indices = ng_inputs.at(1); + const auto batch_dims = node.get_attribute_value("batch_dims", 0); - return {std::make_shared(data, indices)}; + return {std::make_shared(data, indices, batch_dims)}; } } // namespace set_1 diff --git a/ngraph/frontend/onnx_import/src/ops_bridge.cpp b/ngraph/frontend/onnx_import/src/ops_bridge.cpp index 5d53a8d7086cbb..2e896cd5e18301 100644 --- a/ngraph/frontend/onnx_import/src/ops_bridge.cpp +++ b/ngraph/frontend/onnx_import/src/ops_bridge.cpp @@ -60,7 +60,7 @@ #include "onnx_import/op/flatten.hpp" #include "onnx_import/op/floor.hpp" #include "onnx_import/op/gather.hpp" -// #include "onnx_import/op/gather_nd.hpp" +#include "onnx_import/op/gather_nd.hpp" #include "onnx_import/op/gemm.hpp" #include "onnx_import/op/global_average_pool.hpp" #include "onnx_import/op/global_max_pool.hpp" @@ -343,7 +343,7 @@ namespace ngraph REGISTER_OPERATOR("Flatten", 1, flatten); REGISTER_OPERATOR("Floor", 1, floor); REGISTER_OPERATOR("Gather", 1, gather); - // REGISTER_OPERATOR("GatherND", 1, gather_nd); + REGISTER_OPERATOR("GatherND", 1, gather_nd); REGISTER_OPERATOR("Gemm", 1, gemm); REGISTER_OPERATOR("Gemm", 6, gemm); REGISTER_OPERATOR("GlobalAveragePool", 1, global_average_pool); diff --git a/ngraph/test/runtime/ie/unit_test.manifest b/ngraph/test/runtime/ie/unit_test.manifest index f2ae030e8c3a67..d565eab733ddf9 100644 --- a/ngraph/test/runtime/ie/unit_test.manifest +++ b/ngraph/test/runtime/ie/unit_test.manifest @@ -40,7 +40,7 @@ onnx_model_matmul_integer_4d_no_zero_point onnx_model_qlinear_matmul onnx_model_qlinear_matmul_3d -# Not supported ONNX op: GatherND +# The indices input type i64 is not supported by the CPU plugin onnx_model_gatherND_int32 
onnx_model_gatherND_float From 5eee1ea925a0beb9e7835e45fb677c598b4a6b67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Do=C5=82bniak?= Date: Fri, 16 Oct 2020 11:30:20 +0200 Subject: [PATCH 13/35] Avoid unnecessary Reshape in ONNX Softmax impl (#2686) --- ngraph/frontend/onnx_import/src/op/softmax.cpp | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/ngraph/frontend/onnx_import/src/op/softmax.cpp b/ngraph/frontend/onnx_import/src/op/softmax.cpp index b0c4fe4081cf8f..87c7e5192f7521 100644 --- a/ngraph/frontend/onnx_import/src/op/softmax.cpp +++ b/ngraph/frontend/onnx_import/src/op/softmax.cpp @@ -33,16 +33,11 @@ namespace ngraph const auto coerced_data = ngraph::builder::opset1::flatten(data, axis); const auto axis_1 = default_opset::Constant::create(element::i64, Shape{1}, {1}); - const auto max = std::make_shared(coerced_data, axis_1); - - // equivalent to numpy's max.reshape((-1,1)) - const auto reshape_pattern = - default_opset::Constant::create(element::i64, Shape{2}, {0, 1}); - const auto reshaped_max = - std::make_shared(max, reshape_pattern, true); + const auto max = + std::make_shared(coerced_data, axis_1, true); const auto data_minus_max = - std::make_shared(coerced_data, reshaped_max); + std::make_shared(coerced_data, max); const auto result = std::make_shared(data_minus_max, 1); if (data.get_partial_shape().is_static()) From fd774f0c586998e21867cdfb53f578d67a0d859b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Karzy=C5=84ski?= <4430709+postrational@users.noreply.github.com> Date: Fri, 16 Oct 2020 11:34:26 +0200 Subject: [PATCH 14/35] Remove segfault marks form test_unary_op_scalar and test_range tests (#2692) Co-authored-by: Ewa21 --- ngraph/python/tests/test_ngraph/test_ops_unary.py | 1 - ngraph/python/tests/test_ngraph/test_sequence_processing.py | 5 ++--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/ngraph/python/tests/test_ngraph/test_ops_unary.py 
b/ngraph/python/tests/test_ngraph/test_ops_unary.py index fac5005ba8340e..951bb7d0c25c06 100644 --- a/ngraph/python/tests/test_ngraph/test_ops_unary.py +++ b/ngraph/python/tests/test_ngraph/test_ops_unary.py @@ -59,7 +59,6 @@ def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end): assert np.allclose(result, expected, rtol=0.001) -@pytest.mark.skip(reason="Segmentation fault") @pytest.mark.parametrize( "ng_api_fn, numpy_fn, input_data", [ diff --git a/ngraph/python/tests/test_ngraph/test_sequence_processing.py b/ngraph/python/tests/test_ngraph/test_sequence_processing.py index 19ba7b9a97613a..4a7a698fca15e7 100644 --- a/ngraph/python/tests/test_ngraph/test_sequence_processing.py +++ b/ngraph/python/tests/test_ngraph/test_sequence_processing.py @@ -14,12 +14,11 @@ # limitations under the License. # ****************************************************************************** import numpy as np -import pytest import ngraph as ng from tests.runtime import get_runtime from tests.test_ngraph.util import run_op_node -from tests import xfail_issue_36478 +from tests import xfail_issue_36478, xfail_issue_35926 def test_onehot(): @@ -47,7 +46,7 @@ def test_one_hot(): assert np.allclose(result, excepted) -@pytest.mark.skip(reason="Segmentation fault") +@xfail_issue_35926 def test_range(): start = 5 stop = 35 From 95d7c296289e7fa2c1bb27b7bdfe1a2383121737 Mon Sep 17 00:00:00 2001 From: Kamil Magierski Date: Fri, 16 Oct 2020 12:23:32 +0200 Subject: [PATCH 15/35] [GNA] Fix remove layer + identity layer insertion (#2626) * [GNA] Fix remove layer + identity layer insertion test stub Test impl style hpp style * disable FP16 for GPU --- .../src/gna_plugin/gna_graph_compiler.cpp | 2 +- .../src/gna_plugin/gna_graph_tools.hpp | 11 +- .../gna_plugin/optimizer/gna_pass_manager.cpp | 2 +- .../subgraph_tests/perm_conv_perm_concat.cpp | 41 ++++++ .../subgraph_tests/perm_conv_perm_concat.cpp | 43 ++++++ .../subgraph_tests/perm_conv_perm_concat.cpp | 41 ++++++ 
.../subgraph_tests/perm_conv_perm_concat.hpp | 36 +++++ .../subgraph_tests/perm_conv_perm_concat.cpp | 134 ++++++++++++++++++ 8 files changed, 300 insertions(+), 10 deletions(-) create mode 100644 inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp create mode 100644 inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp create mode 100644 inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp create mode 100644 inference-engine/tests/functional/plugin/shared/include/subgraph_tests/perm_conv_perm_concat.hpp create mode 100644 inference-engine/tests/functional/plugin/shared/src/subgraph_tests/perm_conv_perm_concat.cpp diff --git a/inference-engine/src/gna_plugin/gna_graph_compiler.cpp b/inference-engine/src/gna_plugin/gna_graph_compiler.cpp index 28edc654a9aa71..68f0403be72ba7 100644 --- a/inference-engine/src/gna_plugin/gna_graph_compiler.cpp +++ b/inference-engine/src/gna_plugin/gna_graph_compiler.cpp @@ -1528,7 +1528,7 @@ void GNAGraphCompiler::PWLPrimitive(InferenceEngine::CNNLayerPtr layer) { uint32_t c_dim_in = FROM_IR_DIM(inputs, 3); num_columns = (w_dim_in == 1) ? h_dim_in * c_dim_in : w_dim_in * c_dim_in; - num_rows = 1; + num_rows = (w_dim_in == 1) ? 
w_dim_in : h_dim_in; } else { num_columns = FROM_IR_DIM(inputs, 2); num_rows = FROM_IR_DIM(inputs, 1); diff --git a/inference-engine/src/gna_plugin/gna_graph_tools.hpp b/inference-engine/src/gna_plugin/gna_graph_tools.hpp index 4ef15f4b4c87ab..741951f13d5e3d 100644 --- a/inference-engine/src/gna_plugin/gna_graph_tools.hpp +++ b/inference-engine/src/gna_plugin/gna_graph_tools.hpp @@ -620,8 +620,8 @@ inline void CNNNetworkRemoveLayer(CNNLayerPtr layer, bool checkDims = true) { // remove osp->layer connection for (auto && outData : getInputTo(osp)) { - for (auto i = outData.second->insData.begin(); i != outData.second->insData.end(); i++) { - auto insData = i->lock(); + for (int i = 0; i < outData.second->insData.size(); i++) { + auto insData = outData.second->insData[i].lock(); if (!insData) { THROW_IE_EXCEPTION << "Cannot remove layer : "<< layer->name <<", its output layer(" << outData.first << " has invalid input configuration"; @@ -634,7 +634,7 @@ inline void CNNNetworkRemoveLayer(CNNLayerPtr layer, bool checkDims = true) { // found layer that need to be removed if (creator.get() == layer.get()) { - outData.second->insData.erase(i); + outData.second->insData[i] = isp; break; } } @@ -646,11 +646,6 @@ inline void CNNNetworkRemoveLayer(CNNLayerPtr layer, bool checkDims = true) { getInputTo(isp)[layer->name + "_" + outData.first] = outData.second; } - // add osp->isp connections - for (auto && outData : getInputTo(osp)) { - outData.second->insData.push_back(isp); - } - // removing layer->osp, and layer->isp connection not necessary - layer will delete it by itself } diff --git a/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp b/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp index a2ae86182a4c25..bfed5616b12424 100644 --- a/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp +++ b/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp @@ -653,7 +653,7 @@ void InsertIdentityLayerPass::run() { THROW_GNA_EXCEPTION << 
"cannot insert identity layer after" << prev->name << " and before " << l->name; } - auto inputData = l->insData[0].lock(); + auto inputData = l->insData[insDataIdx].lock(); auto dataPtr = std::make_shared("identity_data_" + std::to_string(numOfIdentityLayers), inputData->getTensorDesc()); auto activationLayerWithQuant = quantized ? diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp new file mode 100644 index 00000000000000..10a9ca41a8f7cf --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp @@ -0,0 +1,41 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +#include +#include "subgraph_tests/perm_conv_perm_concat.hpp" +#include "common_test_utils/test_constants.hpp" +namespace { +std::vector> input_shapes { + {1, 1, 7, 32}, + {1, 1, 8, 16}, +}; + +std::vector> kernel_shapes { + {1, 3}, + {1, 5}, +}; + +std::vector output_channels { + 32, + 64, +}; + +std::vector netPrecisions = { + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16, +}; + +std::map additional_config = { +}; +} // namespace + +namespace SubgraphTestsDefinitions { +INSTANTIATE_TEST_CASE_P(smoke_basic, PermConvPermConcat, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(kernel_shapes), + ::testing::ValuesIn(output_channels), + ::testing::Values(additional_config)), + PermConvPermConcat::getTestCaseName); +} // namespace SubgraphTestsDefinitions diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp new file 
mode 100644 index 00000000000000..3032c52caadaf1 --- /dev/null +++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp @@ -0,0 +1,43 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +#include +#include "subgraph_tests/perm_conv_perm_concat.hpp" +#include "common_test_utils/test_constants.hpp" +namespace { +std::vector> input_shapes { + {1, 1, 7, 32}, + {1, 1, 8, 16}, +}; + +std::vector> kernel_shapes { + {1, 3}, + {1, 5}, +}; + +std::vector output_channels { + 32, + 64, +}; + +std::vector netPrecisions = { + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16, +}; + +std::map additional_config = { + {"GNA_DEVICE_MODE", "GNA_SW_EXACT"}, + {"GNA_SCALE_FACTOR_0", "1234"} +}; +} // namespace + +namespace SubgraphTestsDefinitions { + INSTANTIATE_TEST_CASE_P(smoke_basic, PermConvPermConcat, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_GNA), + ::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(kernel_shapes), + ::testing::ValuesIn(output_channels), + ::testing::Values(additional_config)), + PermConvPermConcat::getTestCaseName); +} // namespace SubgraphTestsDefinitions diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp new file mode 100644 index 00000000000000..a5b76e1fe10e48 --- /dev/null +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp @@ -0,0 +1,41 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +#include +#include "subgraph_tests/perm_conv_perm_concat.hpp" +#include "common_test_utils/test_constants.hpp" +namespace { +std::vector> input_shapes { + {1, 1, 7, 32}, + {1, 1, 8, 16}, +}; + +std::vector> kernel_shapes { + 
{1, 3}, + {1, 5}, +}; + +std::vector output_channels { + 32, + 64, +}; + +std::vector netPrecisions = { + InferenceEngine::Precision::FP32, +// InferenceEngine::Precision::FP16, +}; + +std::map additional_config = { +}; +} // namespace + +namespace SubgraphTestsDefinitions { + INSTANTIATE_TEST_CASE_P(smoke_basic, PermConvPermConcat, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_GPU), + ::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(kernel_shapes), + ::testing::ValuesIn(output_channels), + ::testing::Values(additional_config)), + PermConvPermConcat::getTestCaseName); +} // namespace SubgraphTestsDefinitions diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/perm_conv_perm_concat.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/perm_conv_perm_concat.hpp new file mode 100644 index 00000000000000..4365c511fec65f --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/perm_conv_perm_concat.hpp @@ -0,0 +1,36 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include +#include + +#include "functional_test_utils/layer_test_utils.hpp" +#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ngraph_functions/builders.hpp" + +namespace SubgraphTestsDefinitions { +typedef std::tuple< + InferenceEngine::Precision, // Network Precision + std::string, // Target Device + std::array, // Input shape + std::array, // Kernel shape + size_t, // Output channels + std::map // Configuration +> PermConvPermConcatParams; + +class PermConvPermConcat : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon { +public: + static std::string getTestCaseName(testing::TestParamInfo obj); + +protected: + void SetUp() override; + void Run() override; +}; +} // namespace SubgraphTestsDefinitions diff --git 
a/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/perm_conv_perm_concat.cpp b/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/perm_conv_perm_concat.cpp new file mode 100644 index 00000000000000..62ab624794ad4a --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/perm_conv_perm_concat.cpp @@ -0,0 +1,134 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#include +#include +#include +#include +#include +#include +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/precision_utils.hpp" +#include "functional_test_utils/skip_tests_config.hpp" +#include "subgraph_tests/perm_conv_perm_concat.hpp" + +namespace SubgraphTestsDefinitions { +std::string PermConvPermConcat::getTestCaseName(testing::TestParamInfo obj) { + InferenceEngine::Precision netPrecision; + std::string targetName; + std::array input_shape; + std::array kernel_shape; + size_t output_channels; + std::map configuration; + + + std::tie(netPrecision, targetName, input_shape, kernel_shape, output_channels, configuration) = obj.param; + std::ostringstream results; + + results << "IS=" << CommonTestUtils::vec2str(std::vector(input_shape.begin(), input_shape.end())) << "_"; + results << "KS=" << CommonTestUtils::vec2str(std::vector(kernel_shape.begin(), kernel_shape.end())) << "_"; + results << "OC=" << output_channels << "_"; + results << "netPRC=" << netPrecision.name() << "_"; + results << "targetDevice=" << targetName; + return results.str(); +} + +void PermConvPermConcat::SetUp() { + InferenceEngine::Precision netPrecision; + std::array input_shape; + std::array kernel_shape; + size_t output_channels; + std::map additional_config; + + std::tie(netPrecision, targetDevice, input_shape, kernel_shape, output_channels, additional_config) = this->GetParam(); + + configuration.insert(additional_config.begin(), additional_config.end()); + + const std::size_t input_dim = 
std::accumulate(input_shape.begin(), input_shape.end(), 1, std::multiplies()); + auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + + std::vector input_dims { 1, input_dim }; + std::vector reshape_in_dims = std::vector(input_shape.begin(), input_shape.end()); + std::vector permute_in_order = { 0, 3, 1, 2 }; + std::vector permute_out_order = { 0, 2, 3, 1 }; + + const int seed = 0; + std::mt19937 gen(static_cast(seed)); + + auto generateFloatNumbers = [gen](std::size_t vec_len, float min, float max) mutable { + std::vector res; + + std::uniform_real_distribution dist(min, max); + for (int i = 0; i < vec_len; i++) + res.emplace_back(static_cast(dist(gen))); + + return res; + }; + + auto input_parameter = ngraph::builder::makeParams(ngPrc, {input_dims}); + + auto reshape_in_pattern = std::make_shared(ngraph::element::i64, + ngraph::Shape{4}, + reshape_in_dims); + auto reshape_in = std::make_shared(input_parameter[0], reshape_in_pattern, false); + + auto permute_in_params = std::make_shared(ngraph::element::i64, + ngraph::Shape{4}, + ngraph::Shape{permute_in_order}); + auto permute_in = std::make_shared(reshape_in, permute_in_params); + auto conv_in_shape = permute_in->get_output_shape(0); + auto conv_weights_size = output_channels * (conv_in_shape[1]) * kernel_shape[0] * kernel_shape[1]; + auto conv = ngraph::builder::makeConvolution(permute_in, ngPrc, {kernel_shape[0], kernel_shape[1]}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, + ngraph::op::PadType::VALID, output_channels, false, generateFloatNumbers(conv_weights_size, -0.5f, 0.5f)); + + auto permute_out_params = std::make_shared(ngraph::element::i64, + ngraph::Shape{4}, + permute_out_order); + auto permute_out = std::make_shared(conv, permute_out_params); + + auto permute_out_shape = permute_out->get_output_shape(0); + + auto concat_const = ngraph::builder::makeConstant(ngPrc, {1, 1, 1, permute_out_shape[3]}, generateFloatNumbers(permute_out_shape[3], -10, 10)); + + auto concat = 
ngraph::builder::makeConcat({permute_out, concat_const}, 2); + + auto reshape_out_pattern = std::make_shared(ngraph::element::i64, + ngraph::Shape{2}, + InferenceEngine::SizeVector({1, (permute_out_shape[2] + 1) * permute_out_shape[3]})); + auto reshape_out = std::make_shared(concat, reshape_out_pattern, false); + + function = std::make_shared(reshape_out, input_parameter, "perm_conv_perm_concat"); +} + +void PermConvPermConcat::Run() { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + LoadNetwork(); + + inferRequest = executableNetwork.CreateInferRequest(); + inputs.clear(); + + for (const auto &input : cnnNetwork.getInputsInfo()) { + const auto &info = input.second; + auto tensorDesc = info->getTensorDesc(); + + auto blob = FuncTestUtils::createAndFillBlobFloat(tensorDesc, 2, -1, 100, 111); + + FuncTestUtils::fillInputsBySinValues(blob); + inferRequest.SetBlob(info->name(), blob); + inputs.push_back(blob); + } + if (configuration.count(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED) && + configuration.count(InferenceEngine::PluginConfigParams::YES)) { + auto batchSize = cnnNetwork.getInputsInfo().begin()->second->getTensorDesc().getDims()[0] / 2; + inferRequest.SetBatch(batchSize); + } + inferRequest.Infer(); + + Validate(); +} + +TEST_P(PermConvPermConcat, CompareWithRefs) { + Run(); +} +} // namespace SubgraphTestsDefinitions From aff7a6608286129cb7eb9019854959c0c155718e Mon Sep 17 00:00:00 2001 From: Andrey Dmitriev Date: Fri, 16 Oct 2020 15:34:22 +0300 Subject: [PATCH 16/35] [GNA][Speech sample] Add option to specify blob names (#1529) * Added output names * Add input, output, ref names * Added zero scale factor * Adding support for multiple reference files --- .../samples/speech_sample/main.cpp | 536 +++++++++++------- .../samples/speech_sample/speech_sample.hpp | 18 + 2 files changed, 336 insertions(+), 218 deletions(-) diff --git a/inference-engine/samples/speech_sample/main.cpp b/inference-engine/samples/speech_sample/main.cpp index 
ef36d91732f420..5bae50b1d0bd5b 100644 --- a/inference-engine/samples/speech_sample/main.cpp +++ b/inference-engine/samples/speech_sample/main.cpp @@ -426,6 +426,20 @@ std::vector ParseScaleFactors(const std::string& str) { return scaleFactorInput; } +std::vector ParseBlobName(std::string str) { + std::vector blobName; + if (!str.empty()) { + size_t pos_last = 0; + size_t pos_next = 0; + while ((pos_next = str.find(",", pos_last)) != std::string::npos) { + blobName.push_back(str.substr(pos_last, pos_next)); + pos_last = pos_next + 1; + } + blobName.push_back(str.substr(pos_last)); + } + return blobName; +} + bool ParseAndCheckCommandLine(int argc, char *argv[]) { // ---------------------------Parsing and validation of input args-------------------------------------- slog::info << "Parsing input parameters" << slog::endl; @@ -673,8 +687,30 @@ int main(int argc, char *argv[]) { genericPluginConfig.insert(std::begin(gnaPluginConfig), std::end(gnaPluginConfig)); } auto t0 = Time::now(); + std::vector outputs; ExecutableNetwork executableNet; + if (!FLAGS_oname.empty()) { + std::vector output_names = ParseBlobName(FLAGS_oname); + std::vector ports; + for (const auto& outBlobName : output_names) { + int pos_layer = outBlobName.rfind(":"); + if (pos_layer == -1) { + throw std::logic_error(std::string("Output ") + std::string(outBlobName) + + std::string(" doesn't have a port")); + } + outputs.push_back(outBlobName.substr(0, pos_layer)); + try { + ports.push_back(std::stoi(outBlobName.substr(pos_layer + 1))); + } catch (std::exception) { + throw std::logic_error("Ports should have integer type"); + } + } + + for (size_t i = 0; i < outputs.size(); i++) { + network.addOutput(outputs[i], ports[i]); + } + } if (!FLAGS_m.empty()) { slog::info << "Loading model to the device" << slog::endl; executableNet = ie.LoadNetwork(network, deviceStr, genericPluginConfig); @@ -682,7 +718,6 @@ int main(int argc, char *argv[]) { slog::info << "Importing model to the device" << slog::endl; 
executableNet = ie.ImportNetwork(FLAGS_rg.c_str(), deviceStr, genericPluginConfig); } - ms loadTime = std::chrono::duration_cast(Time::now() - t0); slog::info << "Model loading time " << loadTime.count() << " ms" << slog::endl; @@ -717,10 +752,27 @@ int main(int argc, char *argv[]) { /** Stores all input blobs data **/ std::vector ptrInputBlobs; - for (auto& input : cInputInfo) { - ptrInputBlobs.push_back(inferRequests.begin()->inferRequest.GetBlob(input.first)); + if (!FLAGS_iname.empty()) { + std::vector inputNameBlobs = ParseBlobName(FLAGS_iname); + if (inputNameBlobs.size() != cInputInfo.size()) { + std::string errMessage(std::string("Number of network inputs ( ") + std::to_string(cInputInfo.size()) + + " ) is not equal to the number of inputs entered in the -iname argument ( " + + std::to_string(inputNameBlobs.size()) + " )."); + throw std::logic_error(errMessage); + } + for (const auto& input : inputNameBlobs) { + Blob::Ptr blob = inferRequests.begin()->inferRequest.GetBlob(input); + if (!blob) { + std::string errMessage("No blob with name : " + input); + throw std::logic_error(errMessage); + } + ptrInputBlobs.push_back(blob); + } + } else { + for (const auto& input : cInputInfo) { + ptrInputBlobs.push_back(inferRequests.begin()->inferRequest.GetBlob(input.first)); + } } - InputsDataMap inputInfo; if (!FLAGS_m.empty()) { inputInfo = network.getInputsInfo(); @@ -739,8 +791,21 @@ int main(int argc, char *argv[]) { if (!FLAGS_m.empty()) { outputInfo = network.getOutputsInfo(); } - - Blob::Ptr ptrOutputBlob = inferRequests.begin()->inferRequest.GetBlob(cOutputInfo.rbegin()->first); + std::vector ptrOutputBlob; + if (!outputs.empty()) { + for (const auto& output : outputs) { + Blob::Ptr blob = inferRequests.begin()->inferRequest.GetBlob(output); + if (!blob) { + std::string errMessage("No blob with name : " + output); + throw std::logic_error(errMessage); + } + ptrOutputBlob.push_back(blob); + } + } else { + for (auto& output : cOutputInfo) { + 
ptrOutputBlob.push_back(inferRequests.begin()->inferRequest.GetBlob(output.first)); + } + } for (auto &item : outputInfo) { DataPtr outData = item.second; @@ -754,255 +819,290 @@ int main(int argc, char *argv[]) { // ----------------------------------------------------------------------------------------------------- // --------------------------- 10. Do inference -------------------------------------------------------- - std::vector> ptrUtterances; - std::vector ptrScores; - std::vector ptrReferenceScores; - score_error_t frameError, totalError; - - ptrUtterances.resize(inputArkFiles.size()); - - // initialize memory state before starting - for (auto &&state : executableNet.QueryState()) { - state.Reset(); - } - - for (uint32_t utteranceIndex = 0; utteranceIndex < numUtterances; ++utteranceIndex) { - std::map utterancePerfMap; - std::string uttName; - uint32_t numFrames(0), n(0); - std::vector numFrameElementsInput; - - uint32_t numFramesReference(0), numFrameElementsReference(0), numBytesPerElementReference(0), - numBytesReferenceScoreThisUtterance(0); - const uint32_t numScoresPerFrame = ptrOutputBlob->size() / batchSize; - - numFrameElementsInput.resize(numInputArkFiles); - for (size_t i = 0; i < inputArkFiles.size(); i++) { - std::vector ptrUtterance; - auto inputArkFilename = inputArkFiles[i].c_str(); - uint32_t currentNumFrames(0), currentNumFrameElementsInput(0), currentNumBytesPerElementInput(0); - GetKaldiArkInfo(inputArkFilename, utteranceIndex, &n, &numBytesThisUtterance[i]); - ptrUtterance.resize(numBytesThisUtterance[i]); - LoadKaldiArkArray(inputArkFilename, - utteranceIndex, - uttName, - ptrUtterance, - ¤tNumFrames, - ¤tNumFrameElementsInput, - ¤tNumBytesPerElementInput); - if (numFrames == 0) { - numFrames = currentNumFrames; - } else if (numFrames != currentNumFrames) { - std::string errMessage("Number of frames in ark files is different: " + std::to_string(numFrames) + - " and " + std::to_string(currentNumFrames)); - throw 
std::logic_error(errMessage); - } - - ptrUtterances[i] = ptrUtterance; - numFrameElementsInput[i] = currentNumFrameElementsInput; + std::vector output_name_files; + std::vector reference_name_files; + size_t count_file = 1; + if (!FLAGS_o.empty()) { + output_name_files = ParseBlobName(FLAGS_o); + if (output_name_files.size() != outputs.size() && !outputs.empty()) { + throw std::logic_error("The number of output files is not equal to the number of network outputs."); } - - int i = 0; - for (auto& ptrInputBlob : ptrInputBlobs) { - if (ptrInputBlob->size() != numFrameElementsInput[i++] * batchSize) { - throw std::logic_error("network input size(" + std::to_string(ptrInputBlob->size()) + - ") mismatch to ark file size (" + - std::to_string(numFrameElementsInput[i-1] * batchSize) + ")"); - } + count_file = output_name_files.empty() ? 1 : output_name_files.size(); + } + if (!FLAGS_r.empty()) { + reference_name_files = ParseBlobName(FLAGS_r); + if (reference_name_files.size() != outputs.size() && !outputs.empty()) { + throw std::logic_error("The number of reference files is not equal to the number of network outputs."); } + count_file = reference_name_files.empty() ? 
1 : reference_name_files.size(); + } + for (size_t next_output = 0; next_output < count_file; next_output++) { + std::vector> ptrUtterances; + std::vector ptrScores; + std::vector ptrReferenceScores; + score_error_t frameError, totalError; + + ptrUtterances.resize(inputArkFiles.size()); - ptrScores.resize(numFrames * numScoresPerFrame * sizeof(float)); - if (!FLAGS_r.empty()) { - std::string refUtteranceName; - GetKaldiArkInfo(FLAGS_r.c_str(), utteranceIndex, &n, &numBytesReferenceScoreThisUtterance); - ptrReferenceScores.resize(numBytesReferenceScoreThisUtterance); - LoadKaldiArkArray(FLAGS_r.c_str(), - utteranceIndex, - refUtteranceName, - ptrReferenceScores, - &numFramesReference, - &numFrameElementsReference, - &numBytesPerElementReference); + // initialize memory state before starting + for (auto &&state : executableNet.QueryState()) { + state.Reset(); } - double totalTime = 0.0; + for (uint32_t utteranceIndex = 0; utteranceIndex < numUtterances; ++utteranceIndex) { + std::map utterancePerfMap; + std::string uttName; + uint32_t numFrames(0), n(0); + std::vector numFrameElementsInput; + + uint32_t numFramesReference(0), numFrameElementsReference(0), numBytesPerElementReference(0), + numBytesReferenceScoreThisUtterance(0); + const uint32_t numScoresPerFrame = ptrOutputBlob.size() / batchSize; + + numFrameElementsInput.resize(numInputArkFiles); + for (size_t i = 0; i < inputArkFiles.size(); i++) { + std::vector ptrUtterance; + auto inputArkFilename = inputArkFiles[i].c_str(); + uint32_t currentNumFrames(0), currentNumFrameElementsInput(0), currentNumBytesPerElementInput(0); + GetKaldiArkInfo(inputArkFilename, utteranceIndex, &n, &numBytesThisUtterance[i]); + ptrUtterance.resize(numBytesThisUtterance[i]); + LoadKaldiArkArray(inputArkFilename, + utteranceIndex, + uttName, + ptrUtterance, + ¤tNumFrames, + ¤tNumFrameElementsInput, + ¤tNumBytesPerElementInput); + if (numFrames == 0) { + numFrames = currentNumFrames; + } else if (numFrames != currentNumFrames) { + 
std::string errMessage( + "Number of frames in ark files is different: " + std::to_string(numFrames) + + " and " + std::to_string(currentNumFrames)); + throw std::logic_error(errMessage); + } - std::cout << "Utterance " << utteranceIndex << ": " << std::endl; + ptrUtterances[i] = ptrUtterance; + numFrameElementsInput[i] = currentNumFrameElementsInput; + } - ClearScoreError(&totalError); - totalError.threshold = frameError.threshold = MAX_SCORE_DIFFERENCE; - auto outputFrame = &ptrScores.front(); - std::vector inputFrame; - for (auto& ut : ptrUtterances) { - inputFrame.push_back(&ut.front()); - } + int i = 0; + for (auto &ptrInputBlob : ptrInputBlobs) { + if (ptrInputBlob->size() != numFrameElementsInput[i++] * batchSize) { + throw std::logic_error("network input size(" + std::to_string(ptrInputBlob->size()) + + ") mismatch to ark file size (" + + std::to_string(numFrameElementsInput[i - 1] * batchSize) + ")"); + } + } - std::map callPerfMap; + ptrScores.resize(numFrames * numScoresPerFrame * sizeof(float)); + if (!FLAGS_r.empty()) { + std::string refUtteranceName; + GetKaldiArkInfo(reference_name_files[next_output].c_str(), utteranceIndex, &n, &numBytesReferenceScoreThisUtterance); + ptrReferenceScores.resize(numBytesReferenceScoreThisUtterance); + LoadKaldiArkArray(reference_name_files[next_output].c_str(), + utteranceIndex, + refUtteranceName, + ptrReferenceScores, + &numFramesReference, + &numFrameElementsReference, + &numBytesPerElementReference); + } - size_t frameIndex = 0; - uint32_t numFramesArkFile = numFrames; - numFrames += FLAGS_cw_l + FLAGS_cw_r; - uint32_t numFramesThisBatch{batchSize}; + double totalTime = 0.0; - auto t0 = Time::now(); - auto t1 = t0; + std::cout << "Utterance " << utteranceIndex << ": " << std::endl; - while (frameIndex <= numFrames) { - if (frameIndex == numFrames) { - if (std::find_if(inferRequests.begin(), - inferRequests.end(), - [&](InferRequestStruct x) { return (x.frameIndex != -1); } ) == inferRequests.end()) { - break; - } 
+ ClearScoreError(&totalError); + totalError.threshold = frameError.threshold = MAX_SCORE_DIFFERENCE; + auto outputFrame = &ptrScores.front(); + std::vector inputFrame; + for (auto &ut : ptrUtterances) { + inputFrame.push_back(&ut.front()); } - bool inferRequestFetched = false; - for (auto &inferRequest : inferRequests) { + std::map callPerfMap; + + size_t frameIndex = 0; + uint32_t numFramesArkFile = numFrames; + numFrames += FLAGS_cw_l + FLAGS_cw_r; + uint32_t numFramesThisBatch{batchSize}; + + auto t0 = Time::now(); + auto t1 = t0; + + while (frameIndex <= numFrames) { if (frameIndex == numFrames) { - numFramesThisBatch = 1; - } else { - numFramesThisBatch = (numFrames - frameIndex < batchSize) ? (numFrames - frameIndex) - : batchSize; + if (std::find_if(inferRequests.begin(), + inferRequests.end(), + [&](InferRequestStruct x) { return (x.frameIndex != -1); }) == + inferRequests.end()) { + break; + } } - if (inferRequest.frameIndex != -1) { - StatusCode code = inferRequest.inferRequest.Wait( - InferenceEngine::IInferRequest::WaitMode::RESULT_READY); - - if (code != StatusCode::OK) { - if (!useHetero) continue; - if (code != StatusCode::INFER_NOT_STARTED) continue; + bool inferRequestFetched = false; + for (auto &inferRequest : inferRequests) { + if (frameIndex == numFrames) { + numFramesThisBatch = 1; + } else { + numFramesThisBatch = (numFrames - frameIndex < batchSize) ? 
(numFrames - frameIndex) + : batchSize; } - if (inferRequest.frameIndex >= 0) { - if (!FLAGS_o.empty()) { - outputFrame = - &ptrScores.front() + numScoresPerFrame * sizeof(float) * (inferRequest.frameIndex); - MemoryBlob::CPtr moutput = as(inferRequest.inferRequest.GetBlob(cOutputInfo.rbegin()->first)); - if (!moutput) { - throw std::logic_error("We expect output to be inherited from MemoryBlob, " - "but by fact we were not able to cast output to MemoryBlob"); - } - // locked memory holder should be alive all time while access to its buffer happens - auto moutputHolder = moutput->rmap(); - auto byteSize = inferRequest.numFramesThisBatch * numScoresPerFrame * sizeof(float); - std::memcpy(outputFrame, - moutputHolder.as(), - byteSize); - } + if (inferRequest.frameIndex != -1) { + StatusCode code = inferRequest.inferRequest.Wait( + InferenceEngine::IInferRequest::WaitMode::RESULT_READY); - if (!FLAGS_r.empty()) { - Blob::Ptr outputBlob = inferRequest.inferRequest.GetBlob(cOutputInfo.rbegin()->first); - MemoryBlob::CPtr moutput = as(outputBlob); - if (!moutput) { - throw std::logic_error("We expect output to be inherited from MemoryBlob, " - "but by fact we were not able to cast output to MemoryBlob"); - } - // locked memory holder should be alive all time while access to its buffer happens - auto moutputHolder = moutput->rmap(); - CompareScores(moutputHolder.as(), - &ptrReferenceScores[inferRequest.frameIndex * - numFrameElementsReference * - numBytesPerElementReference], - &frameError, - inferRequest.numFramesThisBatch, - numFrameElementsReference); - UpdateScoreError(&frameError, &totalError); + if (code != StatusCode::OK) { + if (!useHetero) continue; + if (code != StatusCode::INFER_NOT_STARTED) continue; } - if (FLAGS_pc) { - // retrieve new counters - getPerformanceCounters(inferRequest.inferRequest, callPerfMap); - // summarize retrieved counters with all previous - sumPerformanceCounters(callPerfMap, utterancePerfMap); + ConstOutputsDataMap newOutputInfo; + if 
(inferRequest.frameIndex >= 0) { + if (!FLAGS_o.empty()) { + outputFrame = + &ptrScores.front() + + numScoresPerFrame * sizeof(float) * (inferRequest.frameIndex); + if (!outputs.empty()) { + newOutputInfo[outputs[next_output]] = cOutputInfo[outputs[next_output]]; + } else { + newOutputInfo = cOutputInfo; + } + Blob::Ptr outputBlob = inferRequest.inferRequest.GetBlob(newOutputInfo.rbegin()->first); + MemoryBlob::CPtr moutput = as(outputBlob); + + if (!moutput) { + throw std::logic_error("We expect output to be inherited from MemoryBlob, " + "but in fact we were not able to cast output to MemoryBlob"); + } + // locked memory holder should be alive all time while access to its buffer happens + auto moutputHolder = moutput->rmap(); + auto byteSize = + inferRequest.numFramesThisBatch * numScoresPerFrame * sizeof(float); + std::memcpy(outputFrame, + moutputHolder.as(), + byteSize); + } + if (!FLAGS_r.empty()) { + if (!outputs.empty()) { + newOutputInfo[outputs[next_output]] = cOutputInfo[outputs[next_output]]; + } else { + newOutputInfo = cOutputInfo; + } + Blob::Ptr outputBlob = inferRequest.inferRequest.GetBlob(newOutputInfo.rbegin()->first); + MemoryBlob::CPtr moutput = as(outputBlob); + if (!moutput) { + throw std::logic_error("We expect output to be inherited from MemoryBlob, " + "but in fact we were not able to cast output to MemoryBlob"); + } + // locked memory holder should be alive all time while access to its buffer happens + auto moutputHolder = moutput->rmap(); + CompareScores(moutputHolder.as(), + &ptrReferenceScores[inferRequest.frameIndex * + numFrameElementsReference * + numBytesPerElementReference], + &frameError, + inferRequest.numFramesThisBatch, + numFrameElementsReference); + UpdateScoreError(&frameError, &totalError); + } + if (FLAGS_pc) { + // retrieve new counters + getPerformanceCounters(inferRequest.inferRequest, callPerfMap); + // summarize retrieved counters with all previous + sumPerformanceCounters(callPerfMap, utterancePerfMap); + } } } - } 
- - if (frameIndex == numFrames) { - inferRequest.frameIndex = -1; - continue; - } - - ptrInputBlobs.clear(); - for (auto& input : cInputInfo) { - ptrInputBlobs.push_back(inferRequest.inferRequest.GetBlob(input.first)); - } - for (size_t i = 0; i < numInputArkFiles; ++i) { - MemoryBlob::Ptr minput = as(ptrInputBlobs[i]); - if (!minput) { - slog::err << "We expect ptrInputBlobs[" << i << "] to be inherited from MemoryBlob, " << - "but by fact we were not able to cast input blob to MemoryBlob" << slog::endl; - return 1; + if (frameIndex == numFrames) { + inferRequest.frameIndex = -1; + continue; } - // locked memory holder should be alive all time while access to its buffer happens - auto minputHolder = minput->wmap(); - std::memcpy(minputHolder.as(), - inputFrame[i], - minput ->byteSize()); - } + if (FLAGS_iname.empty()) { + size_t num_files = FLAGS_iname.empty() ? numInputArkFiles : ptrInputBlobs.size(); + for (size_t i = 0; i < num_files; ++i) { + MemoryBlob::Ptr minput = as(ptrInputBlobs[i]); + if (!minput) { + slog::err << "We expect ptrInputBlobs[" << i + << "] to be inherited from MemoryBlob, " << + "but in fact we were not able to cast input blob to MemoryBlob" + << slog::endl; + return 1; + } + // locked memory holder should be alive all time while access to its buffer happens + auto minputHolder = minput->wmap(); - int index = static_cast(frameIndex) - (FLAGS_cw_l + FLAGS_cw_r); - inferRequest.inferRequest.StartAsync(); - inferRequest.frameIndex = index < 0 ? 
-2 : index; - inferRequest.numFramesThisBatch = numFramesThisBatch; + std::memcpy(minputHolder.as(), + inputFrame[i], + minput->byteSize()); + } + } - frameIndex += numFramesThisBatch; - for (size_t j = 0; j < inputArkFiles.size(); j++) { - if (FLAGS_cw_l > 0 || FLAGS_cw_r > 0) { - int idx = frameIndex - FLAGS_cw_l; - if (idx > 0 && idx < static_cast(numFramesArkFile)) { + int index = static_cast(frameIndex) - (FLAGS_cw_l + FLAGS_cw_r); + inferRequest.inferRequest.StartAsync(); + inferRequest.frameIndex = index < 0 ? -2 : index; + inferRequest.numFramesThisBatch = numFramesThisBatch; + + frameIndex += numFramesThisBatch; + for (size_t j = 0; j < inputArkFiles.size(); j++) { + if (FLAGS_cw_l > 0 || FLAGS_cw_r > 0) { + int idx = frameIndex - FLAGS_cw_l; + if (idx > 0 && idx < static_cast(numFramesArkFile)) { + inputFrame[j] += sizeof(float) * numFrameElementsInput[j] * numFramesThisBatch; + } else if (idx >= static_cast(numFramesArkFile)) { + inputFrame[j] = &ptrUtterances[j].front() + + (numFramesArkFile - 1) * sizeof(float) * numFrameElementsInput[j] * + numFramesThisBatch; + } else if (idx <= 0) { + inputFrame[j] = &ptrUtterances[j].front(); + } + } else { inputFrame[j] += sizeof(float) * numFrameElementsInput[j] * numFramesThisBatch; - } else if (idx >= static_cast(numFramesArkFile)) { - inputFrame[j] = &ptrUtterances[j].front() + - (numFramesArkFile - 1) * sizeof(float) * numFrameElementsInput[j] * numFramesThisBatch; - } else if (idx <= 0) { - inputFrame[j] = &ptrUtterances[j].front(); } - } else { - inputFrame[j] += sizeof(float) * numFrameElementsInput[j] * numFramesThisBatch; } + inferRequestFetched |= true; + } + if (!inferRequestFetched) { + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + continue; } - inferRequestFetched |= true; - } - if (!inferRequestFetched) { - std::this_thread::sleep_for(std::chrono::milliseconds(1)); - continue; } - } - t1 = Time::now(); + t1 = Time::now(); - fsec fs = t1 - t0; - ms d = std::chrono::duration_cast(fs); - 
totalTime += d.count(); + fsec fs = t1 - t0; + ms d = std::chrono::duration_cast(fs); + totalTime += d.count(); - // resetting state between utterances - for (auto &&state : executableNet.QueryState()) { - state.Reset(); - } + // resetting state between utterances + for (auto &&state : executableNet.QueryState()) { + state.Reset(); + } - if (!FLAGS_o.empty()) { - bool shouldAppend = (utteranceIndex == 0) ? false : true; - SaveKaldiArkArray(FLAGS_o.c_str(), shouldAppend, uttName, &ptrScores.front(), - numFramesArkFile, numScoresPerFrame); - } + if (!FLAGS_o.empty()) { + bool shouldAppend = (utteranceIndex == 0) ? false : true; + SaveKaldiArkArray(output_name_files[next_output].c_str(), shouldAppend, uttName, &ptrScores.front(), + numFramesArkFile, numScoresPerFrame); + } - /** Show performance results **/ - std::cout << "Total time in Infer (HW and SW):\t" << totalTime << " ms" - << std::endl; - std::cout << "Frames in utterance:\t\t\t" << numFrames << " frames" - << std::endl; - std::cout << "Average Infer time per frame:\t\t" << totalTime / static_cast(numFrames) << " ms" - << std::endl; - if (FLAGS_pc) { - // print - printPerformanceCounters(utterancePerfMap, frameIndex, std::cout, getFullDeviceName(ie, FLAGS_d)); - } - if (!FLAGS_r.empty()) { - printReferenceCompareResults(totalError, numFrames, std::cout); + /** Show performance results **/ + std::cout << "Total time in Infer (HW and SW):\t" << totalTime << " ms" + << std::endl; + std::cout << "Frames in utterance:\t\t\t" << numFrames << " frames" + << std::endl; + std::cout << "Average Infer time per frame:\t\t" << totalTime / static_cast(numFrames) << " ms" + << std::endl; + if (FLAGS_pc) { + // print + printPerformanceCounters(utterancePerfMap, frameIndex, std::cout, getFullDeviceName(ie, FLAGS_d)); + } + if (!FLAGS_r.empty()) { + printReferenceCompareResults(totalError, numFrames, std::cout); + } + std::cout << "End of Utterance " << utteranceIndex << std::endl << std::endl; } - std::cout << "End of 
Utterance " << utteranceIndex << std::endl << std::endl; } // ----------------------------------------------------------------------------------------------------- } diff --git a/inference-engine/samples/speech_sample/speech_sample.hpp b/inference-engine/samples/speech_sample/speech_sample.hpp index a76a51c45a731d..f6081582920e33 100644 --- a/inference-engine/samples/speech_sample/speech_sample.hpp +++ b/inference-engine/samples/speech_sample/speech_sample.hpp @@ -81,6 +81,16 @@ static const char context_window_message_r[] = "Optional. Number of frames for r "Works only with context window networks." " If you use the cw_r or cw_l flag, then batch size and nthreads arguments are ignored."; +/// @brief message for output layer names +static const char output_layer_names_message[] = "Optional. Layer names for output blobs. " \ + "The names are separated with \",\" " \ + "Example: input1:port,input2:port "; + +/// @brief message for inputs layer names +static const char input_layer_names_message[] = "Optional. Layer names for input blobs. " \ + "The names are separated with \",\" " \ + "Example: Input1,Input2 "; + /// \brief Define flag for showing help message
DEFINE_bool(h, false, help_message); @@ -145,6 +155,12 @@ DEFINE_int32(cw_r, 0, context_window_message_r); /// @brief Left context window size (default 0) DEFINE_int32(cw_l, 0, context_window_message_l); +/// @brief Output layer name +DEFINE_string(oname, "", output_layer_names_message); + +/// @brief Input layer name +DEFINE_string(iname, "", input_layer_names_message); + /** * \brief This function show a help message */ @@ -173,5 +189,7 @@ static void showUsage() { std::cout << " -nthreads \"\" " << infer_num_threads_message << std::endl; std::cout << " -cw_l \"\" " << context_window_message_l << std::endl; std::cout << " -cw_r \"\" " << context_window_message_r << std::endl; + std::cout << " -oname \"\" " << output_layer_names_message << std::endl; + std::cout << " -iname \"\" " << input_layer_names_message << std::endl; } From d8466cf6ee3aa546188c2934fd2a5341765b13b3 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Fri, 16 Oct 2020 18:12:20 +0300 Subject: [PATCH 17/35] Small fix for python doc (#2696) --- .../ie_bridges/python/src/openvino/inference_engine/ie_api.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx index cea04fc0138138..21cd5aaeeb4c36 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx @@ -244,7 +244,7 @@ cdef class IECore: versions[device].major = ver.apiVersion.major return versions - ## Reads a network from the Intermediate Representation (IR) and creates an `IENetwork`. + ## Reads a network from Intermediate Representation (IR) or ONNX formats and creates an `IENetwork`. # @param model: A `.xml`, `.onnx`or `.prototxt` model file or string with IR. # @param weights: A `.bin` file of the IR. 
Depending on `init_from_buffer` value, can be a string path or # bytes with file content. From 595a52ae6782a67b0959d8a593e0dcc5e877d071 Mon Sep 17 00:00:00 2001 From: Jesus Espinoza Date: Fri, 16 Oct 2020 09:02:41 -0700 Subject: [PATCH 18/35] Updating broken link on getting started linux doc (#2507) Link to build instructions was broken, updated link to the correct location. --- get-started-linux.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/get-started-linux.md b/get-started-linux.md index 9bac4911198cc3..931d516cae2e0b 100644 --- a/get-started-linux.md +++ b/get-started-linux.md @@ -12,7 +12,7 @@ the OpenVINOâ„¢ Toolkit on Linux\*. With this guide, you will learn how to: ## Prerequisites 1. This guide assumes that you have already cloned the `openvino` repo and successfully built the Inference Engine and Samples using the - [build instructions](inference-engine/README.md). + [build instructions](build-instruction.md). 2. The original structure of the repository directories remains unchanged. > **NOTE**: Below, the directory to which the `openvino` repository is cloned is @@ -188,7 +188,7 @@ Now you are ready to run the Image Classification Sample Application. ## Run the Image Classification Sample Application The Inference Engine sample applications are automatically compiled when you -built the Inference Engine using the [build instructions](inference-engine/README.md). +built the Inference Engine using the [build instructions](build-instruction.md). The binary files are located in the `/inference-engine/bin/intel64/Release` directory. 
@@ -253,7 +253,7 @@ Throughput: 375.3339402 FPS ## Additional Resources * [OpenVINOâ„¢ Release Notes](https://software.intel.com/en-us/articles/OpenVINO-RelNotes) -* [Inference Engine build instructions](inference-engine/README.md) +* [Inference Engine build instructions](build-instruction.md) * [Introduction to Intel® Deep Learning Deployment Toolkit](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_Introduction.html) * [Inference Engine Developer Guide](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_Deep_Learning_Inference_Engine_DevGuide.html) * [Model Optimizer Developer Guide] @@ -264,4 +264,4 @@ Throughput: 375.3339402 FPS [OpenVINOâ„¢ pre-trained models]:https://github.com/opencv/open_model_zoo/tree/master/models/intel [prerequisites]:https://github.com/opencv/open_model_zoo/tree/master/tools/downloader#prerequisites [list of supported devices]:https://docs.openvinotoolkit.org/latest/_docs_IE_DG_supported_plugins_Supported_Devices.html -[different precision formats]:https://docs.openvinotoolkit.org/latest/_docs_IE_DG_supported_plugins_Supported_Devices.html#supported_model_formats \ No newline at end of file +[different precision formats]:https://docs.openvinotoolkit.org/latest/_docs_IE_DG_supported_plugins_Supported_Devices.html#supported_model_formats From f0a37743e1cdc00964e5577e0bdc4d996512febd Mon Sep 17 00:00:00 2001 From: Alexey Suhov Date: Fri, 16 Oct 2020 21:03:46 +0300 Subject: [PATCH 19/35] [install_dependencies.sh] install latest cmake if current version is lower 3.13 (#2695) * [install_dependencies.sh] install latest cmake if current version is lower 3.13 * add shellcheck for Ubuntu * install python 2.7 for Ubuntu --- build-instruction.md | 6 ++--- ...encies.sh => install_build_dependencies.sh | 23 +++++++++++-------- 2 files changed, 16 insertions(+), 13 deletions(-) rename install_dependencies.sh => install_build_dependencies.sh (89%) diff --git a/build-instruction.md b/build-instruction.md index 801c6f53fc4069..9d16305d98b202 100644 
--- a/build-instruction.md +++ b/build-instruction.md @@ -69,13 +69,13 @@ The software was validated on: cd openvino git submodule update --init --recursive ``` -2. Install build dependencies using the `install_dependencies.sh` script in the +2. Install build dependencies using the `install_build_dependencies.sh` script in the project root folder. ```sh - chmod +x install_dependencies.sh + chmod +x install_build_dependencies.sh ``` ```sh - ./install_dependencies.sh + ./install_build_dependencies.sh ``` 3. By default, the build enables the Inference Engine GPU plugin to infer models on your Intel® Processor Graphics. This requires you to diff --git a/install_dependencies.sh b/install_build_dependencies.sh similarity index 89% rename from install_dependencies.sh rename to install_build_dependencies.sh index 8e9ea32b707b5c..64b69bf62feae6 100755 --- a/install_dependencies.sh +++ b/install_build_dependencies.sh @@ -32,7 +32,6 @@ if [ -f /etc/lsb-release ]; then sudo -E apt update sudo -E apt-get install -y \ build-essential \ - cmake \ curl \ wget \ libssl-dev \ @@ -46,6 +45,8 @@ if [ -f /etc/lsb-release ]; then automake \ libtool \ autoconf \ + shellcheck \ + python \ libcairo2-dev \ libpango1.0-dev \ libglib2.0-dev \ @@ -101,13 +102,6 @@ elif [ -f /etc/redhat-release ]; then sudo -E yum install -y rh-python36 source scl_source enable rh-python36 - wget https://cmake.org/files/v3.12/cmake-3.12.3.tar.gz - tar xf cmake-3.12.3.tar.gz - cd cmake-3.12.3 - ./configure - make -j16 - sudo -E make install - echo echo "FFmpeg is required for processing audio and video streams with OpenCV. 
Please select your preferred method for installing FFmpeg:" echo @@ -135,7 +129,6 @@ elif [ -f /etc/os-release ] && grep -q "raspbian" /etc/os-release; then sudo -E apt update sudo -E apt-get install -y \ build-essential \ - cmake \ curl \ wget \ libssl-dev \ @@ -166,4 +159,14 @@ elif [ -f /etc/os-release ] && grep -q "raspbian" /etc/os-release; then fi else echo "Unknown OS, please install build dependencies manually" -fi \ No newline at end of file +fi + +# cmake 3.13 or higher is required to build OpenVINO +current_cmake_version=$(cmake --version | sed -ne 's/[^0-9]*\(\([0-9]\.\)\{0,4\}[0-9][^.]\).*/\1/p') +required_cmake_ver=3.13 +if [ ! "$(printf '%s\n' "$required_cmake_ver" "$current_cmake_version" | sort -V | head -n1)" = "$required_cmake_ver" ]; then + wget "https://github.com/Kitware/CMake/releases/download/v3.18.4/cmake-3.18.4.tar.gz" + tar xf cmake-3.18.4.tar.gz + (cd cmake-3.18.4 && ./bootstrap --parallel="$(nproc --all)" && make --jobs="$(nproc --all)" && sudo make install) + rm -rf cmake-3.18.4 cmake-3.18.4.tar.gz +fi From 2b5ed2e9ebb766851e639cfc425f5ba416fed144 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Karzy=C5=84ski?= <4430709+postrational@users.noreply.github.com> Date: Sun, 18 Oct 2020 17:08:06 +0200 Subject: [PATCH 20/35] Tweaks for ONNX scoreboard (#2697) --- ngraph/python/requirements.txt | 2 +- ngraph/python/tests/test_onnx/__init__.py | 0 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 ngraph/python/tests/test_onnx/__init__.py diff --git a/ngraph/python/requirements.txt b/ngraph/python/requirements.txt index badb7a7dd5175d..49afa45678c7ae 100644 --- a/ngraph/python/requirements.txt +++ b/ngraph/python/requirements.txt @@ -1,2 +1,2 @@ numpy -typing +typing; python_version < '3.6' diff --git a/ngraph/python/tests/test_onnx/__init__.py b/ngraph/python/tests/test_onnx/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 From cc2bfcf1d7b7d28fc8d1f867ce91b659edba643c Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Micha=C5=82=20Karzy=C5=84ski?= <4430709+postrational@users.noreply.github.com> Date: Sun, 18 Oct 2020 17:12:25 +0200 Subject: [PATCH 21/35] Improve python_wheel CMake target (#2688) --- ngraph/python/CMakeLists.txt | 36 ++++++-------------- ngraph/python/build_wheel.py.in | 60 +++++++++++++++++++-------------- ngraph/python/build_wheel.sh.in | 54 ----------------------------- 3 files changed, 45 insertions(+), 105 deletions(-) delete mode 100644 ngraph/python/build_wheel.sh.in diff --git a/ngraph/python/CMakeLists.txt b/ngraph/python/CMakeLists.txt index 3b3f7c1303cb7c..85254c66a50573 100644 --- a/ngraph/python/CMakeLists.txt +++ b/ngraph/python/CMakeLists.txt @@ -23,37 +23,23 @@ include(ExternalProject) ExternalProject_Add( pybind11 GIT_REPOSITORY "https://github.com/pybind/pybind11.git" - GIT_TAG "v2.4.3" + GIT_TAG "v2.5.0" SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/pybind11" CONFIGURE_COMMAND "" BUILD_COMMAND "" INSTALL_COMMAND "" ) -if (WIN32) - set(BUILD_PY_IN "${CMAKE_CURRENT_SOURCE_DIR}/build_wheel.py.in") - set(BUILD_PY "${CMAKE_CURRENT_BINARY_DIR}/build_wheel.py") - configure_file(${BUILD_PY_IN} ${BUILD_PY} @ONLY) +set(BUILD_PY_IN "${CMAKE_CURRENT_SOURCE_DIR}/build_wheel.py.in") +set(BUILD_PY "${CMAKE_CURRENT_BINARY_DIR}/build_wheel.py") +configure_file(${BUILD_PY_IN} ${BUILD_PY} @ONLY) - add_custom_command( - DEPENDS pybind11 - OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/dist/ - POST_BUILD - WORKING_DIR ${CMAKE_CURRENT_BINARY_DIR} - COMMAND python ${BUILD_PY} - ) -else() - set(BUILD_SH_IN "${CMAKE_CURRENT_SOURCE_DIR}/build_wheel.sh.in") - set(BUILD_SH "${CMAKE_CURRENT_BINARY_DIR}/build_wheel.sh") - configure_file(${BUILD_SH_IN} ${BUILD_SH} @ONLY) - - add_custom_command( - DEPENDS pybind11 - OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/dist/ - POST_BUILD - WORKING_DIR ${CMAKE_CURRENT_BINARY_DIR} - COMMAND ${CMAKE_BUILD_TOOL} -C ../ DESTDIR=python/_install install && bash build_wheel.sh - ) -endif() +add_custom_command( + DEPENDS pybind11 + OUTPUT 
${CMAKE_CURRENT_BINARY_DIR}/dist/ + POST_BUILD + WORKING_DIR ${CMAKE_CURRENT_BINARY_DIR} + COMMAND python ${BUILD_PY} +) add_custom_target(python_wheel DEPENDS ngraph ${CMAKE_CURRENT_BINARY_DIR}/dist/) diff --git a/ngraph/python/build_wheel.py.in b/ngraph/python/build_wheel.py.in index 0cba66758ea656..974d4867c44538 100644 --- a/ngraph/python/build_wheel.py.in +++ b/ngraph/python/build_wheel.py.in @@ -17,57 +17,65 @@ import os import subprocess import sys +import venv -print('Building ngraph wheel for Python {}'.format(sys.version_info.major)) +print("Building ngraph wheel for Python {}".format(sys.version_info.major)) -PYBIND_HEADERS_PATH="@CMAKE_CURRENT_BINARY_DIR@/pybind11" -NGRAPH_CPP_BUILD_PATH="@CMAKE_INSTALL_PREFIX@/@NGRAPH_COMPONENT_PREFIX@" -NGRAPH_ONNX_IMPORT_ENABLE="@NGRAPH_ONNX_IMPORT_ENABLE@" -NGRAPH_VERSION="@NGRAPH_WHEEL_VERSION@" -PYTHON_API_SOURCE_DIR="@CMAKE_CURRENT_SOURCE_DIR@" -BUILD_DIR="@CMAKE_CURRENT_BINARY_DIR@" +PYBIND_HEADERS_PATH = "@CMAKE_CURRENT_BINARY_DIR@/pybind11" +NGRAPH_CPP_BUILD_PATH = "@CMAKE_INSTALL_PREFIX@/@NGRAPH_COMPONENT_PREFIX@" +NGRAPH_ONNX_IMPORT_ENABLE = "@NGRAPH_ONNX_IMPORT_ENABLE@" +NGRAPH_VERSION = "@NGRAPH_WHEEL_VERSION@" +PYTHON_API_SOURCE_DIR = "@CMAKE_CURRENT_SOURCE_DIR@" +BUILD_DIR = "@CMAKE_CURRENT_BINARY_DIR@" -BUILD_DEPS = ['setuptools', 'wheel'] +BUILD_DEPS = ["setuptools", "wheel", "pip"] try: - venv_dir = os.path.join(os.path.curdir, 'whl_build_venv') + venv_dir = os.path.join(os.path.curdir, "whl_build_venv") print("Creating a virtualenv to build the wheel in: ", os.path.abspath(venv_dir)) - subprocess.check_call(['virtualenv', venv_dir]) + venv.create(venv_dir, with_pip=True) - venv_activator = os.path.abspath(os.path.join(venv_dir, "Scripts", "activate.bat")) - print("Activating the virtualenv...") - os.startfile(venv_activator) + venv_python = ( + os.path.abspath(os.path.join(venv_dir, "Scripts", "python")) + if os.name == "nt" + else os.path.abspath(os.path.join(venv_dir, "bin", "python")) + ) 
print("Installing build dependencies...") - pip_install_cmd = ['pip', 'install', '-U'] + pip_install_cmd = [venv_python, "-m", "pip", "install", "-U"] pip_install_cmd.extend(BUILD_DEPS) subprocess.check_call(pip_install_cmd) - build_env_variables = {"PYBIND_HEADERS_PATH": PYBIND_HEADERS_PATH, - "NGRAPH_CPP_BUILD_PATH": NGRAPH_CPP_BUILD_PATH, - "NGRAPH_ONNX_IMPORT_ENABLE": NGRAPH_ONNX_IMPORT_ENABLE, - "NGRAPH_VERSION": NGRAPH_VERSION} + build_env_variables = { + "PYBIND_HEADERS_PATH": PYBIND_HEADERS_PATH, + "NGRAPH_CPP_BUILD_PATH": NGRAPH_CPP_BUILD_PATH, + "NGRAPH_ONNX_IMPORT_ENABLE": NGRAPH_ONNX_IMPORT_ENABLE, + "NGRAPH_VERSION": NGRAPH_VERSION, + } env = os.environ env.update(build_env_variables) print("Running setup.py bdist_wheel") - build_log = subprocess.Popen(['python', os.path.join(PYTHON_API_SOURCE_DIR, 'setup.py'), 'bdist_wheel'], - stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, universal_newlines=True) + build_log = subprocess.Popen( + [venv_python, os.path.join(PYTHON_API_SOURCE_DIR, "setup.py"), "bdist_wheel"], + stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, universal_newlines=True, + ) for line in build_log.stdout: sys.stdout.write(line) print("Running setup.py sdist") - subprocess.check_call(['python', os.path.join(PYTHON_API_SOURCE_DIR, 'setup.py'), 'sdist']) + subprocess.check_call([venv_python, os.path.join(PYTHON_API_SOURCE_DIR, "setup.py"), "sdist"]) + + output_dir = os.path.join(PYTHON_API_SOURCE_DIR, "dist") + print("\n>>> NOTE: nGraph Python packages created in ", output_dir) + print("\n".join(os.listdir(output_dir))) - print("Deactivating the temporary build virtualenv") - venv_deactivator = os.path.abspath(os.path.join(venv_dir, "Scripts", "deactivate.bat")) - os.startfile(venv_deactivator) except subprocess.CalledProcessError as err: print("Could not complete the wheel building process") print("Command that failed: ", err.cmd) if err.stdout is not None: - print("Command std output: ", err.stdout.decode('utf-8')) + 
print("Command std output: ", err.stdout.decode("utf-8")) if err.stderr is not None: - print("Command err output: ", err.stderr.decode('utf-8')) + print("Command err output: ", err.stderr.decode("utf-8")) sys.exit(1) diff --git a/ngraph/python/build_wheel.sh.in b/ngraph/python/build_wheel.sh.in deleted file mode 100644 index 597ffc052c34fe..00000000000000 --- a/ngraph/python/build_wheel.sh.in +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env bash -# ****************************************************************************** -# Copyright 2017-2020 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ****************************************************************************** - -set -e - -export PYBIND_HEADERS_PATH=@CMAKE_CURRENT_BINARY_DIR@/pybind11 -export NGRAPH_CPP_BUILD_PATH=@CMAKE_CURRENT_BINARY_DIR@/_install/@CMAKE_INSTALL_PREFIX@/@NGRAPH_COMPONENT_PREFIX@ -export NGRAPH_ONNX_IMPORT_ENABLE=@NGRAPH_ONNX_IMPORT_ENABLE@ -export NGRAPH_VERSION=@NGRAPH_WHEEL_VERSION@ - -SOURCE_DIR=@CMAKE_CURRENT_SOURCE_DIR@ -BUILD_DIR=@CMAKE_CURRENT_BINARY_DIR@ - -! PYTHON2_DETECTED=$(($(python -c 'import sys; print(sys.version_info.major)' 2> /dev/null) == 2)) -! 
PYTHON3_DETECTED=$(($(python3 -c 'import sys; print(sys.version_info.major)' 2> /dev/null) == 3)) - -mkdir -p build - -if [ "${PYTHON2_DETECTED}" == 1 ]; then - echo "Building wheel for Python 2" - python --version - cd ${BUILD_DIR} - virtualenv -p "$(command -v python)" build/venv2 - source build/venv2/bin/activate - pip install --upgrade setuptools pip wheel - python ${SOURCE_DIR}/setup.py bdist_wheel - deactivate -fi - -if [ "${PYTHON3_DETECTED}" == 1 ]; then - echo "Building wheel for Python 3" - python3 --version - cd ${BUILD_DIR} - virtualenv -p "$(command -v python3)" build/venv3 - source build/venv3/bin/activate - pip install --upgrade setuptools pip wheel - python ${SOURCE_DIR}/setup.py bdist_wheel - python ${SOURCE_DIR}/setup.py sdist - deactivate -fi From cc569d2254044f5c37414ea53a6b1f6843ebd01b Mon Sep 17 00:00:00 2001 From: Roman Lyamin Date: Sun, 18 Oct 2020 20:47:22 +0300 Subject: [PATCH 22/35] [IE CLDNN] Added HSigmoid operation (#2700) --- .../src/cldnn_engine/cldnn_program.cpp | 9 ++++ .../src/cldnn_engine/cldnn_program.h | 1 + .../single_layer_tests/activation.cpp | 3 +- .../include/single_layer_tests/activation.hpp | 1 + .../ngraph_functions/utils/ngraph_helpers.hpp | 3 +- .../tests/ngraph_functions/src/activation.cpp | 2 + .../thirdparty/clDNN/api/activation.hpp | 1 + .../kernel_selector/common/common_types.h | 1 + .../kernel_selector/core/common/jitter.cpp | 21 ++++++--- .../core/kernel_selector_common.cpp | 1 + .../clDNN/src/kernel_selector_helper.cpp | 2 + .../test_cases/activation_simple_gpu_test.cpp | 46 ++++++++++++++++++- 12 files changed, 82 insertions(+), 9 deletions(-) diff --git a/inference-engine/src/cldnn_engine/cldnn_program.cpp b/inference-engine/src/cldnn_engine/cldnn_program.cpp index cda4ab7af4c72d..28a25291d11758 100644 --- a/inference-engine/src/cldnn_engine/cldnn_program.cpp +++ b/inference-engine/src/cldnn_engine/cldnn_program.cpp @@ -830,6 +830,7 @@ Program::LayerType Program::LayerTypeFromStr(const std::string &str) { { 
"Ceiling" , Ceiling }, { "Erf" , Erf }, { "HardSigmoid" , HardSigmoid }, + { "HSigmoid", HSigmoid }, { "Log" , Log }, { "Neg" , Neg }, { "Reciprocal" , Reciprocal }, @@ -1399,6 +1400,7 @@ void Program::CreateSingleLayerPrimitive(cldnn::topology& topology, InferenceEng case Ceiling: case Erf: case HardSigmoid: + case HSigmoid: case Log: case Neg: case Reciprocal: @@ -3078,6 +3080,8 @@ void Program::CreateActivationPrimitive(cldnn::topology& topology, InferenceEngi activationType = Exp; } else if (activation_type == "not") { activationType = Not; + } else if (activation_type == "hsigmoid") { + activationType = HSigmoid; } else { THROW_CLDNN_EXCEPTION("Unsupported activation type (" + activation_type + ") in layer " + layer->name); @@ -3199,6 +3203,11 @@ void Program::CreateActivationPrimitive(cldnn::topology& topology, InferenceEngi params.b = layer->GetParamAsFloat("beta", 0.5f); break; } + case HSigmoid: + { + func = cldnn::activation_func::hsigmoid; + break; + } case Log: { func = cldnn::activation_func::log; diff --git a/inference-engine/src/cldnn_engine/cldnn_program.h b/inference-engine/src/cldnn_engine/cldnn_program.h index 67a466cea77aa4..ed677552ac14b5 100644 --- a/inference-engine/src/cldnn_engine/cldnn_program.h +++ b/inference-engine/src/cldnn_engine/cldnn_program.h @@ -195,6 +195,7 @@ class Program { Ceiling, Erf, HardSigmoid, + HSigmoid, Log, Neg, Reciprocal, diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/activation.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/activation.cpp index 6584d247427145..154a9986a60c81 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/activation.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/activation.cpp @@ -44,7 +44,8 @@ const std::map>> activationTypes {Ceiling, {}}, {Mish, {}}, {HSwish, {}}, - {SoftPlus, {}} + {SoftPlus, {}}, 
+ {HSigmoid, {}} }; std::map, std::vector>> basic = { diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/activation.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/activation.hpp index 44b02479ee0122..bbf6e9f32b6f04 100644 --- a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/activation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/activation.hpp @@ -71,6 +71,7 @@ static std::map activationNames = {ngraph::helpers::ActivationTypes::HSwish, "HSwish"}, {ngraph::helpers::ActivationTypes::SoftPlus, "SoftPlus"}, {ngraph::helpers::ActivationTypes::Swish, "Swish"}, + {ngraph::helpers::ActivationTypes::HSigmoid, "HSigmoid"}, }; typedef std::tuple< diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp b/inference-engine/tests/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp index 32f767c95c8797..9a32f229c4b7cf 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp +++ b/inference-engine/tests/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp @@ -111,7 +111,8 @@ enum ActivationTypes { Mish, HSwish, SoftPlus, - Swish + Swish, + HSigmoid }; enum EltwiseTypes { diff --git a/inference-engine/tests/ngraph_functions/src/activation.cpp b/inference-engine/tests/ngraph_functions/src/activation.cpp index c09df345184631..cbccfb670de187 100644 --- a/inference-engine/tests/ngraph_functions/src/activation.cpp +++ b/inference-engine/tests/ngraph_functions/src/activation.cpp @@ -102,6 +102,8 @@ std::shared_ptr makeActivation(const ngraph::Output &in, auto beta = std::make_shared(type, inShape, constantsValue[0]); return std::make_shared(in, beta); } + case ngraph::helpers::ActivationTypes::HSigmoid: + return std::make_shared(in); default: throw std::runtime_error("Can't create layer for this activation type"); } diff --git 
a/inference-engine/thirdparty/clDNN/api/activation.hpp b/inference-engine/thirdparty/clDNN/api/activation.hpp index 9b892d6030a1b7..ce5d8e8db7ed83 100644 --- a/inference-engine/thirdparty/clDNN/api/activation.hpp +++ b/inference-engine/thirdparty/clDNN/api/activation.hpp @@ -63,6 +63,7 @@ enum class activation_func { reciprocal, // (1/val) erf, // Gauss error function hard_sigmoid, // max(0, min(1, a * val + b)) (a,b are additional params) + hsigmoid, // min(max(val + 3, 0), 6) / 6 selu, // for val <= 0: b * (a * e^val - a); for val > 0: b * val (a,b are additional params) sign, // val > 0: 1; val < 0: -1; val == 0: 0 softplus, // ln(exp(val) + 1) diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/common/common_types.h b/inference-engine/thirdparty/clDNN/kernel_selector/common/common_types.h index 85a27934e16fd7..28283742fed0d2 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/common/common_types.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/common/common_types.h @@ -145,6 +145,7 @@ enum class ActivationFunction { POW, ERF, HARD_SIGMOID, + HSIGMOID, RECIPROCAL, SELU, SIGN, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/common/jitter.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/common/jitter.cpp index 52404348df4197..d5664401a290e6 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/common/jitter.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/common/jitter.cpp @@ -584,7 +584,7 @@ class WeightTensorJitConstant : public TensorBaseTJitConstantmacroName = MacroName(name, macroNameArgs); this->calcFunction = FuncBody(name, funcArgs, body); std::string osv = "16", isv = "16"; - if (l == WeightsLayout::g_os_zyx_is_osv16_isv16) { + if (l == WeightsLayout::g_os_zyx_is_osv16_isv16) { osv = "16"; isv = "16"; } else if (l == WeightsLayout::g_os_zyx_is_osv16_isv32) { osv = "16"; isv = "32"; @@ -741,7 +741,7 @@ JitDefinitions WeightTensorJitConstant::GetDefinitions() const 
{ if (is_grouped_4d_layout) { index_macro_name = _name + "_GET_INDEX(g, o, i, y, x)"; auto layout_str = toString(layout); - if (layout == WeightsLayout::goiyx) + if (layout == WeightsLayout::goiyx) index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", g, o, i, 0, y, x)"; else if (layout == WeightsLayout::g_os_is_yx_isv16_osv16) index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", g, o, i, 0, y, x, 16)"; @@ -765,7 +765,7 @@ JitDefinitions WeightTensorJitConstant::GetDefinitions() const { if (is_grouped_5d_layout) { index_macro_name = _name + "_GET_INDEX(g, o, i, z, y, x)"; auto layout_str = toString(layout); - if (layout == WeightsLayout::goizyx) + if (layout == WeightsLayout::goizyx) index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", g, o, i, z, y, x)"; else if (layout == WeightsLayout::g_os_is_zyx_isv16_osv16) index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", g, o, i, z, y, x, 16)"; @@ -787,7 +787,7 @@ JitDefinitions WeightTensorJitConstant::GetDefinitions() const { if (is_common_4d_layout) { index_macro_name = _name + "_GET_INDEX(o, i, y, x)"; auto layout_str = toString(layout); - if (layout == WeightsLayout::oiyx) + if (layout == WeightsLayout::oiyx) index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", 0, o, i, 0, y, x)"; else if (layout == WeightsLayout::os_is_yx_isv16_osv16) index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", 0, o, i, 0, y, x, 16)"; @@ -814,7 +814,7 @@ JitDefinitions WeightTensorJitConstant::GetDefinitions() const { if (is_common_5d_layout) { index_macro_name = _name + "_GET_INDEX(o, i, z, y, x)"; auto layout_str = toString(layout); - if (layout == WeightsLayout::oizyx) + if (layout == WeightsLayout::oizyx) index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", 0, o, i, z, y, x)"; else if (layout == WeightsLayout::os_is_zyx_isv16_osv16) index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", 0, o, i, z, y, x, 
16)"; @@ -1022,6 +1022,15 @@ JitConstants MakeActivationJitConstants(ActivationFunction activation_function, max_func(zero, min_func(one, (JitTerm)((alpha * input + beta).str()))).str())); break; } + case ActivationFunction::HSIGMOID: { + std::string type_suffix = out_dt == Datatype::F32 ? "f" : "h"; + const JitTerm three("3." + type_suffix); + const JitTerm six("6." + type_suffix); + jitConstants.AddConstant(MakeJitConstant( + macro_def, + (min_func(max_func(zero, input + three), six) / six).str())); + break; + } case ActivationFunction::SIGN: jitConstants.AddConstant(MakeJitConstant( macro_def, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/kernel_selector_common.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/kernel_selector_common.cpp index 95bac9711c2899..c2a4998fc1866c 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/kernel_selector_common.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/kernel_selector_common.cpp @@ -76,6 +76,7 @@ std::string toString(ActivationFunction activation) { case ActivationFunction::NEGATIVE: method = "NEGATIVE"; break; case ActivationFunction::ERF: method = "ERF"; break; case ActivationFunction::HARD_SIGMOID: method = "HARD_SIGMOID"; break; + case ActivationFunction::HSIGMOID: method = "HSIGMOID"; break; case ActivationFunction::RECIPROCAL: method = "RECIPROCAL"; break; case ActivationFunction::SELU: method = "SELU"; break; case ActivationFunction::SIGN: method = "SIGN"; break; diff --git a/inference-engine/thirdparty/clDNN/src/kernel_selector_helper.cpp b/inference-engine/thirdparty/clDNN/src/kernel_selector_helper.cpp index c0e0683833b0f7..0d6f168eaaa39d 100644 --- a/inference-engine/thirdparty/clDNN/src/kernel_selector_helper.cpp +++ b/inference-engine/thirdparty/clDNN/src/kernel_selector_helper.cpp @@ -693,6 +693,8 @@ kernel_selector::activation_function get_kernel_selector_activation_param(activa return kernel_selector::activation_function::SOFTSIGN; case 
cldnn::activation_func::hard_sigmoid: return kernel_selector::activation_function::HARD_SIGMOID; + case cldnn::activation_func::hsigmoid: + return kernel_selector::activation_function::HSIGMOID; case cldnn::activation_func::swish: return kernel_selector::activation_function::SWISH; case cldnn::activation_func::hswish: diff --git a/inference-engine/thirdparty/clDNN/tests/test_cases/activation_simple_gpu_test.cpp b/inference-engine/thirdparty/clDNN/tests/test_cases/activation_simple_gpu_test.cpp index 8c801b5305acc9..5f99ac86474b13 100644 --- a/inference-engine/thirdparty/clDNN/tests/test_cases/activation_simple_gpu_test.cpp +++ b/inference-engine/thirdparty/clDNN/tests/test_cases/activation_simple_gpu_test.cpp @@ -735,6 +735,46 @@ TEST(activation_f16_fw_gpu, basic_yxfb_hswish) { } } +TEST(activation_f16_fw_gpu, basic_yxfb_hsigmoid) { + const auto& engine = get_test_engine(); + + auto input = memory::allocate(engine, { data_types::f16, format::yxfb, { 1, 2, 5, 2 } }); + set_values(input, + { FLOAT16(0.0f), FLOAT16(-2.0f), FLOAT16(-3.0f), FLOAT16(4.0f), FLOAT16(5.0f), + FLOAT16(2.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(-6.0f), + FLOAT16(3.0f), FLOAT16(-3.0f), FLOAT16(3.0f), FLOAT16(5.0f), FLOAT16(1.0f), + FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(-1.0f), FLOAT16(1.0f) }); + + topology topology( + input_layout("input", input.get_layout()), + activation("hsigmoid", "input", activation_func::hsigmoid)); + network network(engine, topology); + network.set_input_data("input", input); + auto outputs = network.execute(); + EXPECT_EQ(outputs.size(), size_t(1)); + EXPECT_EQ(outputs.begin()->first, "hsigmoid"); + + auto output_memory = outputs.at("hsigmoid").get_memory(); + auto output_layout = output_memory.get_layout(); + auto output_ptr = output_memory.pointer(); + auto input_ptr = input.pointer(); + + int y_size = output_layout.size.spatial[1]; + int x_size = output_layout.size.spatial[0]; + int f_size = output_layout.size.feature[0]; + int 
b_size = output_layout.size.batch[0]; + EXPECT_EQ(output_layout.format, format::yxfb); + EXPECT_EQ(y_size, 2); + EXPECT_EQ(x_size, 5); + EXPECT_EQ(f_size, 2); + EXPECT_EQ(b_size, 1); + + for (size_t i = 0; i < output_layout.get_linear_size(); ++i) { + EXPECT_NEAR((FLOAT16)(std::fmin(std::fmax(0.f, (float)input_ptr[i] + 3.f), 6.f) / 6.f), + output_ptr[i], 1e-3f); + } +} + TEST(activation_f32_fw_gpu, basic_yxfb_all_functions) { // Input: @@ -782,7 +822,8 @@ TEST(activation_f32_fw_gpu, basic_yxfb_all_functions) activation_func::swish, activation_func::hswish, activation_func::mish, - activation_func::gelu + activation_func::gelu, + activation_func::hsigmoid }; activation_additional_params params = { 0.5f, 2.5f }; @@ -910,6 +951,9 @@ TEST(activation_f32_fw_gpu, basic_yxfb_all_functions) EXPECT_NEAR(0.5f * (float)input_ptr[i] * (1.f + std::erf((float)(input_ptr[i]) / std::sqrt(2.0f))), output_ptr[i], 1e-5f); break; + case activation_func::hsigmoid: + EXPECT_FLOAT_EQ(std::fmin(std::fmax(0.f, (float)input_ptr[i] + 3.f), 6.f) / 6.f, output_ptr[i]); + break; default: break; } From 84b5fc51dcb63f3f83e8ed8954d32bc979da4f71 Mon Sep 17 00:00:00 2001 From: Ivan Tikhonov Date: Mon, 19 Oct 2020 06:53:46 +0300 Subject: [PATCH 23/35] [opset5] ngraph implementation of Loop op (#2583) * Loop op ngraph implementation, update IE IR Reader and ngraph to cnn converter * refactoring SubGraphOp class * type prop unit tests * ngraph code style * update comment * single layer tests for Loop operation * fix file name * Add SpecialBodyPorts attribute in Loop op, update single layer tests * add several new tests cases, strict checks in Loop impl, temporary disable single layer tests * ngraph codestyle, refactoring, clone_new_args test * resolve review remarks * fix build * fix tests * add a new constructor of Loop op, resolve review remarks --- .../src/convert_function_to_cnn_network.cpp | 2 + .../src/ie_cnn_layer_builder_ngraph.cpp | 26 +- .../src/readers/ir_reader/ie_ir_parser.cpp | 38 +- 
.../src/readers/ir_reader/ie_ir_parser.hpp | 5 + .../single_layer_tests/loop.cpp | 34 + .../skip_tests_config.cpp | 5 +- .../include/single_layer_tests/loop.hpp | 40 + .../shared/src/single_layer_tests/loop.cpp | 159 ++++ ngraph/core/include/ngraph/op/loop.hpp | 105 +++ ngraph/core/include/ngraph/op/lstm_cell.hpp | 2 +- .../include/ngraph/op/tensor_iterator.hpp | 366 +-------- .../include/ngraph/op/util/sub_graph_base.hpp | 376 ++++++++- ngraph/core/include/ngraph/ops.hpp | 1 + .../core/include/ngraph/opsets/opset5_tbl.hpp | 1 + ngraph/core/src/graph_util.cpp | 5 +- ngraph/core/src/op/loop.cpp | 350 ++++++++ ngraph/core/src/op/tensor_iterator.cpp | 342 -------- ngraph/core/src/op/util/sub_graph_base.cpp | 332 +++++++- ngraph/test/CMakeLists.txt | 2 + ngraph/test/copy.cpp | 68 ++ ngraph/test/type_prop/loop.cpp | 753 ++++++++++++++++++ ngraph/test/type_prop/ti.cpp | 204 +++++ 22 files changed, 2488 insertions(+), 728 deletions(-) create mode 100644 inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/loop.cpp create mode 100644 inference-engine/tests/functional/plugin/shared/include/single_layer_tests/loop.hpp create mode 100644 inference-engine/tests/functional/plugin/shared/src/single_layer_tests/loop.cpp create mode 100644 ngraph/core/include/ngraph/op/loop.hpp create mode 100644 ngraph/core/src/op/loop.cpp create mode 100644 ngraph/test/type_prop/loop.cpp create mode 100644 ngraph/test/type_prop/ti.cpp diff --git a/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp b/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp index 50a296fafe1482..8a992a40f4962b 100644 --- a/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp +++ b/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp @@ -43,6 +43,7 @@ #include "caseless.hpp" #include #include +#include #include "transformations/utils/utils.hpp" #include "transformations/rt_info/fused_names_attribute.hpp" 
#include "transformations/rt_info/primitives_priority_attribute.hpp" @@ -809,6 +810,7 @@ void convertFunctionToICNNNetwork(const std::shared_ptr>(), std::make_shared>(), std::make_shared>(), + std::make_shared>(), std::make_shared>(), std::make_shared>(), std::make_shared>(), diff --git a/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp b/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp index 3f459e844e0f02..c9adf5389e176a 100644 --- a/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp +++ b/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp @@ -43,6 +43,7 @@ #include #include #include +#include #include #include "legacy/graph_transformer.h" @@ -114,8 +115,7 @@ CNNLayer::Ptr NodeConverter::createLayer(const std::share return res; } -template <> -CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { +CNNLayer::Ptr createSubGraphLayer(const std::shared_ptr& layer) { auto find_input_idx = [](const CNNLayerPtr& where, const DataPtr& what) { auto it = std::find_if(where->insData.begin(), where->insData.end(), [&](const DataWeakPtr& wk_ptr) { auto layer_data = wk_ptr.lock(); @@ -129,7 +129,7 @@ CNNLayer::Ptr NodeConverter::createLayer(const std:: return it - where->insData.begin(); }; - auto tensor_iterator = ngraph::as_type_ptr(layer); + auto tensor_iterator = std::dynamic_pointer_cast(layer); if (!tensor_iterator) { THROW_IE_EXCEPTION << "Cannot cast layer to TensorIterator."; } @@ -142,8 +142,8 @@ CNNLayer::Ptr NodeConverter::createLayer(const std:: std::map out_info_map; // inputs/outputs of TensorIterator (ngraph representation) - auto parameters = tensor_iterator->get_body()->get_parameters(); - auto results = tensor_iterator->get_body()->get_results(); + auto parameters = tensor_iterator->get_function()->get_parameters(); + auto results = tensor_iterator->get_function()->get_results(); // Convert body (ngraph representation) to CNNNetwork. 
// This network will contain nodes of type = "Input" and data nodes with wrong names. @@ -155,7 +155,7 @@ CNNLayer::Ptr NodeConverter::createLayer(const std:: // This map will save information about data nodes std::map> layer_name_to_tensor_desc; { - CNNNetwork body_net(tensor_iterator->get_body()); + CNNNetwork body_net(tensor_iterator->get_function()); CNNNetwork net(InferenceEngine::details::convertFunctionToICNNNetwork(body_net.getFunction(), body_net)); // Paranoid check for cycles bool res = CNNNetForestDFS( @@ -356,6 +356,20 @@ CNNLayer::Ptr NodeConverter::createLayer(const std:: return res; } +template<> +CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { + auto res = createSubGraphLayer(layer); + res->type = "TensorIterator"; + return res; +} + +template<> +CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { + auto res = createSubGraphLayer(layer); + res->type = "Loop"; + return res; +} + template <> CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { LayerParams params = {layer->get_friendly_name(), "Const", diff --git a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp index 8df430eb0d4ba6..d07a3e9250ddc4 100644 --- a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp +++ b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -477,6 +478,7 @@ std::shared_ptr V10Parser::createNode(const std::vector>("Tile"), std::make_shared>("TopK"), std::make_shared>("TensorIterator"), + std::make_shared>("Loop"), std::make_shared>("Transpose"), std::make_shared>("Unsqueeze"), std::make_shared>("LogicalAnd"), @@ -662,12 +664,12 @@ std::shared_ptr V10Parser::LayerCreator -std::shared_ptr V10Parser::LayerCreator::createLayer( - const ngraph::OutputVector& inputs, const pugi::xml_node& node, std::istream& binStream, - const GenericLayerParams& 
layerParsePrms) { - auto tensor_iterator = std::make_shared(); +// SubGraph layer +std::shared_ptr +V10Parser::LayerBaseCreator::fillSubGraphLayer(const ngraph::OutputVector &inputs, const pugi::xml_node &node, + std::istream &binStream, + const V10Parser::GenericLayerParams &layerParsePrms, + std::shared_ptr tensor_iterator) { tensor_iterator->set_friendly_name(GetStrAttr(node, "name")); auto body_node = node.child("body"); @@ -695,7 +697,7 @@ std::shared_ptr V10Parser::LayerCreator(result_nodes, parameter_nodes); - tensor_iterator->set_body(body); + tensor_iterator->set_function(body); // Parse PortMap: inputs std::map input_map; @@ -795,7 +797,8 @@ std::shared_ptr V10Parser::LayerCreatorget_concatenated_slices(*body_result, start, stride, part_size, end, axis); if (!is_sliced_input_exists) { - tensor_iterator->set_num_iterations((std::abs(end - start)) / part_size); + if (auto ti = std::dynamic_pointer_cast(tensor_iterator)) + ti->set_num_iterations((std::abs(end - start)) / part_size); } } else { // otherwise create ngraph::TensorIterator::BodyOutput. -1 means last iteration. 
@@ -807,6 +810,25 @@ std::shared_ptr V10Parser::LayerCreator +std::shared_ptr V10Parser::LayerCreator::createLayer( + const ngraph::OutputVector& inputs, const pugi::xml_node& node, std::istream& binStream, + const GenericLayerParams& layerParsePrms) { + auto ti = std::make_shared(); + return fillSubGraphLayer(inputs, node, binStream, layerParsePrms, ti); + } + +// Loop layer +template <> +std::shared_ptr V10Parser::LayerCreator::createLayer( + const ngraph::OutputVector& inputs, const pugi::xml_node& node, std::istream& binStream, + const GenericLayerParams& layerParsePrms) { + auto loop = std::make_shared(); + return fillSubGraphLayer(inputs, node, binStream, layerParsePrms, loop); +} + // PriorBoxClustered layer template <> std::shared_ptr V10Parser::LayerCreator::createLayer( diff --git a/inference-engine/src/readers/ir_reader/ie_ir_parser.hpp b/inference-engine/src/readers/ir_reader/ie_ir_parser.hpp index 0d270eed7a2aec..b792d6b35d7a0a 100644 --- a/inference-engine/src/readers/ir_reader/ie_ir_parser.hpp +++ b/inference-engine/src/readers/ir_reader/ie_ir_parser.hpp @@ -6,6 +6,7 @@ #ifdef IR_READER_V10 # include +# include # include # include #endif // IR_READER_V10 @@ -102,6 +103,10 @@ class V10Parser : public IParser { std::string type; protected: + static std::shared_ptr fillSubGraphLayer(const ngraph::OutputVector& inputs, const pugi::xml_node& node, + std::istream& binStream, + const GenericLayerParams& layerParsePrms, + std::shared_ptr sub_graph_node); explicit LayerBaseCreator(const std::string& type): type(type) {} std::string getType() { return type; diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/loop.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/loop.cpp new file mode 100644 index 00000000000000..1b1fe9d8285a94 --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/loop.cpp @@ -0,0 +1,34 @@ +// 
Copyright (C) 2019 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include "single_layer_tests/loop.hpp" +#include "common_test_utils/test_constants.hpp" + +using namespace LayerTestsDefinitions; + +namespace { + // without clip values increase rapidly, so use only seq_lenghts = 2 + std::vector execute_first_iteration{true}; + std::vector is_body_condition_const{true, false}; + std::vector body_condition{true, false}; // works only if is_body_condition_const == true + std::vector trip_count{1, 10, -1}; // -1 means infinity + std::vector, LOOP_IN_TYPE>>> inputs = { + {{{32, 1, 10}, LOOP_IN_TYPE::INVARIANT}, {{32, 1, 10}, LOOP_IN_TYPE::INVARIANT}, {{32, 1, 10}, LOOP_IN_TYPE::MERGED}}, + }; + std::vector netPrecisions = {InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16}; + + INSTANTIATE_TEST_CASE_P(smoke_LoopCommonZeroClip, LoopTest, + ::testing::Combine( + ::testing::ValuesIn(execute_first_iteration), + ::testing::ValuesIn(is_body_condition_const), + ::testing::ValuesIn(body_condition), + ::testing::ValuesIn(trip_count), + ::testing::ValuesIn(inputs), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_CPU)), + LoopTest::getTestCaseName); +} // namespace diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp index e8302db43893e7..582ebe6f0408a4 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp @@ -53,6 +53,9 @@ std::vector disabledTestPatterns() { R"(.*ReverseSequenceLayerTest.*netPRC=(I8|U8).*)", // TODO: Issue: 38841 R"(.*TopKLayerTest.*k=10.*mode=min.*sort=index.*)", - R"(.*TopKLayerTest.*k=5.*sort=(none|index).*)" + R"(.*TopKLayerTest.*k=5.*sort=(none|index).*)", + + // TODO: not supported 
yet, ticket 37690 + R"(.*Loop.*)" }; } diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/loop.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/loop.hpp new file mode 100644 index 00000000000000..bf3b005fdec30d --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/loop.hpp @@ -0,0 +1,40 @@ +// Copyright (C) 2019 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include +#include +#include "functional_test_utils/layer_test_utils.hpp" +#include "ngraph_functions/builders.hpp" +#include "ngraph_functions/utils/ngraph_helpers.hpp" + +namespace LayerTestsDefinitions { +enum LOOP_IN_TYPE { + INVARIANT, + MERGED +}; + +using LoopParams = typename std::tuple< + bool, // ExecuteFirstIteration + bool, // BodyCondition is a constant? + bool, // BodyCondition value, if it is a Const + int64_t, // TripCount, -1 means infinity + std::vector, LOOP_IN_TYPE>>, // inputs + InferenceEngine::Precision, // Network precision + std::string>; // Device name + +class LoopTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon { +public: + static std::string getTestCaseName(const testing::TestParamInfo &obj); + +protected: + void SetUp() override; +}; + +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/loop.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/loop.cpp new file mode 100644 index 00000000000000..b1043a09ca654b --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/loop.cpp @@ -0,0 +1,159 @@ +// Copyright (C) 2019 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include +#include +#include + +#include "ie_core.hpp" + +#include "common_test_utils/common_utils.hpp" +#include 
"functional_test_utils/blob_utils.hpp" +#include "functional_test_utils/precision_utils.hpp" +#include "functional_test_utils/plugin_cache.hpp" +#include "functional_test_utils/skip_tests_config.hpp" + +#include "single_layer_tests/loop.hpp" + +namespace LayerTestsDefinitions { + + std::string LoopTest::getTestCaseName(const testing::TestParamInfo &obj) { + bool execute_first_iteration; + bool is_body_condition_const; + bool body_condition; // works only if is_body_condition_const == + int64_t trip_count; + std::vector, LOOP_IN_TYPE>> inputs; + InferenceEngine::Precision netPrecision; + std::string targetDevice; + std::tie(execute_first_iteration, is_body_condition_const, body_condition, trip_count, inputs, netPrecision, + targetDevice) = obj.param; + + std::vector> inputs_separate; + std::vector types_separate; + for (auto &el : inputs) { + inputs_separate.push_back(el.first); + types_separate.push_back(el.second); + } + std::ostringstream result; + result << "execute_first_iteration" << execute_first_iteration << "_"; + result << "is_body_condition_const=" << is_body_condition_const << "_"; + result << "body_condition=" << body_condition << "_"; + result << "trip_count=" << trip_count << "_"; + result << "IS=" << CommonTestUtils::vec2str(inputs_separate) << "_"; + result << "types=" << CommonTestUtils::vec2str(types_separate) << "_"; + result << "netPRC=" << netPrecision.name() << "_"; + result << "targetDevice=" << targetDevice << "_"; + return result.str(); + } + + void LoopTest::SetUp() { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + SetRefMode(LayerTestsUtils::IE); + bool execute_first_iteration; + bool is_body_condition_const; + bool body_condition; // works only if is_body_condition_const == + int64_t trip_count; + std::vector, LOOP_IN_TYPE>> inputs; + InferenceEngine::Precision netPrecision; + std::tie(execute_first_iteration, is_body_condition_const, body_condition, trip_count, inputs, netPrecision, + targetDevice) = this->GetParam(); + + auto ngPrc = 
FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + + // That which we iterate over + std::vector> inputs_separate; + std::vector types_separate; + for (auto &el : inputs) { + inputs_separate.push_back(el.first); + types_separate.push_back(el.second); + } + // Example: + /* auto X = std::make_shared(ngraph::element::f32, ngraph::Shape{32, 1, 10}); + auto Y = std::make_shared(ngraph::element::f32, ngraph::Shape{32, 1, 10}); + auto M = std::make_shared(ngraph::element::f32, ngraph::Shape{32, 1, 10});*/ + auto params = ngraph::builder::makeParams(ngPrc, inputs_separate); + + // Set up the cell body, a function from (Xi, Yi) -> (Zo) + // Body parameters + const std::vector body_params_shapes(inputs_separate.size(), ngraph::PartialShape::dynamic()); + auto current_iteration = std::make_shared(ngraph::element::i64, ngraph::Shape{1}); + + //Example: +/* auto Xi = std::make_shared(ngraph::element::f32, ngraph::PartialShape::dynamic()); + auto Yi = std::make_shared(ngraph::element::f32, ngraph::PartialShape::dynamic()); + auto M_body = std::make_shared(ngraph::element::f32, ngraph::PartialShape::dynamic());*/ + + ngraph::ParameterVector body_params; + for (const auto &pshape : body_params_shapes) { + auto paramNode = std::make_shared(ngPrc, pshape); + body_params.push_back(paramNode); + } + + std::shared_ptr body_condition_const; + if (is_body_condition_const) { + if (body_condition) { + body_condition_const = std::make_shared( + ngraph::element::boolean, ngraph::Shape{1}, true); + } else { + body_condition_const = std::make_shared( + ngraph::element::boolean, ngraph::Shape{1}, false); + } + } + + auto trip_count_const = + std::make_shared(ngraph::element::i64, ngraph::Shape{1}, trip_count); + + std::shared_ptr exec_condition; + if (execute_first_iteration) { + exec_condition = std::make_shared( + ngraph::element::boolean, ngraph::Shape{1}, true); + } else { + exec_condition = std::make_shared( + ngraph::element::boolean, ngraph::Shape{1}, false); + } + + // 
Body + std::shared_ptr Zo = body_params[0]; + for (int i = 1; i < body_params.size(); ++i) { + Zo = body_params[i] + Zo; + } + + // body_params.insert(body_params.begin(), current_iteration); + auto body = std::make_shared(ngraph::OutputVector{body_condition_const, Zo}, + body_params); + + auto loop = std::make_shared(trip_count_const, exec_condition); + loop->set_function(body); + loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0}); + + for (int i = 0; i < body_params.size(); ++i) { + if (types_separate[i] == LOOP_IN_TYPE::INVARIANT) { + loop->set_invariant_input(body_params[i], params[i]); + } else if (types_separate[i] == LOOP_IN_TYPE::MERGED) { + // todo: support several merged inputs + // now supported only one in this sample + loop->set_merged_input(body_params[i], params[i], Zo); + } + } + + // Output 0 is last Zo + auto out0 = loop->get_iter_value(body_condition_const, -1); + auto out1 = loop->get_iter_value(Zo, -1); + // Output 1 is concat of Zos + // start=0, stride=1, part_size=1, end=-1, axis=1 + auto out2 = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, 1); + + auto result0 = std::make_shared(out0); + auto result1 = std::make_shared(out1); + auto result2 = std::make_shared(out2); + function = std::make_shared(ngraph::ResultVector{result0, result1, result2}, params, "loop"); + } + + + TEST_P(LoopTest, CompareWithRefs) { + Run(); + }; +} // namespace LayerTestsDefinitions diff --git a/ngraph/core/include/ngraph/op/loop.hpp b/ngraph/core/include/ngraph/op/loop.hpp new file mode 100644 index 00000000000000..cc13b48a0fe0b3 --- /dev/null +++ b/ngraph/core/include/ngraph/op/loop.hpp @@ -0,0 +1,105 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +#pragma once + +#include + +#include "ngraph/factory_adapter.hpp" +#include "ngraph/function.hpp" +#include "ngraph/op/constant.hpp" +#include "ngraph/op/parameter.hpp" +#include "ngraph/op/tensor_iterator.hpp" +#include "ngraph/op/util/sub_graph_base.hpp" + +namespace ngraph +{ + namespace op + { + namespace v5 + { + /// \brief Iterate a body over tensors, accumulating into tensors. + class NGRAPH_API Loop : public op::util::SubGraphOp + { + public: + /// \brief Allows to define the purpose of inputs/outputs in the body + struct SpecialBodyPorts + { + SpecialBodyPorts() = default; + SpecialBodyPorts(int64_t in_current_iteration_input_idx, + int64_t in_body_condition_output_idx) + : current_iteration_input_idx(in_current_iteration_input_idx) + , body_condition_output_idx(in_body_condition_output_idx) + { + } + // -1 means the input is not provided, this input is optional + int64_t current_iteration_input_idx = -1; + // -1 means the output is not provided, + // this output is required, throw an exception if not provided + int64_t body_condition_output_idx = -1; + }; + + NGRAPH_RTTI_DECLARATION; + + /// \brief Constructs a Loop operation. + Loop() = default; + + /// \brief Constructs a Loop operation. + /// + /// \param trip_count Node specifies the maximum number of iterations. + /// \param execution_condition Node determines whether to execute the first + /// iteration or not. 
+ Loop(const Output& trip_count, const Output& execution_condition); + + int64_t get_num_iterations() const { return m_num_iterations; } + void set_sliced_input(const std::shared_ptr& parameter, + const Output& value, + int64_t start, + int64_t stride, + int64_t part_size, + int64_t end, + int64_t axis) override + { + NGRAPH_CHECK(false, + "Incorrect type of input. Implicit slicing is not supported in " + "Loop operation."); + } + + Output get_concatenated_slices(const Output& value, + int64_t start, + int64_t stride, + int64_t part_size, + int64_t end, + int64_t axis) override; + + void set_special_body_ports(const SpecialBodyPorts& special_body_ports) + { + m_special_body_ports = special_body_ports; + } + + SpecialBodyPorts get_special_body_ports() const { return m_special_body_ports; } + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr + clone_with_new_inputs(const OutputVector& new_args) const override; + + private: + SpecialBodyPorts m_special_body_ports; + int64_t m_num_iterations = -1; // -1 means infinity + }; + } + } +} diff --git a/ngraph/core/include/ngraph/op/lstm_cell.hpp b/ngraph/core/include/ngraph/op/lstm_cell.hpp index c830cae247fa7c..9b6885d207ca5a 100644 --- a/ngraph/core/include/ngraph/op/lstm_cell.hpp +++ b/ngraph/core/include/ngraph/op/lstm_cell.hpp @@ -293,7 +293,7 @@ namespace ngraph class NGRAPH_API LSTMCell : public util::RNNCellBase { public: - static constexpr NodeTypeInfo type_info{"LSTMCell", 1}; + static constexpr NodeTypeInfo type_info{"LSTMCell", 4}; const NodeTypeInfo& get_type_info() const override { return type_info; } LSTMCell(); /// diff --git a/ngraph/core/include/ngraph/op/tensor_iterator.hpp b/ngraph/core/include/ngraph/op/tensor_iterator.hpp index c59c6314acc00f..ab3579e98fb635 100644 --- a/ngraph/core/include/ngraph/op/tensor_iterator.hpp +++ b/ngraph/core/include/ngraph/op/tensor_iterator.hpp @@ -18,7 +18,6 @@ #include -#include 
"ngraph/factory_adapter.hpp" #include "ngraph/function.hpp" #include "ngraph/op/parameter.hpp" #include "ngraph/op/util/sub_graph_base.hpp" @@ -36,273 +35,9 @@ namespace ngraph static constexpr NodeTypeInfo type_info{"TensorIterator", 0}; const NodeTypeInfo& get_type_info() const override { return type_info; } bool visit_attributes(AttributeVisitor& visitor) override; - // Forward declarations - class SliceInputDescription; - class MergedInputDescription; - class InvariantInputDescription; TensorIterator() = default; - TensorIterator(const OutputVector& values); - - /// \brief Describes a connection between a TensorIterator input and the body. - class InputDescription - { - protected: - /// - /// \brief Constructs a new instance. - /// - /// \param input_index Position of the TensorIterator input - /// \param body_parameter_index Body parameter to receive input - /// - InputDescription(uint64_t input_index, uint64_t body_parameter_index); - InputDescription() = default; - - public: - using type_info_t = DiscreteTypeInfo; - virtual ~InputDescription() {} - virtual std::shared_ptr copy() const = 0; - - virtual const type_info_t& get_type_info() const = 0; - virtual bool visit_attributes(AttributeVisitor& visitor); - - uint64_t m_input_index{0}; - uint64_t m_body_parameter_index{0}; - }; - - /// - /// \brief Describes a body input formed from slices of an input to - /// TensorIterator. - /// - class NGRAPH_API SliceInputDescription : public InputDescription - { - public: - static constexpr type_info_t type_info{"SliceInputDescription", 0}; - const type_info_t& get_type_info() const override { return type_info; } - /// - /// \brief Constructs a new instance. 
- /// - /// \param input_index Position of the TensorIterator input - /// \param body_parameter_index Body parameter position to receive input - /// \param start First index for slices - /// \param stride Step amount for slices - /// \param part_size Width of slices - /// \param end Last index for slices - /// \param axis Axis being sliced - /// - SliceInputDescription(uint64_t input_index, - uint64_t body_parameter_index, - int64_t start, - int64_t stride, - int64_t part_size, - int64_t end, - int64_t axis); - SliceInputDescription() = default; - std::shared_ptr copy() const override; - bool visit_attributes(AttributeVisitor& visitor) override; - int64_t m_start{0}; - int64_t m_stride{0}; - int64_t m_part_size{0}; - int64_t m_end{0}; - int64_t m_axis{0}; - }; - - /// - /// \brief Describes a body input initialized from a TensorIterator input on - /// the first iteration, and then a body output thereafter. - /// - class NGRAPH_API MergedInputDescription : public InputDescription - { - public: - static constexpr type_info_t type_info{"MergedInputDescription", 0}; - const type_info_t& get_type_info() const override { return type_info; } - /// - /// \brief Constructs a new instance. - /// - /// \param input_index Position of the TensorIterator input - /// supplying a value to body_parameter for - /// the initial iteration. - /// \param body_parameter_index Body parameter position to receive input. - /// \param body_value_index Body value to supply body_parameter for - /// successive - /// iterations. 
- /// - MergedInputDescription(uint64_t input_index, - uint64_t body_parameter_index, - uint64_t body_value_index); - MergedInputDescription() = default; - std::shared_ptr copy() const override; - bool visit_attributes(AttributeVisitor& visitor) override; - uint64_t m_body_value_index{0}; - }; - - class NGRAPH_API InvariantInputDescription : public InputDescription - { - public: - static constexpr type_info_t type_info{"InvariantInputDescription", 0}; - const type_info_t& get_type_info() const override { return type_info; } - InvariantInputDescription(uint64_t input_index, uint64_t body_parameter_index); - InvariantInputDescription() = default; - std::shared_ptr copy() const override; - bool visit_attributes(AttributeVisitor& visitor) override; - }; - - // Forward declarations - class ConcatOutputDescription; - class BodyOutputDescription; - - /// \brief Describes how a TensorIterator output is produced from the body. - class OutputDescription - { - protected: - /// - /// \brief Constructs a new instance. - /// - /// \param body_value_index A body value that produces the output - /// \param output_index The TensorIterator output index - /// - OutputDescription(uint64_t body_value_index, uint64_t output_index); - OutputDescription() = default; - - public: - using type_info_t = DiscreteTypeInfo; - virtual ~OutputDescription() {} - virtual std::shared_ptr copy() const = 0; - virtual bool visit_attributes(AttributeVisitor& visitor); - virtual const type_info_t& get_type_info() const = 0; - - uint64_t m_body_value_index{0}; - uint64_t m_output_index{0}; - }; - - /// \brief Produces an output by concatenating an output from each iteration - class NGRAPH_API ConcatOutputDescription : public OutputDescription - { - public: - static constexpr type_info_t type_info{"ConcatOutputDescription", 0}; - const type_info_t& get_type_info() const override { return type_info; } - /// - /// \brief Constructs a new instance. 
- /// - /// \param body_value_index A body value that produces the output - /// \param output_index The TensorIterator output index - /// \param start First index for slices - /// \param stride Step amount for slices - /// \param part_size Width of slices - /// \param end Last index for slices - /// \param axis Axis being sliced - /// - ConcatOutputDescription(uint64_t body_value_index, - uint64_t output_index, - int64_t start, - int64_t stride, - int64_t part_size, - int64_t end, - int64_t axis); - ConcatOutputDescription() = default; - - virtual std::shared_ptr copy() const override; - bool visit_attributes(AttributeVisitor& visitor) override; - int64_t m_start{0}; - int64_t m_stride{0}; - int64_t m_part_size{0}; - int64_t m_end{0}; - int64_t m_axis{0}; - }; - - /// \brief Produces an output from a specific iteration - class NGRAPH_API BodyOutputDescription : public OutputDescription - { - public: - static constexpr type_info_t type_info{"BodyOutputDescription", 0}; - const type_info_t& get_type_info() const override { return type_info; } - /// - /// \brief Constructs a new instance. - /// - /// \param body_value_index A body value that produces the output - /// \param output_index The TensorIterator output index - /// \param iteration which iteration (typically -1, final) will - /// supply the value - /// - BodyOutputDescription(uint64_t body_value_index, - uint64_t output_index, - int64_t iteration); - BodyOutputDescription() = default; - std::shared_ptr copy() const override; - bool visit_attributes(AttributeVisitor& visitor) override; - int64_t m_iteration{0}; - }; - - /// - /// \brief Indicate that a body parameter comes from slices of a value - /// - /// \param parameter The parameter to receive the slices - /// \param value The value to be sliced. This will be added as an input to - /// TensorIterator. 
- /// \param start First index on axis of the slicing - /// \param stride Stepping of the slice - /// \param part_size Size of the slice on axis - /// \param end The last index on axis of the slicing - /// \param axis The axis to slice along - /// - void set_sliced_input(const std::shared_ptr& parameter, - const Output& value, - int64_t start, - int64_t stride, - int64_t part_size, - int64_t end, - int64_t axis); - /// - /// \brief Indicates that a body parameter has an initial value in the first - /// iteration and computed value thereafter - /// - /// \param[in] body_parameter The body parameter - /// \param initial_value Value for the parameter in first iteration. This - /// will be added as an input to TensorIterator. - /// \param successive_value Value for the parameter in successive iterations. - /// The value is what is active in the most recent - /// completed iteration. - /// - void set_merged_input(const std::shared_ptr& body_parameter, - const Output& initial_value, - const Output& successive_value); - /// - /// \brief Indicates that a body parameter has an invariant value during - /// iteration that may depend on values computed outside of the - /// iteration. - /// - /// \param body_parameter The body parameter - /// \param value The value supplied as an input to the block - /// - void set_invariant_input(const std::shared_ptr& body_parameter, - const Output& value); - /// - /// \brief Gets a value for a particular iteration point - /// - /// \param body_value The value - /// \param iteration The iteration that supplies the value. Negative values - /// are from the last iteration. - /// - /// \return The iterator value. - /// - Output get_iter_value(const Output& body_value, int64_t iteration); - /// - /// \brief Concatenates slices from all iterations - /// - /// \param value The value supplying slice values from each iteration. 
- /// \param start First index on axis of the slicing - /// \param stride Stepping of the slice - /// \param part_size Size of the slice on axis - /// \param end The last index on axis of the slicing - /// \param axis The axis to slice along - /// - /// \return The concatenated slices. - /// - Output get_concatenated_slices(const Output& value, - int64_t start, - int64_t stride, - int64_t part_size, - int64_t end, - int64_t axis); + explicit TensorIterator(const OutputVector& values); std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; @@ -310,32 +45,7 @@ namespace ngraph std::shared_ptr get_body() const { return m_body; } /// \param body set the body of the iteration void set_body(const std::shared_ptr& body) { m_body = body; } - /// \return a reference to the input descriptions. - const std::vector>& get_input_descriptions() const - { - return m_input_descriptions; - } - /// \return a reference to the input descriptions. Can add input descriptions - /// before - /// validation. - std::vector>& get_input_descriptions() - { - return m_input_descriptions; - } - /// \return a reference to the output descriptions. - const std::vector>& - get_output_descriptions() const - { - return m_output_descriptions; - } - /// \return a reference to the output descriptions. Can add output descriptions - /// before - /// validation. - std::vector>& get_output_descriptions() - { - return m_output_descriptions; - } - virtual void validate_and_infer_types() override; + void validate_and_infer_types() override; void revalidate_and_infer_types_for_body_ops(); /// \return the body of the iteration std::shared_ptr get_function() override; @@ -347,81 +57,9 @@ namespace ngraph } private: - // Find an input corresponding to value, adding one if necessary. 
- Input input_for_value(const Output& value); - - std::shared_ptr m_body; - std::vector> m_input_descriptions; - std::vector> m_output_descriptions; - int64_t m_num_iterations = -1; }; } using v0::TensorIterator; } - template class NGRAPH_API FactoryRegistry; - - template <> - class NGRAPH_API AttributeAdapter> - : public FactoryAttributeAdapter - { - public: - using FactoryAttributeAdapter::FactoryAttributeAdapter; - static constexpr DiscreteTypeInfo type_info{ - "AttributeAdapter>" - ">>", - 0}; - const DiscreteTypeInfo& get_type_info() const override { return type_info; } - }; - - template <> - class NGRAPH_API - AttributeAdapter>> - : public VisitorAdapter - { - public: - AttributeAdapter(std::vector>& ref); - - bool visit_attributes(AttributeVisitor& visitor) override; - static constexpr DiscreteTypeInfo type_info{ - "AttributeAdapter>" - ">>", - 0}; - const DiscreteTypeInfo& get_type_info() const override { return type_info; } - protected: - std::vector>& m_ref; - }; - - template class NGRAPH_API FactoryRegistry; - - template <> - class NGRAPH_API AttributeAdapter> - : public FactoryAttributeAdapter - { - public: - using FactoryAttributeAdapter::FactoryAttributeAdapter; - static constexpr DiscreteTypeInfo type_info{ - "AttributeAdapter>" - ">>", - 0}; - const DiscreteTypeInfo& get_type_info() const override { return type_info; } - }; - - template <> - class NGRAPH_API - AttributeAdapter>> - : public VisitorAdapter - { - public: - AttributeAdapter(std::vector>& ref); - - bool visit_attributes(AttributeVisitor& visitor) override; - static constexpr DiscreteTypeInfo type_info{ - "AttributeAdapter>" - ">>", - 0}; - const DiscreteTypeInfo& get_type_info() const override { return type_info; } - protected: - std::vector>& m_ref; - }; } diff --git a/ngraph/core/include/ngraph/op/util/sub_graph_base.hpp b/ngraph/core/include/ngraph/op/util/sub_graph_base.hpp index 58b97c38cae951..9f00922b44bf55 100644 --- a/ngraph/core/include/ngraph/op/util/sub_graph_base.hpp +++ 
b/ngraph/core/include/ngraph/op/util/sub_graph_base.hpp @@ -16,6 +16,8 @@ #pragma once +#include +#include "ngraph/factory_adapter.hpp" #include "ngraph/op/op.hpp" namespace ngraph @@ -29,13 +31,383 @@ namespace ngraph class NGRAPH_API SubGraphOp : public Op { public: - virtual std::shared_ptr get_function(); + /// \brief Describes a connection between a SubGraphOp input and the body. + class InputDescription + { + protected: + /// + /// \brief Constructs a new instance. + /// + /// \param input_index Position of the SubGraphOp input + /// \param body_parameter_index Body parameter to receive input + /// + InputDescription(uint64_t input_index, uint64_t body_parameter_index); + InputDescription() = default; + + public: + using type_info_t = DiscreteTypeInfo; + virtual ~InputDescription() = default; + virtual std::shared_ptr copy() const = 0; + + virtual const type_info_t& get_type_info() const = 0; + virtual bool visit_attributes(AttributeVisitor& visitor); + + uint64_t m_input_index{0}; + uint64_t m_body_parameter_index{0}; + }; + + /// + /// \brief Describes a body input formed from slices of an input to + /// SubGraphOp. + /// + class NGRAPH_API SliceInputDescription : public InputDescription + { + public: + static constexpr type_info_t type_info{"SliceInputDescription", 0}; + const type_info_t& get_type_info() const override { return type_info; } + /// + /// \brief Constructs a new instance. 
+ /// + /// \param input_index Position of the SubGraphOp input + /// \param body_parameter_index Body parameter position to receive input + /// \param start First index for slices + /// \param stride Step amount for slices + /// \param part_size Width of slices + /// \param end Last index for slices + /// \param axis Axis being sliced + /// + SliceInputDescription(uint64_t input_index, + uint64_t body_parameter_index, + int64_t start, + int64_t stride, + int64_t part_size, + int64_t end, + int64_t axis); + SliceInputDescription() = default; + std::shared_ptr copy() const override; + bool visit_attributes(AttributeVisitor& visitor) override; + int64_t m_start{0}; + int64_t m_stride{0}; + int64_t m_part_size{0}; + int64_t m_end{0}; + int64_t m_axis{0}; + }; + + /// + /// \brief Describes a body input initialized from a SubGraphOp input on + /// the first iteration, and then a body output thereafter. + /// + class NGRAPH_API MergedInputDescription : public InputDescription + { + public: + static constexpr type_info_t type_info{"MergedInputDescription", 0}; + const type_info_t& get_type_info() const override { return type_info; } + /// + /// \brief Constructs a new instance. + /// + /// \param input_index Position of the SubGraphOp input + /// supplying a value to body_parameter for + /// the initial iteration. + /// \param body_parameter_index Body parameter position to receive input. + /// \param body_value_index Body value to supply body_parameter for + /// successive + /// iterations. + /// + MergedInputDescription(uint64_t input_index, + uint64_t body_parameter_index, + uint64_t body_value_index); + MergedInputDescription() = default; + std::shared_ptr copy() const override; + bool visit_attributes(AttributeVisitor& visitor) override; + uint64_t m_body_value_index{0}; + }; + + /// + /// \brief Describes a body input initialized from a SubGraphOp input on + /// the first iteration, and invariant thereafter. 
+ /// + class NGRAPH_API InvariantInputDescription : public InputDescription + { + public: + static constexpr type_info_t type_info{"InvariantInputDescription", 0}; + const type_info_t& get_type_info() const override { return type_info; } + /// + /// \brief Constructs a new instance. + /// + /// \param input_index Position of the SubGraphOp input + /// \param body_parameter_index Body parameter to receive input + /// + InvariantInputDescription(uint64_t input_index, uint64_t body_parameter_index); + InvariantInputDescription() = default; + std::shared_ptr copy() const override; + bool visit_attributes(AttributeVisitor& visitor) override; + }; + + /// \brief Describes how a SubGraphOp output is produced from the body. + class OutputDescription + { + protected: + /// + /// \brief Constructs a new instance. + /// + /// \param body_value_index A body value that produces the output + /// \param output_index The SubGraphOp output index + /// + OutputDescription(uint64_t body_value_index, uint64_t output_index); + OutputDescription() = default; + + public: + using type_info_t = DiscreteTypeInfo; + virtual ~OutputDescription() = default; + virtual std::shared_ptr copy() const = 0; + virtual bool visit_attributes(AttributeVisitor& visitor); + virtual const type_info_t& get_type_info() const = 0; + + uint64_t m_body_value_index{0}; + uint64_t m_output_index{0}; + }; + + /// \brief Produces an output by concatenating an output from each iteration + class NGRAPH_API ConcatOutputDescription : public OutputDescription + { + public: + static constexpr type_info_t type_info{"ConcatOutputDescription", 0}; + const type_info_t& get_type_info() const override { return type_info; } + /// + /// \brief Constructs a new instance. 
+ /// + /// \param body_value_index A body value that produces the output + /// \param output_index The SubGraphOp output index + /// \param start First index for slices + /// \param stride Step amount for slices + /// \param part_size Width of slices + /// \param end Last index for slices + /// \param axis Axis being sliced + /// + ConcatOutputDescription(uint64_t body_value_index, + uint64_t output_index, + int64_t start, + int64_t stride, + int64_t part_size, + int64_t end, + int64_t axis); + ConcatOutputDescription() = default; + + std::shared_ptr copy() const override; + bool visit_attributes(AttributeVisitor& visitor) override; + int64_t m_start{0}; + int64_t m_stride{0}; + int64_t m_part_size{0}; + int64_t m_end{0}; + int64_t m_axis{0}; + }; + + /// \brief Produces an output from a specific iteration + class NGRAPH_API BodyOutputDescription : public OutputDescription + { + public: + static constexpr type_info_t type_info{"BodyOutputDescription", 0}; + const type_info_t& get_type_info() const override { return type_info; } + /// + /// \brief Constructs a new instance. + /// + /// \param body_value_index A body value that produces the output + /// \param output_index The SubGraphOp output index + /// \param iteration which iteration (typically -1, final) will + /// supply the value + /// + BodyOutputDescription(uint64_t body_value_index, + uint64_t output_index, + int64_t iteration); + BodyOutputDescription() = default; + std::shared_ptr copy() const override; + bool visit_attributes(AttributeVisitor& visitor) override; + int64_t m_iteration{0}; + }; + + virtual std::shared_ptr get_function() { return m_body; }; + virtual void set_function(const std::shared_ptr& func) { m_body = func; }; + /// \return a reference to the input descriptions. + const std::vector>& get_input_descriptions() const + { + return m_input_descriptions; + } + /// \return a reference to the input descriptions. Can add input descriptions + /// before + /// validation. 
+ std::vector>& get_input_descriptions() + { + return m_input_descriptions; + } + /// \return a reference to the output descriptions. + const std::vector>& + get_output_descriptions() const + { + return m_output_descriptions; + } + /// \return a reference to the output descriptions. Can add output descriptions + /// before + /// validation. + std::vector>& get_output_descriptions() + { + return m_output_descriptions; + } + + /// + /// \brief Indicate that a body parameter comes from slices of a value + /// + /// \param parameter The parameter to receive the slices + /// \param value The value to be sliced. This will be added as an input to + /// SubGraphOp. + /// \param start First index on axis of the slicing + /// \param stride Stepping of the slice + /// \param part_size Size of the slice on axis + /// \param end The last index on axis of the slicing + /// \param axis The axis to slice along + /// + virtual void set_sliced_input(const std::shared_ptr& parameter, + const Output& value, + int64_t start, + int64_t stride, + int64_t part_size, + int64_t end, + int64_t axis); + /// + /// \brief Indicates that a body parameter has an initial value in the first + /// iteration and computed value thereafter + /// + /// \param[in] body_parameter The body parameter + /// \param initial_value Value for the parameter in first iteration. This + /// will be added as an input to Loop. + /// \param successive_value Value for the parameter in successive iterations. + /// The value is what is active in the most recent + /// completed iteration. + /// + virtual void set_merged_input(const std::shared_ptr& body_parameter, + const Output& initial_value, + const Output& successive_value); + /// + /// \brief Indicates that a body parameter has an invariant value during + /// iteration that may depend on values computed outside of the + /// iteration. 
+ /// + /// \param body_parameter The body parameter + /// \param value The value supplied as an input to the block + /// + virtual void set_invariant_input(const std::shared_ptr& body_parameter, + const Output& value); + /// + /// \brief Gets a value for a particular iteration point + /// + /// \param body_value The value + /// \param iteration The iteration that supplies the value. Negative values + /// are from the last iteration. + /// Default value -1 (the last iteration). + /// + /// \return The iterator value. + /// + virtual Output get_iter_value(const Output& body_value, + int64_t iteration = -1); + /// + /// \brief Concatenates slices from all iterations + /// + /// \param value The value supplying slice values from each iteration. + /// \param start First index on axis of the slicing + /// \param stride Stepping of the slice + /// \param part_size Size of the slice on axis + /// \param end The last index on axis of the slicing + /// \param axis The axis to slice along + /// + /// \return The concatenated slices. + /// + virtual Output get_concatenated_slices(const Output& value, + int64_t start, + int64_t stride, + int64_t part_size, + int64_t end, + int64_t axis); protected: + // Find an input corresponding to value, adding one if necessary. 
+ Input input_for_value(const Output& value); + SubGraphOp() = default; - SubGraphOp(const OutputVector& args); + explicit SubGraphOp(const OutputVector& args); + + std::shared_ptr m_body; + std::vector> + m_input_descriptions; + std::vector> + m_output_descriptions; }; } } + template class NGRAPH_API FactoryRegistry; + + template <> + class NGRAPH_API AttributeAdapter> + : public FactoryAttributeAdapter + { + public: + using FactoryAttributeAdapter::FactoryAttributeAdapter; + static constexpr DiscreteTypeInfo type_info{ + "AttributeAdapter>" + ">>", + 0}; + const DiscreteTypeInfo& get_type_info() const override { return type_info; } + }; + + template <> + class NGRAPH_API + AttributeAdapter>> + : public VisitorAdapter + { + public: + explicit AttributeAdapter( + std::vector>& ref); + + bool visit_attributes(AttributeVisitor& visitor) override; + static constexpr DiscreteTypeInfo type_info{ + "AttributeAdapter>" + ">>", + 0}; + const DiscreteTypeInfo& get_type_info() const override { return type_info; } + protected: + std::vector>& m_ref; + }; + + template class NGRAPH_API FactoryRegistry; + + template <> + class NGRAPH_API AttributeAdapter> + : public FactoryAttributeAdapter + { + public: + using FactoryAttributeAdapter::FactoryAttributeAdapter; + static constexpr DiscreteTypeInfo type_info{ + "AttributeAdapter>" + ">>", + 0}; + const DiscreteTypeInfo& get_type_info() const override { return type_info; } + }; + + template <> + class NGRAPH_API + AttributeAdapter>> + : public VisitorAdapter + { + public: + explicit AttributeAdapter( + std::vector>& ref); + + bool visit_attributes(AttributeVisitor& visitor) override; + static constexpr DiscreteTypeInfo type_info{ + "AttributeAdapter>" + ">>", + 0}; + const DiscreteTypeInfo& get_type_info() const override { return type_info; } + protected: + std::vector>& m_ref; + }; } diff --git a/ngraph/core/include/ngraph/ops.hpp b/ngraph/core/include/ngraph/ops.hpp index c2293b54b3bc54..761e3268ed294d 100644 --- 
a/ngraph/core/include/ngraph/ops.hpp +++ b/ngraph/core/include/ngraph/ops.hpp @@ -83,6 +83,7 @@ #include "ngraph/op/less_eq.hpp" #include "ngraph/op/log.hpp" #include "ngraph/op/log_softmax.hpp" +#include "ngraph/op/loop.hpp" #include "ngraph/op/lrn.hpp" #include "ngraph/op/lstm_cell.hpp" #include "ngraph/op/lstm_sequence.hpp" diff --git a/ngraph/core/include/ngraph/opsets/opset5_tbl.hpp b/ngraph/core/include/ngraph/opsets/opset5_tbl.hpp index c665d94d0e2e21..2fbc5f6c825818 100644 --- a/ngraph/core/include/ngraph/opsets/opset5_tbl.hpp +++ b/ngraph/core/include/ngraph/opsets/opset5_tbl.hpp @@ -167,6 +167,7 @@ NGRAPH_OP(GatherND, ngraph::op::v5) NGRAPH_OP(GRUSequence, ngraph::op::v5) NGRAPH_OP(HSigmoid, ngraph::op::v5) NGRAPH_OP(LogSoftmax, ngraph::op::v5) +NGRAPH_OP(Loop, ngraph::op::v5) NGRAPH_OP(LSTMSequence, ngraph::op::v5) NGRAPH_OP(NonMaxSuppression, ngraph::op::v5) NGRAPH_OP(RNNSequence, ngraph::op::v5) diff --git a/ngraph/core/src/graph_util.cpp b/ngraph/core/src/graph_util.cpp index 0bb6e8b89ce28a..b6fe89c8aa9916 100644 --- a/ngraph/core/src/graph_util.cpp +++ b/ngraph/core/src/graph_util.cpp @@ -31,6 +31,7 @@ #include "ngraph/op/result.hpp" #include "ngraph/op/tensor_iterator.hpp" #include "ngraph/op/util/op_types.hpp" +#include "ngraph/opsets/opset5.hpp" #include "ngraph/pass/manager.hpp" #include "ngraph/pass/visualize_tree.hpp" #include "ngraph/provenance.hpp" @@ -311,7 +312,7 @@ std::vector> // There is a friendly name for this node so copy it cloned_node->set_friendly_name(node->get_friendly_name()); // TODO: workaround for shape inference, delete it after fix - if (ngraph::as_type_ptr(cloned_node)) + if (std::dynamic_pointer_cast(cloned_node)) { cloned_node->validate_and_infer_types(); } @@ -379,7 +380,7 @@ std::list> // There is a friendly name for this node so copy it cloned_node->set_friendly_name(node->get_friendly_name()); // TODO: workaround for shape inference, delete it after fix - if (ngraph::as_type_ptr(cloned_node)) + if 
(std::dynamic_pointer_cast(cloned_node)) { cloned_node->validate_and_infer_types(); } diff --git a/ngraph/core/src/op/loop.cpp b/ngraph/core/src/op/loop.cpp new file mode 100644 index 00000000000000..3cf0881cf74bbe --- /dev/null +++ b/ngraph/core/src/op/loop.cpp @@ -0,0 +1,350 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +#include "ngraph/op/loop.hpp" +#include "ngraph/factory.hpp" +#include "ngraph/graph_util.hpp" +#include "ngraph/opsets/opset5.hpp" +#include "ngraph/specialize_function.hpp" + +using namespace std; +using namespace ngraph; + +NGRAPH_RTTI_DEFINITION(op::v5::Loop, "Loop", 5); + +op::v5::Loop::Loop(const Output& trip_count, const Output& execution_condition) +{ + set_argument(0, trip_count); + set_argument(1, execution_condition); +} + +bool op::v5::Loop::visit_attributes(AttributeVisitor& visitor) +{ + visitor.on_attribute("body", m_body); + visitor.on_attribute("input_descriptions", m_input_descriptions); + visitor.on_attribute("output_descriptions", m_output_descriptions); + + return false; +} + +void op::v5::Loop::validate_and_infer_types() +{ + if (m_special_body_ports.current_iteration_input_idx >= 0) + { + const auto& cur_iter_rank = m_body->get_parameters() + .at(m_special_body_ports.current_iteration_input_idx) + 
->get_partial_shape() + .rank(); + if (cur_iter_rank.is_static()) + { + NODE_VALIDATION_CHECK(this, + cur_iter_rank.compatible(1) || cur_iter_rank.compatible(0), + "Rank of CurrentIteration input must be equal to 0 or 1"); + } + } + bool zero_number_of_iter = false; + const auto& loop_execution_condition = input_value(1); + const auto& loop_condition_rank = loop_execution_condition.get_partial_shape().rank(); + if (loop_condition_rank.is_static()) + { + NODE_VALIDATION_CHECK(this, + loop_condition_rank.compatible(1) || + loop_condition_rank.compatible(0), + "Rank of ExecutionCondition input must be equal to 0 or 1"); + } + if (const auto& cond_value = std::dynamic_pointer_cast( + loop_execution_condition.get_node_shared_ptr())) + { + auto val = cond_value->cast_vector(); + NODE_VALIDATION_CHECK(this, + val.size() == 1, + "The number of values in the Condition constant is greater than 1"); + + if (!val[0]) + { + zero_number_of_iter = true; + } + } + + bool condition_always_true = false; + NODE_VALIDATION_CHECK(this, + m_special_body_ports.body_condition_output_idx >= 0, + "Condition body output is not provided. 
" + "Condition is a mandatory output of the body in Loop op."); + const auto& body_execution_condition = + m_body->get_results().at(m_special_body_ports.body_condition_output_idx)->input_value(0); + const auto& body_condition_rank = body_execution_condition.get_partial_shape().rank(); + if (body_condition_rank.is_static()) + { + NODE_VALIDATION_CHECK(this, + body_condition_rank.compatible(0) || + body_condition_rank.compatible(1), + "Rank of BodyExecutionCondition output must be equal to 0 or 1"); + } + if (const auto& cond_value = std::dynamic_pointer_cast( + body_execution_condition.get_node_shared_ptr())) + { + auto val = cond_value->cast_vector(); + NODE_VALIDATION_CHECK(this, + val.size() == 1, + "The number of values in the Condition constant is greater than 1"); + + if (val[0]) + { + condition_always_true = true; + } + else + { + m_num_iterations = 1; // condition_always_false, do_while mode + } + } + + const auto& trip_count = input_value(0); + const auto& trip_count_rank = trip_count.get_partial_shape().rank(); + if (trip_count_rank.is_static()) + { + NODE_VALIDATION_CHECK(this, + trip_count_rank.compatible(1) || trip_count_rank.compatible(0), + "Rank of TripCount input must be equal to 0 or 1"); + } + if (const auto& trip_count_val = std::dynamic_pointer_cast( + trip_count.get_node_shared_ptr())) + { + auto val = trip_count_val->cast_vector(); + NODE_VALIDATION_CHECK(this, + val.size() == 1, + "The number of values in the TripCount constant is greater than 1"); + if (condition_always_true) + m_num_iterations = val[0]; + } + + NODE_VALIDATION_CHECK(this, + get_input_size() == m_input_descriptions.size() + 2, + "Number of inputs must be the same as number of input descriptions"); + + NODE_VALIDATION_CHECK(this, + get_output_size() == m_output_descriptions.size(), + "Number of outputs must be the same as number of output descriptions"); + + std::vector> ends; + + // Input + uint64_t index_it = 2; + for (const auto& input_description : m_input_descriptions) + 
{ + auto index = input_description->m_input_index; + NODE_VALIDATION_CHECK(this, index == index_it, "Input_index not in order"); + index_it++; + + if (auto merged_input_description = as_type_ptr(input_description)) + { + auto body_value = + m_body->get_results().at(merged_input_description->m_body_value_index); + ends.push_back(body_value); + + const auto& body_value_partial_shape = body_value->get_input_partial_shape(0); + auto body_parameter = + m_body->get_parameters().at(merged_input_description->m_body_parameter_index); + + auto body_param_partial_shape = body_parameter->get_partial_shape(); + auto input_partial_shape = input(index).get_partial_shape(); + NODE_VALIDATION_CHECK(this, + body_value_partial_shape.compatible(body_param_partial_shape), + "Iterator successive value is not compatible with body param"); + NODE_VALIDATION_CHECK(this, + input_partial_shape.compatible(body_param_partial_shape), + "Iterator initial value is not compatible with body param"); + + if (input_partial_shape.is_static()) + { + auto input_shape = input_partial_shape.to_shape(); + // infer type for body_parameter + if (body_param_partial_shape.is_dynamic()) + { + body_parameter->set_partial_shape(input_shape); + } + } + } + else if (auto invariant_input_description = + as_type_ptr(input_description)) + { + auto body_parameter = + m_body->get_parameters().at(invariant_input_description->m_body_parameter_index); + + auto body_param_partial_shape = body_parameter->get_partial_shape(); + auto input_partial_shape = input(index).get_partial_shape(); + NODE_VALIDATION_CHECK(this, + input_partial_shape.compatible(body_param_partial_shape), + "Iterator initial value is not compatible with body param"); + + if (input_partial_shape.is_static()) + { + auto input_shape = input_partial_shape.to_shape(); + // infer type for m_body_parameter + if (body_param_partial_shape.is_dynamic()) + { + body_parameter->set_partial_shape(input_shape); + } + } + } + } + + // Body + 
m_body->validate_nodes_and_infer_types(); + + // Output + index_it = 0; + for (const auto& output_description : m_output_descriptions) + { + auto index = output_description->m_output_index; + NODE_VALIDATION_CHECK(this, index == index_it, "Output_index not in order"); + index_it++; + + auto body_value = + m_body->get_results().at(output_description->m_body_value_index)->input_value(0); + + if (auto concat_output_description = + as_type_ptr(output_description)) + { + const auto& body_value_partial_shape = body_value.get_partial_shape(); + set_output_type(index, body_value.get_element_type(), PartialShape::dynamic()); + if (body_value_partial_shape.is_static()) + { + auto body_value_shape = body_value_partial_shape.to_shape(); + auto axis = concat_output_description->m_axis; + + Shape out_shape{body_value_shape}; + + if (body_value_shape.empty()) + { + NODE_VALIDATION_CHECK( + this, + axis == 0, + "Axis must be equal to 0 if concatenated output tensor slices are scalars. " + "Loop output index: ", + index); + out_shape = Shape(1); + } + + if (m_num_iterations != -1) + { + out_shape[axis] = m_num_iterations * body_value_shape[axis]; + if (zero_number_of_iter) + { + out_shape.at(0) = 0; + } + set_output_type(index, body_value.get_element_type(), out_shape); + } + } + } + else if (auto body_output_description = + as_type_ptr(output_description)) + { + const PartialShape& ps = body_value.get_partial_shape(); + if (ps.is_dynamic()) + { + set_output_type(index, body_value.get_element_type(), ps); + } + else + { + auto shape = ps.get_shape(); + if (zero_number_of_iter) + { + shape.at(0) = 0; + } + set_output_type(index, body_value.get_element_type(), shape); + } + } + } +} + +std::shared_ptr op::v5::Loop::clone_with_new_inputs(const OutputVector& new_args) const +{ + // 0 - trip_count, 1 - execution condition, these inputs are not connected to the body params + OutputVector body_params_args(new_args.begin() + 2, new_args.end()); + auto op = make_shared(new_args[0], 
new_args[1]); + for (int idx = 2; idx < new_args.size(); ++idx) + { + op->set_argument(idx, new_args[idx]); + } + NGRAPH_CHECK(op.get(), + op != nullptr, + "Cannot clone ", + description(), + " operation with name ", + get_friendly_name()); + op->set_output_size(m_output_descriptions.size()); + + std::vector<::ngraph::element::Type> types(m_body->get_parameters().size()); + std::vector<::ngraph::PartialShape> new_shapes(m_body->get_parameters().size()); + + for (size_t input_index = 0; input_index < new_args.size(); ++input_index) + { + for (auto& input_description : m_input_descriptions) + { + if (input_description->m_input_index == input_index) + { + types[input_description->m_body_parameter_index] = + new_args[input_index].get_element_type(); + new_shapes[input_description->m_body_parameter_index] = + new_args[input_index].get_partial_shape(); + } + } + } + + if (m_special_body_ports.current_iteration_input_idx >= 0) + { + const auto& cur_iterations_param = + m_body->get_parameters().at(m_special_body_ports.current_iteration_input_idx); + body_params_args.insert(body_params_args.begin() + + m_special_body_ports.current_iteration_input_idx, + cur_iterations_param); + new_shapes.at(m_special_body_ports.current_iteration_input_idx) = + cur_iterations_param->get_partial_shape(); + types.at(m_special_body_ports.current_iteration_input_idx) = + cur_iterations_param->get_element_type(); + } + op->m_num_iterations = m_num_iterations; + op->m_special_body_ports = m_special_body_ports; + auto func = std::make_shared(m_body->get_results(), m_body->get_parameters()); + auto spec_func = specialize_function( + func, types, new_shapes, std::vector(body_params_args.size(), nullptr)); + op->m_body = std::make_shared(spec_func->get_results(), spec_func->get_parameters()); + + for (auto& input_description : m_input_descriptions) + { + op->m_input_descriptions.push_back(input_description->copy()); + } + for (auto& output_description : m_output_descriptions) + { + 
op->m_output_descriptions.push_back(output_description->copy()); + } + return move(op); +} + +Output op::v5::Loop::get_concatenated_slices(const Output& value, + int64_t start, + int64_t stride, + int64_t part_size, + int64_t end, + int64_t axis) +{ + NGRAPH_CHECK(start == 0 && stride == 1 && part_size == 1 && end == -1, + "Invalid start, stride, part_size, or end attribute values in Loop op. " + "Supported values for start {0}, for stride and part_size {1}, for end " + "{-1}"); + return SubGraphOp::get_concatenated_slices(value, start, stride, part_size, end, axis); +} diff --git a/ngraph/core/src/op/tensor_iterator.cpp b/ngraph/core/src/op/tensor_iterator.cpp index 18345da0f2b60c..cefe036289530d 100644 --- a/ngraph/core/src/op/tensor_iterator.cpp +++ b/ngraph/core/src/op/tensor_iterator.cpp @@ -22,285 +22,13 @@ using namespace std; using namespace ngraph; -NGRAPH_SUPPRESS_DEPRECATED_START - constexpr NodeTypeInfo op::v0::TensorIterator::type_info; -constexpr DiscreteTypeInfo op::v0::TensorIterator::SliceInputDescription::type_info; -constexpr DiscreteTypeInfo op::v0::TensorIterator::MergedInputDescription::type_info; -constexpr DiscreteTypeInfo op::v0::TensorIterator::InvariantInputDescription::type_info; - -constexpr DiscreteTypeInfo op::v0::TensorIterator::BodyOutputDescription::type_info; -constexpr DiscreteTypeInfo op::v0::TensorIterator::ConcatOutputDescription::type_info; - op::v0::TensorIterator::TensorIterator(const OutputVector& values) : op::util::SubGraphOp(values) { } -op::v0::TensorIterator::InputDescription::InputDescription(uint64_t input_index, - uint64_t body_parameter_index) - : m_input_index(input_index) - , m_body_parameter_index(body_parameter_index) -{ -} - -bool op::v0::TensorIterator::InputDescription::visit_attributes(AttributeVisitor& visitor) -{ - visitor.on_attribute("input_index", m_input_index); - visitor.on_attribute("body_parameter_index", m_body_parameter_index); - return true; -} - 
-op::v0::TensorIterator::SliceInputDescription::SliceInputDescription(uint64_t input_index, - uint64_t body_parameter_index, - int64_t start, - int64_t stride, - int64_t part_size, - int64_t end, - int64_t axis) - : InputDescription(input_index, body_parameter_index) - , m_start(start) - , m_stride(stride) - , m_part_size(part_size) - , m_end(end) - , m_axis(axis) -{ -} - -shared_ptr - op::v0::TensorIterator::SliceInputDescription::copy() const -{ - return make_shared( - m_input_index, m_body_parameter_index, m_start, m_stride, m_part_size, m_end, m_axis); -} - -bool op::v0::TensorIterator::SliceInputDescription::visit_attributes(AttributeVisitor& visitor) -{ - InputDescription::visit_attributes(visitor); - visitor.on_attribute("start", m_start); - visitor.on_attribute("stride", m_stride); - visitor.on_attribute("part_size", m_part_size); - visitor.on_attribute("end", m_end); - visitor.on_attribute("axis", m_axis); - return true; -} - -op::v0::TensorIterator::MergedInputDescription::MergedInputDescription( - uint64_t input_index, uint64_t body_parameter_index, uint64_t body_value_index) - : InputDescription(input_index, body_parameter_index) - , m_body_value_index(body_value_index) -{ -} - -shared_ptr - op::v0::TensorIterator::MergedInputDescription::copy() const -{ - return make_shared( - m_input_index, m_body_parameter_index, m_body_value_index); -} - -bool op::v0::TensorIterator::MergedInputDescription::visit_attributes(AttributeVisitor& visitor) -{ - InputDescription::visit_attributes(visitor); - visitor.on_attribute("body_value_index", m_body_value_index); - return true; -} - -op::v0::TensorIterator::InvariantInputDescription::InvariantInputDescription( - uint64_t input_index, uint64_t body_parameter_index) - : InputDescription(input_index, body_parameter_index) -{ -} - -shared_ptr - op::v0::TensorIterator::InvariantInputDescription::copy() const -{ - return make_shared(m_input_index, m_body_parameter_index); -} - -bool 
op::v0::TensorIterator::InvariantInputDescription::visit_attributes(AttributeVisitor& visitor) -{ - InputDescription::visit_attributes(visitor); - return true; -} - -op::v0::TensorIterator::OutputDescription::OutputDescription(uint64_t body_value_index, - uint64_t output_index) - : m_body_value_index(body_value_index) - , m_output_index(output_index) -{ -} - -bool op::v0::TensorIterator::OutputDescription::visit_attributes(AttributeVisitor& visitor) -{ - visitor.on_attribute("body_value_index", m_body_value_index); - visitor.on_attribute("output_index", m_output_index); - return true; -} - -op::v0::TensorIterator::ConcatOutputDescription::ConcatOutputDescription(uint64_t body_value_index, - uint64_t output_index, - int64_t start, - int64_t stride, - int64_t part_size, - int64_t end, - int64_t axis) - : OutputDescription(body_value_index, output_index) - , m_start(start) - , m_stride(stride) - , m_part_size(part_size) - , m_end(end) - , m_axis(axis) -{ -} - -bool op::v0::TensorIterator::ConcatOutputDescription::visit_attributes(AttributeVisitor& visitor) -{ - OutputDescription::visit_attributes(visitor); - visitor.on_attribute("start", m_start); - visitor.on_attribute("stride", m_stride); - visitor.on_attribute("part_size", m_part_size); - visitor.on_attribute("end", m_end); - visitor.on_attribute("axis", m_axis); - return true; -} - -shared_ptr - op::v0::TensorIterator::ConcatOutputDescription::copy() const -{ - return make_shared( - m_body_value_index, m_output_index, m_start, m_stride, m_part_size, m_end, m_axis); -} - -op::v0::TensorIterator::BodyOutputDescription::BodyOutputDescription(uint64_t body_value_index, - uint64_t output_index, - int64_t iteration) - : OutputDescription(body_value_index, output_index) - , m_iteration(iteration) -{ -} - -shared_ptr - op::v0::TensorIterator::BodyOutputDescription::copy() const -{ - return make_shared(m_body_value_index, m_output_index, m_iteration); -} - -bool 
op::v0::TensorIterator::BodyOutputDescription::visit_attributes(AttributeVisitor& visitor) -{ - OutputDescription::visit_attributes(visitor); - visitor.on_attribute("iteration", m_iteration); - return true; -} - -namespace -{ -} - -namespace ngraph -{ - template <> - FactoryRegistry& - FactoryRegistry::get() - { - static FactoryRegistry registry; - static mutex init_guard; - if (registry.m_factory_map.size() == 0) - { - lock_guard guard(init_guard); - if (registry.m_factory_map.size() == 0) - { - registry.register_factory(); - registry.register_factory(); - registry.register_factory(); - } - } - return registry; - } - - constexpr DiscreteTypeInfo - AttributeAdapter>::type_info; - - constexpr DiscreteTypeInfo AttributeAdapter< - std::vector>>::type_info; - - AttributeAdapter>>:: - AttributeAdapter(std::vector>& ref) - : m_ref(ref) - { - } - - bool AttributeAdapter>>:: - visit_attributes(AttributeVisitor& visitor) - { - int64_t size = m_ref.size(); - visitor.on_attribute("size", size); - if (size != m_ref.size()) - { - m_ref.resize(size); - } - ostringstream index; - for (int64_t i = 0; i < size; i++) - { - index.str(""); - index << i; - visitor.on_attribute(index.str(), m_ref[i]); - } - return true; - } - - template <> - FactoryRegistry& - FactoryRegistry::get() - { - static FactoryRegistry registry; - static mutex init_guard; - // TODO: Add a lock - if (registry.m_factory_map.size() == 0) - { - lock_guard guard(init_guard); - if (registry.m_factory_map.size() == 0) - { - registry.register_factory(); - registry.register_factory(); - } - } - return registry; - } - - constexpr DiscreteTypeInfo AttributeAdapter< - std::vector>>::type_info; - - constexpr DiscreteTypeInfo - AttributeAdapter>::type_info; - - AttributeAdapter>>:: - AttributeAdapter(std::vector>& ref) - : m_ref(ref) - { - } - - bool AttributeAdapter>>:: - visit_attributes(AttributeVisitor& visitor) - { - int64_t size = m_ref.size(); - visitor.on_attribute("size", size); - if (size != m_ref.size()) - { - 
m_ref.resize(size); - } - ostringstream index; - for (int64_t i = 0; i < size; i++) - { - index.str(""); - index << i; - visitor.on_attribute(index.str(), m_ref[i]); - } - return true; - } -} - bool op::v0::TensorIterator::visit_attributes(AttributeVisitor& visitor) { visitor.on_attribute("body", m_body); @@ -310,72 +38,6 @@ bool op::v0::TensorIterator::visit_attributes(AttributeVisitor& visitor) return false; } -Input op::v0::TensorIterator::input_for_value(const Output& value) -{ - auto input_index = get_input_size(); - set_argument(input_index, value); - return Input(this, input_index); -} - -void op::v0::TensorIterator::set_sliced_input(const std::shared_ptr& body_parameter, - const Output& value, - int64_t start, - int64_t stride, - int64_t part_size, - int64_t end, - int64_t axis) -{ - m_input_descriptions.push_back( - make_shared(input_for_value(value).get_index(), - m_body->get_parameter_index(body_parameter), - start, - stride, - part_size, - end, - axis)); -} - -void op::v0::TensorIterator::set_merged_input(const std::shared_ptr& body_parameter, - const Output& initial_value, - const Output& successive_value) -{ - m_input_descriptions.push_back( - make_shared(input_for_value(initial_value).get_index(), - m_body->get_parameter_index(body_parameter), - m_body->get_result_index(successive_value))); -} - -void op::v0::TensorIterator::set_invariant_input(const std::shared_ptr& body_parameter, - const Output& value) -{ - m_input_descriptions.push_back(make_shared( - input_for_value(value).get_index(), m_body->get_parameter_index(body_parameter))); -} - -Output op::v0::TensorIterator::get_iter_value(const Output& body_value, - int64_t iteration) -{ - auto output_index = get_output_size(); - m_output_descriptions.push_back(make_shared( - m_body->get_result_index(body_value), output_index, iteration)); - set_output_size(output_index + 1); - return Output(shared_from_this(), output_index); -} - -Output op::v0::TensorIterator::get_concatenated_slices(const Output& 
body_value, - int64_t start, - int64_t stride, - int64_t part_size, - int64_t end, - int64_t axis) -{ - auto output_index = get_output_size(); - m_output_descriptions.push_back(make_shared( - m_body->get_result_index(body_value), output_index, start, stride, part_size, end, axis)); - set_output_size(output_index + 1); - return Output(shared_from_this(), output_index); -} - void op::v0::TensorIterator::revalidate_and_infer_types_for_body_ops() { std::stack, std::vector>> nodes_to_do; @@ -669,7 +331,3 @@ std::shared_ptr } return move(op); } - -namespace ngraph -{ -} diff --git a/ngraph/core/src/op/util/sub_graph_base.cpp b/ngraph/core/src/op/util/sub_graph_base.cpp index 5a0b728f73b893..377f9c3ae60ad9 100644 --- a/ngraph/core/src/op/util/sub_graph_base.cpp +++ b/ngraph/core/src/op/util/sub_graph_base.cpp @@ -15,17 +15,345 @@ //***************************************************************************** #include "ngraph/op/util/sub_graph_base.hpp" +#include "ngraph/opsets/opset5.hpp" #include "ngraph/graph_util.hpp" using namespace ngraph; +constexpr DiscreteTypeInfo op::util::SubGraphOp::SliceInputDescription::type_info; +constexpr DiscreteTypeInfo op::util::SubGraphOp::MergedInputDescription::type_info; +constexpr DiscreteTypeInfo op::util::SubGraphOp::InvariantInputDescription::type_info; + +constexpr DiscreteTypeInfo op::util::SubGraphOp::BodyOutputDescription::type_info; +constexpr DiscreteTypeInfo op::util::SubGraphOp::ConcatOutputDescription::type_info; + +op::util::SubGraphOp::InputDescription::InputDescription(uint64_t input_index, + uint64_t body_parameter_index) + : m_input_index(input_index) + , m_body_parameter_index(body_parameter_index) +{ +} + +bool op::util::SubGraphOp::InputDescription::visit_attributes(AttributeVisitor& visitor) +{ + visitor.on_attribute("input_index", m_input_index); + visitor.on_attribute("body_parameter_index", m_body_parameter_index); + return true; +} + 
+op::util::SubGraphOp::SliceInputDescription::SliceInputDescription(uint64_t input_index, + uint64_t body_parameter_index, + int64_t start, + int64_t stride, + int64_t part_size, + int64_t end, + int64_t axis) + : InputDescription(input_index, body_parameter_index) + , m_start(start) + , m_stride(stride) + , m_part_size(part_size) + , m_end(end) + , m_axis(axis) +{ +} + +std::shared_ptr + op::util::SubGraphOp::SliceInputDescription::copy() const +{ + return std::make_shared( + m_input_index, m_body_parameter_index, m_start, m_stride, m_part_size, m_end, m_axis); +} + +bool op::util::SubGraphOp::SliceInputDescription::visit_attributes(AttributeVisitor& visitor) +{ + InputDescription::visit_attributes(visitor); + visitor.on_attribute("start", m_start); + visitor.on_attribute("stride", m_stride); + visitor.on_attribute("part_size", m_part_size); + visitor.on_attribute("end", m_end); + visitor.on_attribute("axis", m_axis); + return true; +} + +op::util::SubGraphOp::MergedInputDescription::MergedInputDescription(uint64_t input_index, + uint64_t body_parameter_index, + uint64_t body_value_index) + : InputDescription(input_index, body_parameter_index) + , m_body_value_index(body_value_index) +{ +} + +std::shared_ptr + op::util::SubGraphOp::MergedInputDescription::copy() const +{ + return std::make_shared( + m_input_index, m_body_parameter_index, m_body_value_index); +} + +bool op::util::SubGraphOp::MergedInputDescription::visit_attributes(AttributeVisitor& visitor) +{ + InputDescription::visit_attributes(visitor); + visitor.on_attribute("body_value_index", m_body_value_index); + return true; +} + +op::util::SubGraphOp::InvariantInputDescription::InvariantInputDescription( + uint64_t input_index, uint64_t body_parameter_index) + : InputDescription(input_index, body_parameter_index) +{ +} + +std::shared_ptr + op::util::SubGraphOp::InvariantInputDescription::copy() const +{ + return std::make_shared(m_input_index, m_body_parameter_index); +} + +bool 
op::util::SubGraphOp::InvariantInputDescription::visit_attributes(AttributeVisitor& visitor) +{ + InputDescription::visit_attributes(visitor); + return true; +} + +op::util::SubGraphOp::OutputDescription::OutputDescription(uint64_t body_value_index, + uint64_t output_index) + : m_body_value_index(body_value_index) + , m_output_index(output_index) +{ +} + +bool op::util::SubGraphOp::OutputDescription::visit_attributes(AttributeVisitor& visitor) +{ + visitor.on_attribute("body_value_index", m_body_value_index); + visitor.on_attribute("output_index", m_output_index); + return true; +} + +op::util::SubGraphOp::ConcatOutputDescription::ConcatOutputDescription(uint64_t body_value_index, + uint64_t output_index, + int64_t start, + int64_t stride, + int64_t part_size, + int64_t end, + int64_t axis) + : OutputDescription(body_value_index, output_index) + , m_start(start) + , m_stride(stride) + , m_part_size(part_size) + , m_end(end) + , m_axis(axis) +{ +} + +bool op::util::SubGraphOp::ConcatOutputDescription::visit_attributes(AttributeVisitor& visitor) +{ + OutputDescription::visit_attributes(visitor); + visitor.on_attribute("start", m_start); + visitor.on_attribute("stride", m_stride); + visitor.on_attribute("part_size", m_part_size); + visitor.on_attribute("end", m_end); + visitor.on_attribute("axis", m_axis); + return true; +} + +std::shared_ptr + op::util::SubGraphOp::ConcatOutputDescription::copy() const +{ + return std::make_shared( + m_body_value_index, m_output_index, m_start, m_stride, m_part_size, m_end, m_axis); +} + +op::util::SubGraphOp::BodyOutputDescription::BodyOutputDescription(uint64_t body_value_index, + uint64_t output_index, + int64_t iteration) + : OutputDescription(body_value_index, output_index) + , m_iteration(iteration) +{ +} + +std::shared_ptr + op::util::SubGraphOp::BodyOutputDescription::copy() const +{ + return std::make_shared(m_body_value_index, m_output_index, m_iteration); +} + +bool 
op::util::SubGraphOp::BodyOutputDescription::visit_attributes(AttributeVisitor& visitor) +{ + OutputDescription::visit_attributes(visitor); + visitor.on_attribute("iteration", m_iteration); + return true; +} + op::util::SubGraphOp::SubGraphOp(const OutputVector& args) : Op(args) { } -std::shared_ptr op::util::SubGraphOp::get_function() +void op::util::SubGraphOp::set_merged_input(const std::shared_ptr& body_parameter, + const Output& initial_value, + const Output& successive_value) +{ + m_input_descriptions.push_back(std::make_shared( + input_for_value(initial_value).get_index(), + m_body->get_parameter_index(body_parameter), + m_body->get_result_index(successive_value))); +} + +void op::util::SubGraphOp::set_invariant_input(const std::shared_ptr& body_parameter, + const Output& value) +{ + m_input_descriptions.push_back(std::make_shared( + input_for_value(value).get_index(), m_body->get_parameter_index(body_parameter))); +} + +Output op::util::SubGraphOp::get_iter_value(const Output& body_value, int64_t iteration) +{ + auto output_index = get_output_size(); + m_output_descriptions.push_back(std::make_shared( + m_body->get_result_index(body_value), output_index, iteration)); + set_output_size(output_index + 1); + return Output(shared_from_this(), output_index); +} + +Output op::util::SubGraphOp::get_concatenated_slices(const Output& body_value, + int64_t start, + int64_t stride, + int64_t part_size, + int64_t end, + int64_t axis) { - return nullptr; + auto output_index = get_output_size(); + m_output_descriptions.push_back(std::make_shared( + m_body->get_result_index(body_value), output_index, start, stride, part_size, end, axis)); + set_output_size(output_index + 1); + return Output(shared_from_this(), output_index); } + +void op::util::SubGraphOp::set_sliced_input(const std::shared_ptr& parameter, + const Output& value, + int64_t start, + int64_t stride, + int64_t part_size, + int64_t end, + int64_t axis) +{ + m_input_descriptions.push_back( + 
std::make_shared(input_for_value(value).get_index(), + m_body->get_parameter_index(parameter), + start, + stride, + part_size, + end, + axis)); +} + +Input op::util::SubGraphOp::input_for_value(const Output& value) +{ + auto input_index = get_input_size(); + set_argument(input_index, value); + return Input(this, input_index); +} + +namespace ngraph +{ + template <> + FactoryRegistry& + FactoryRegistry::get() + { + static FactoryRegistry registry; + static std::mutex init_guard; + if (registry.m_factory_map.size() == 0) + { + std::lock_guard guard(init_guard); + if (registry.m_factory_map.size() == 0) + { + registry.register_factory(); + registry.register_factory(); + registry.register_factory(); + } + } + return registry; + } + + constexpr DiscreteTypeInfo + AttributeAdapter>::type_info; + + constexpr DiscreteTypeInfo AttributeAdapter< + std::vector>>::type_info; + + AttributeAdapter>>:: + AttributeAdapter(std::vector>& ref) + : m_ref(ref) + { + } + + bool AttributeAdapter>>:: + visit_attributes(AttributeVisitor& visitor) + { + int64_t size = m_ref.size(); + visitor.on_attribute("size", size); + if (size != m_ref.size()) + { + m_ref.resize(size); + } + std::ostringstream index; + for (int64_t i = 0; i < size; i++) + { + index.str(""); + index << i; + visitor.on_attribute(index.str(), m_ref[i]); + } + return true; + } + + template <> + FactoryRegistry& + FactoryRegistry::get() + { + static FactoryRegistry registry; + static std::mutex init_guard; + // TODO: Add a lock + if (registry.m_factory_map.size() == 0) + { + std::lock_guard guard(init_guard); + if (registry.m_factory_map.size() == 0) + { + registry.register_factory(); + registry.register_factory(); + } + } + return registry; + } + + constexpr DiscreteTypeInfo AttributeAdapter< + std::vector>>::type_info; + + constexpr DiscreteTypeInfo + AttributeAdapter>::type_info; + + AttributeAdapter>>:: + AttributeAdapter(std::vector>& ref) + : m_ref(ref) + { + } + + bool AttributeAdapter>>:: + 
visit_attributes(AttributeVisitor& visitor) + { + int64_t size = m_ref.size(); + visitor.on_attribute("size", size); + if (size != m_ref.size()) + { + m_ref.resize(size); + } + std::ostringstream index; + for (int64_t i = 0; i < size; i++) + { + index.str(""); + index << i; + visitor.on_attribute(index.str(), m_ref[i]); + } + return true; + } +} \ No newline at end of file diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index 6e3a9f5312c2c1..58dd0300758e6e 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -138,6 +138,7 @@ set(SRC type_prop/lrn.cpp type_prop/lstm_cell.cpp type_prop/lstm_sequence.cpp + type_prop/loop.cpp type_prop/matmul.cpp type_prop/max_pool.cpp type_prop/mish.cpp @@ -183,6 +184,7 @@ set(SRC type_prop/swish.cpp type_prop/reduce_prod.cpp type_prop/reduce_sum.cpp + type_prop/ti.cpp type_prop/tile.cpp type_prop/top_k.cpp type_prop/transpose.cpp diff --git a/ngraph/test/copy.cpp b/ngraph/test/copy.cpp index fb3dec4c13fda2..5719301a7e1386 100644 --- a/ngraph/test/copy.cpp +++ b/ngraph/test/copy.cpp @@ -20,6 +20,7 @@ #include "gtest/gtest.h" #include "ngraph/ngraph.hpp" +#include "ngraph/opsets/opset5.hpp" #include "util/ndarray.hpp" #include "util/test_tools.hpp" @@ -368,3 +369,70 @@ TEST(copy, tanh) { ASSERT_TRUE(check_unary()); } + +TEST(copy, loop) +{ + // That which we iterate over + auto X = make_shared(element::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::f32, Shape{32, 1, 10}); + auto M = make_shared(element::f32, Shape{32, 1, 10}); + + // Set up the cell body, a function from (Xi, Yi) -> (Zo) + // Body parameters + auto current_iteration = make_shared(element::i64, Shape{}); + auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto body_condition = + std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); + + auto trip_count = + 
std::make_shared(ngraph::element::i64, ngraph::Shape{}, 10); + auto exec_condition = + std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); + // Body + auto sum = make_shared(Xi, Yi); + auto Zo = make_shared(sum, M_body); + auto body = make_shared(OutputVector{Zo, body_condition}, + ParameterVector{Xi, current_iteration, Yi, M_body}); + + auto loop = make_shared(trip_count, exec_condition); + loop->set_function(body); + loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{1, 1}); + + loop->set_invariant_input(Xi, X); + loop->set_invariant_input(Yi, Y); + loop->set_merged_input(M_body, M, Zo); + + // Output 0 is last Zo + auto out0 = loop->get_iter_value(body_condition, -1); + auto out1 = loop->get_iter_value(Zo, -1); + // Output 1 is concat of Zos + // start=0, stride=1, part_size=1, end=-1, axis=1 + auto out2 = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, 1); + loop->validate_and_infer_types(); + // That which we iterate over + auto X_new = make_shared(element::f32, Shape{3, 2, 5}); + auto Y_new = make_shared(element::f32, Shape{3, 2, 5}); + auto M_new = make_shared(element::f32, Shape{3, 2, 5}); + OutputVector new_args = {trip_count, exec_condition, X_new, Y_new, M_new}; + auto loop_copy = loop->clone_with_new_inputs(new_args); + + auto node_cast = std::dynamic_pointer_cast(loop_copy); + ASSERT_NE(node_cast, nullptr); + ASSERT_TRUE(nullptr != loop_copy); + EXPECT_EQ(loop->get_num_iterations(), node_cast->get_num_iterations()); + EXPECT_EQ(loop->get_special_body_ports().body_condition_output_idx, + node_cast->get_special_body_ports().body_condition_output_idx); + EXPECT_EQ(loop->get_special_body_ports().current_iteration_input_idx, + node_cast->get_special_body_ports().current_iteration_input_idx); + ASSERT_TRUE(new_args == loop_copy->input_values()); + + loop_copy->validate_and_infer_types(); + Shape out0_shape{}; + Shape out1_shape{3, 2, 5}; + Shape out2_shape{3, 20, 5}; + EXPECT_EQ(loop_copy->get_output_shape(0), out0_shape); + 
EXPECT_EQ(loop_copy->get_output_shape(1), out1_shape); + EXPECT_EQ(loop_copy->get_output_shape(2), out2_shape); +} \ No newline at end of file diff --git a/ngraph/test/type_prop/loop.cpp b/ngraph/test/type_prop/loop.cpp new file mode 100644 index 00000000000000..d455441f6ddd9d --- /dev/null +++ b/ngraph/test/type_prop/loop.cpp @@ -0,0 +1,753 @@ +//***************************************************************************** +// Copyright 2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** + +#include "gtest/gtest.h" +#include "ngraph/ngraph.hpp" +#include "ngraph/opsets/opset5.hpp" +#include "util/type_prop.hpp" + +using namespace std; +using namespace ngraph; + +// trip_count = 10 +// execution_condition = true +// body_condition = true +// all shapes are static, 10 iterations will be executed +TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes) +{ + // That which we iterate over + auto X = make_shared(element::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::f32, Shape{32, 1, 10}); + auto M = make_shared(element::f32, Shape{32, 1, 10}); + + // Set up the cell body, a function from (Xi, Yi) -> (Zo) + // Body parameters + auto current_iteration = make_shared(element::i64, Shape{1}); + auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto body_condition = std::make_shared( + ngraph::element::boolean, ngraph::Shape{1}, true); + + auto trip_count = + std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); + auto exec_condition = std::make_shared( + ngraph::element::boolean, ngraph::Shape{1}, true); + // Body + auto sum = make_shared(Xi, Yi); + auto Zo = make_shared(sum, M_body); + auto body = make_shared(OutputVector{body_condition, Zo}, + ParameterVector{current_iteration, Xi, Yi, M_body}); + + auto loop = make_shared(trip_count, exec_condition); + loop->set_function(body); + loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0}); + + loop->set_invariant_input(Xi, X); + loop->set_invariant_input(Yi, Y); + loop->set_merged_input(M_body, M, Zo); + + // check input descriptors + for (auto& desc : loop->get_input_descriptions()) + { + auto type_info = desc->get_type_info(); + if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + 
EXPECT_NE(input_desc, nullptr); + } + else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + } + + // Output 0 is last Zo + auto out0 = loop->get_iter_value(body_condition, -1); + auto out1 = loop->get_iter_value(Zo, -1); + // Output 1 is concat of Zos + // start=0, stride=1, part_size=1, end=-1, axis=1 + auto out2 = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, 1); + + // check output descriptors + for (auto& desc : loop->get_output_descriptions()) + { + auto type_info = desc->get_type_info(); + if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) + { + auto output_desc = + as_type_ptr(desc); + EXPECT_NE(output_desc, nullptr); + } + else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) + { + auto output_desc = + as_type_ptr(desc); + EXPECT_NE(output_desc, nullptr); + } + } + + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); + auto result2 = make_shared(out2); + Shape out0_shape{1}; + Shape out1_shape{32, 1, 10}; + Shape out2_shape{32, 10, 10}; + + auto results = ResultVector{result0, result1, result2}; + auto f = make_shared(results, ParameterVector{X, Y, M}); + EXPECT_EQ(result0->get_output_shape(0), out0_shape); + EXPECT_EQ(result1->get_output_shape(0), out1_shape); + EXPECT_EQ(result2->get_output_shape(0), out2_shape); + + EXPECT_EQ(loop->get_output_shape(0), out0_shape); + EXPECT_EQ(loop->get_output_shape(1), out1_shape); + EXPECT_EQ(loop->get_output_shape(2), out2_shape); +} + +// trip_count = 10 +// execution_condition = true +// body_condition = false +// will be executed only 1 iteration, all shapes are static +TEST(type_prop, loop_operation_dowhile_mode_1_iter_static_shapes) +{ + // That which we iterate over + auto X = make_shared(element::f32, Shape{32, 1, 
10}); + auto Y = make_shared(element::f32, Shape{32, 1, 10}); + auto M = make_shared(element::f32, Shape{32, 1, 10}); + + // Set up the cell body, a function from (Xi, Yi) -> (Zo) + // Body parameters + auto current_iteration = make_shared(element::i64, Shape{1}); + auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto body_condition = std::make_shared( + ngraph::element::boolean, ngraph::Shape{1}, false); + + auto trip_count = + std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); + auto exec_condition = std::make_shared( + ngraph::element::boolean, ngraph::Shape{1}, true); + // Body + auto sum = make_shared(Xi, Yi); + auto Zo = make_shared(sum, M_body); + auto body = make_shared(OutputVector{body_condition, Zo}, + ParameterVector{current_iteration, Xi, Yi, M_body}); + + auto loop = make_shared(trip_count, exec_condition); + loop->set_function(body); + loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0}); + + loop->set_invariant_input(Xi, X); + loop->set_invariant_input(Yi, Y); + loop->set_merged_input(M_body, M, Zo); + + // check input descriptors + for (auto& desc : loop->get_input_descriptions()) + { + auto type_info = desc->get_type_info(); + if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + } + + // Output 0 is last Zo + auto out0 = loop->get_iter_value(body_condition, -1); + auto out1 = loop->get_iter_value(Zo, -1); + // Output 1 is concat of Zos + // start=0, stride=1, 
part_size=1, end=-1, axis=1 + auto out2 = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, 1); + + // check output descriptors + for (auto& desc : loop->get_output_descriptions()) + { + auto type_info = desc->get_type_info(); + if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) + { + auto output_desc = + as_type_ptr(desc); + EXPECT_NE(output_desc, nullptr); + } + else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) + { + auto output_desc = + as_type_ptr(desc); + EXPECT_NE(output_desc, nullptr); + } + } + + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); + auto result2 = make_shared(out2); + Shape out0_shape{1}; + Shape out1_shape{32, 1, 10}; + Shape out2_shape{32, 1, 10}; + + auto results = ResultVector{result0, result1, result2}; + auto f = make_shared(results, ParameterVector{X, Y, M}); + EXPECT_EQ(result0->get_output_shape(0), out0_shape); + EXPECT_EQ(result1->get_output_shape(0), out1_shape); + EXPECT_EQ(result2->get_output_shape(0), out2_shape); + + EXPECT_EQ(loop->get_output_shape(0), out0_shape); + EXPECT_EQ(loop->get_output_shape(1), out1_shape); + EXPECT_EQ(loop->get_output_shape(2), out2_shape); +} + +// trip_count = 10 +// execution_condition = true +// body_condition is not a Constant +// concat output is not provided, another outputs will be static +TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_static_shapes) +{ + // That which we iterate over + auto X = make_shared(element::f32, Shape{1}); + auto Y = make_shared(element::f32, Shape{1}); + auto M = make_shared(element::f32, Shape{1}); + + // Set up the cell body, a function from (Xi, Yi) -> (Zo) + // Body parameters + auto current_iteration = make_shared(element::i64, Shape{1}); + auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto condition_const = + 
std::make_shared(ngraph::element::f32, ngraph::Shape{1}, 10); + auto body_condition = std::make_shared(M_body, condition_const); + + auto trip_count = + std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); + auto exec_condition = std::make_shared( + ngraph::element::boolean, ngraph::Shape{1}, true); + // Body + auto sum = make_shared(Xi, Yi); + auto Zo = make_shared(sum, M_body); + auto body = make_shared(OutputVector{body_condition, Zo}, + ParameterVector{Xi, Yi, M_body}); + + auto loop = make_shared(trip_count, exec_condition); + loop->set_function(body); + loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0}); + + loop->set_invariant_input(Xi, X); + loop->set_invariant_input(Yi, Y); + loop->set_merged_input(M_body, M, Zo); + + // check input descriptors + for (auto& desc : loop->get_input_descriptions()) + { + auto type_info = desc->get_type_info(); + if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + } + + // Output 0 is last Zo + auto out0 = loop->get_iter_value(body_condition, -1); + auto out1 = loop->get_iter_value(Zo, -1); + + // check output descriptors + for (auto& desc : loop->get_output_descriptions()) + { + auto type_info = desc->get_type_info(); + if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) + { + auto output_desc = + as_type_ptr(desc); + EXPECT_NE(output_desc, nullptr); + } + else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) + { + auto output_desc = + as_type_ptr(desc); + EXPECT_NE(output_desc, nullptr); + } + } + + auto result0 = make_shared(out0); + auto result1 = 
make_shared(out1); + Shape out0_shape{1}; + Shape out1_shape{1}; + + auto results = ResultVector{result0, result1}; + auto f = make_shared(results, ParameterVector{X, Y, M}); + EXPECT_EQ(result0->get_output_shape(0), out0_shape); + EXPECT_EQ(result1->get_output_shape(0), out1_shape); + + EXPECT_EQ(loop->get_output_shape(0), out0_shape); + EXPECT_EQ(loop->get_output_shape(1), out1_shape); +} + +// trip_count = 10 +// execution_condition = true +// body_condition is not a Constant +// concat output will be dynamic, another outputs are static +TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_dynamic_shapes) +{ + // That which we iterate over + auto X = make_shared(element::f32, Shape{1}); + auto Y = make_shared(element::f32, Shape{1}); + auto M = make_shared(element::f32, Shape{1}); + + // Set up the cell body, a function from (Xi, Yi) -> (Zo) + // Body parameters + auto current_iteration = make_shared(element::i64, Shape{1}); + auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto condition_const = + std::make_shared(ngraph::element::f32, ngraph::Shape{1}, 10); + auto body_condition = std::make_shared(M_body, condition_const); + + auto trip_count = + std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); + auto exec_condition = std::make_shared( + ngraph::element::boolean, ngraph::Shape{1}, true); + // Body + auto sum = make_shared(Xi, Yi); + auto Zo = make_shared(sum, M_body); + auto body = make_shared(OutputVector{body_condition, Zo}, + ParameterVector{current_iteration, Xi, Yi, M_body}); + + auto loop = make_shared(trip_count, exec_condition); + loop->set_function(body); + loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0}); + + loop->set_invariant_input(Xi, X); + loop->set_invariant_input(Yi, Y); + loop->set_merged_input(M_body, M, Zo); + + // check input descriptors + 
for (auto& desc : loop->get_input_descriptions()) + { + auto type_info = desc->get_type_info(); + if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + } + + // Output 0 is last Zo + auto out0 = loop->get_iter_value(body_condition, -1); + auto out1 = loop->get_iter_value(Zo, -1); + auto out2 = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, 1); + + // check output descriptors + for (auto& desc : loop->get_output_descriptions()) + { + auto type_info = desc->get_type_info(); + if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) + { + auto output_desc = + as_type_ptr(desc); + EXPECT_NE(output_desc, nullptr); + } + else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) + { + auto output_desc = + as_type_ptr(desc); + EXPECT_NE(output_desc, nullptr); + } + } + + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); + auto result2 = make_shared(out2); + Shape out0_shape{1}; + Shape out1_shape{1}; + PartialShape out2_shape{PartialShape::dynamic()}; + + auto results = ResultVector{result0, result1}; + auto f = make_shared(results, ParameterVector{X, Y, M}); + EXPECT_EQ(result0->get_output_shape(0), out0_shape); + EXPECT_EQ(result1->get_output_shape(0), out1_shape); + EXPECT_EQ(result2->get_output_partial_shape(0), out2_shape); + + EXPECT_EQ(loop->get_output_shape(0), out0_shape); + EXPECT_EQ(loop->get_output_shape(1), out1_shape); + EXPECT_EQ(loop->get_output_partial_shape(2), out2_shape); +} + +// trip_count = -1 +// execution_condition = true +// body_condition = true +// concat output will be dynamic, another outputs are static 
+TEST(type_prop, loop_operation_infinite_loop_mode_dynamic_iter_dynamic_shapes) +{ + // That which we iterate over + auto X = make_shared(element::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::f32, Shape{32, 1, 10}); + auto M = make_shared(element::f32, Shape{32, 1, 10}); + + // Set up the cell body, a function from (Xi, Yi) -> (Zo) + // Body parameters + auto current_iteration = make_shared(element::i64, Shape{1}); + auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto body_condition = std::make_shared( + ngraph::element::boolean, ngraph::Shape{1}, true); + + auto trip_count = + std::make_shared(ngraph::element::i64, ngraph::Shape{1}, -1); + auto exec_condition = std::make_shared( + ngraph::element::boolean, ngraph::Shape{1}, true); + // Body + auto sum = make_shared(Xi, Yi); + auto Zo = make_shared(sum, M_body); + auto body = make_shared(OutputVector{body_condition, Zo}, + ParameterVector{current_iteration, Xi, Yi, M_body}); + + auto loop = make_shared(trip_count, exec_condition); + loop->set_function(body); + loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0}); + + loop->set_invariant_input(Xi, X); + loop->set_invariant_input(Yi, Y); + loop->set_merged_input(M_body, M, Zo); + + // check input descriptors + for (auto& desc : loop->get_input_descriptions()) + { + auto type_info = desc->get_type_info(); + if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + } + + // Output 0 is last Zo + 
auto out0 = loop->get_iter_value(body_condition, -1); + auto out1 = loop->get_iter_value(Zo, -1); + // Output 1 is concat of Zos + // start=0, stride=1, part_size=1, end=-1, axis=1 + auto out2 = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, 1); + + // check output descriptors + for (auto& desc : loop->get_output_descriptions()) + { + auto type_info = desc->get_type_info(); + if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) + { + auto output_desc = + as_type_ptr(desc); + EXPECT_NE(output_desc, nullptr); + } + else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) + { + auto output_desc = + as_type_ptr(desc); + EXPECT_NE(output_desc, nullptr); + } + } + + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); + auto result2 = make_shared(out2); + Shape out0_shape{1}; + Shape out1_shape{32, 1, 10}; + PartialShape out2_shape{PartialShape::dynamic()}; + + auto results = ResultVector{result0, result1, result2}; + auto f = make_shared(results, ParameterVector{X, Y, M}); + EXPECT_EQ(result0->get_output_shape(0), out0_shape); + EXPECT_EQ(result1->get_output_shape(0), out1_shape); + EXPECT_EQ(result2->get_output_partial_shape(0), out2_shape); + + EXPECT_EQ(loop->get_output_shape(0), out0_shape); + EXPECT_EQ(loop->get_output_shape(1), out1_shape); + EXPECT_EQ(loop->get_output_partial_shape(2), out2_shape); +} + +// SpecialBodyPorts (1, 1) <- test specific +// trip_count = 10 +// execution_condition = true +// body_condition = true +// all shapes are static, 10 iterations will be executed +TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports) +{ + // That which we iterate over + auto X = make_shared(element::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::f32, Shape{32, 1, 10}); + auto M = make_shared(element::f32, Shape{32, 1, 10}); + + // Set up the cell body, a function from (Xi, Yi) -> (Zo) + // Body parameters + auto current_iteration = make_shared(element::i64, Shape{1}); + auto Xi = 
make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto body_condition = std::make_shared( + ngraph::element::boolean, ngraph::Shape{1}, true); + + auto trip_count = + std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); + auto exec_condition = std::make_shared( + ngraph::element::boolean, ngraph::Shape{1}, true); + // Body + auto sum = make_shared(Xi, Yi); + auto Zo = make_shared(sum, M_body); + auto body = make_shared(OutputVector{Zo, body_condition}, + ParameterVector{Xi, current_iteration, Yi, M_body}); + + auto loop = make_shared(trip_count, exec_condition); + loop->set_function(body); + loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{1, 1}); + + loop->set_invariant_input(Xi, X); + loop->set_invariant_input(Yi, Y); + loop->set_merged_input(M_body, M, Zo); + + // check input descriptors + for (auto& desc : loop->get_input_descriptions()) + { + auto type_info = desc->get_type_info(); + if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + } + + // Output 0 is last Zo + auto out0 = loop->get_iter_value(body_condition, -1); + auto out1 = loop->get_iter_value(Zo, -1); + // Output 1 is concat of Zos + // start=0, stride=1, part_size=1, end=-1, axis=1 + auto out2 = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, 1); + + // check output descriptors + for (auto& desc : loop->get_output_descriptions()) + { + auto type_info = desc->get_type_info(); + if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) 
+ { + auto output_desc = + as_type_ptr(desc); + EXPECT_NE(output_desc, nullptr); + } + else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) + { + auto output_desc = + as_type_ptr(desc); + EXPECT_NE(output_desc, nullptr); + } + } + + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); + auto result2 = make_shared(out2); + Shape out0_shape{1}; + Shape out1_shape{32, 1, 10}; + Shape out2_shape{32, 10, 10}; + + auto results = ResultVector{result0, result1, result2}; + auto f = make_shared(results, ParameterVector{X, Y, M}); + EXPECT_EQ(result0->get_output_shape(0), out0_shape); + EXPECT_EQ(result1->get_output_shape(0), out1_shape); + EXPECT_EQ(result2->get_output_shape(0), out2_shape); + + EXPECT_EQ(loop->get_output_shape(0), out0_shape); + EXPECT_EQ(loop->get_output_shape(1), out1_shape); + EXPECT_EQ(loop->get_output_shape(2), out2_shape); +} + +// Scalars instead of 1d tensors with 1 element <-- test specific +// trip_count = 10 +// execution_condition = true +// body_condition = true +// all shapes are static, 10 iterations will be executed +TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports_scalars) +{ + // That which we iterate over + auto X = make_shared(element::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::f32, Shape{32, 1, 10}); + auto M = make_shared(element::f32, Shape{32, 1, 10}); + + // Set up the cell body, a function from (Xi, Yi) -> (Zo) + // Body parameters + auto current_iteration = make_shared(element::i64, Shape{}); + auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto body_condition = + std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); + + auto trip_count = + std::make_shared(ngraph::element::i64, ngraph::Shape{}, 10); + auto exec_condition = + std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); + 
// Body + auto sum = make_shared(Xi, Yi); + auto Zo = make_shared(sum, M_body); + auto body = make_shared(OutputVector{Zo, body_condition}, + ParameterVector{Xi, current_iteration, Yi, M_body}); + + auto loop = make_shared(trip_count, exec_condition); + loop->set_function(body); + loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{1, 1}); + + loop->set_invariant_input(Xi, X); + loop->set_invariant_input(Yi, Y); + loop->set_merged_input(M_body, M, Zo); + + // check input descriptors + for (auto& desc : loop->get_input_descriptions()) + { + auto type_info = desc->get_type_info(); + if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + } + + // Output 0 is last Zo + auto out0 = loop->get_iter_value(body_condition, -1); + auto out1 = loop->get_iter_value(Zo, -1); + // Output 1 is concat of Zos + // start=0, stride=1, part_size=1, end=-1, axis=1 + auto out2 = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, 1); + + // check output descriptors + for (auto& desc : loop->get_output_descriptions()) + { + auto type_info = desc->get_type_info(); + if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) + { + auto output_desc = + as_type_ptr(desc); + EXPECT_NE(output_desc, nullptr); + } + else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) + { + auto output_desc = + as_type_ptr(desc); + EXPECT_NE(output_desc, nullptr); + } + } + + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); + auto result2 = make_shared(out2); + Shape out0_shape{}; + Shape out1_shape{32, 1, 10}; + Shape out2_shape{32, 10, 10}; + + auto results = 
ResultVector{result0, result1, result2}; + auto f = make_shared(results, ParameterVector{X, Y, M}); + EXPECT_EQ(result0->get_output_shape(0), out0_shape); + EXPECT_EQ(result1->get_output_shape(0), out1_shape); + EXPECT_EQ(result2->get_output_shape(0), out2_shape); + + EXPECT_EQ(loop->get_output_shape(0), out0_shape); + EXPECT_EQ(loop->get_output_shape(1), out1_shape); + EXPECT_EQ(loop->get_output_shape(2), out2_shape); +} diff --git a/ngraph/test/type_prop/ti.cpp b/ngraph/test/type_prop/ti.cpp new file mode 100644 index 00000000000000..a7d6938b95f5d4 --- /dev/null +++ b/ngraph/test/type_prop/ti.cpp @@ -0,0 +1,204 @@ +//***************************************************************************** +// Copyright 2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** + +#include "gtest/gtest.h" +#include "ngraph/ngraph.hpp" +#include "ngraph/opsets/opset5.hpp" +#include "util/type_prop.hpp" + +using namespace std; +using namespace ngraph; + +TEST(type_prop, tensor_iterator_lstm) +{ + // That which we iterate over + const size_t N = 32; // Batch size + const size_t L = 10; // Sequence length + const size_t I = 8; // Input size + const size_t H = 32; // Hidden size + auto SENT = make_shared(element::f32, Shape{N, L, I}); + + auto H_init = make_shared(element::f32, Shape{N, 1, H}); + auto C_init = make_shared(element::f32, Shape{N, 1, H}); + + auto W = make_shared(element::f32, Shape{4 * H, I}); + auto R = make_shared(element::f32, Shape{4 * H, H}); + auto H_t = make_shared(element::f32, Shape{N, 1, H}); + auto C_t = make_shared(element::f32, Shape{N, 1, H}); + + // Body + auto X = make_shared(element::f32, Shape{N, 1, I}); + auto W_body = make_shared(element::f32, Shape{4 * H, I}); + auto R_body = make_shared(element::f32, Shape{4 * H, H}); + auto LSTM_cell = make_shared( + make_shared(X, AxisVector{0, 1, 2}, Shape{N, I}), + make_shared(H_t, AxisVector{0, 1, 2}, Shape{N, H}), + make_shared(C_t, AxisVector{0, 1, 2}, Shape{N, H}), + W_body, + R_body, + H); + auto H_o = make_shared(LSTM_cell->output(0), AxisVector{0, 1}, Shape{N, 1, H}); + auto C_o = make_shared(LSTM_cell->output(1), AxisVector{0, 1}, Shape{N, 1, H}); + auto body = make_shared(OutputVector{H_o, C_o}, + ParameterVector{X, H_t, C_t, W_body, R_body}); + + auto tensor_iterator = make_shared(); + tensor_iterator->set_body(body); + // start=0, stride=1, part_size=1, end=39, axis=1 + tensor_iterator->set_sliced_input(X, SENT, 0, 1, 1, -1, 1); + // H_t is Hinit on the first iteration, Ho after that + tensor_iterator->set_merged_input(H_t, H_init, H_o); + tensor_iterator->set_merged_input(C_t, C_init, C_o); + tensor_iterator->set_invariant_input(W_body, W); + 
tensor_iterator->set_invariant_input(R_body, R); + + // Output 0 is last Ho, result 0 of body + auto out0 = tensor_iterator->get_iter_value(H_o, -1); + // Output 1 is last Co, result 1 of body + auto out1 = tensor_iterator->get_iter_value(C_o, -1); + + auto results = ResultVector{make_shared(out0), make_shared(out1)}; + auto f = make_shared(results, ParameterVector{SENT, H_init, C_init, W, R}); +} + +TEST(type_prop, tensor_iterator_2_slice_inputs_part_size_2) +{ + // That which we iterate over + auto X = make_shared(element::f32, Shape{32, 40, 10}); + auto Y = make_shared(element::f32, Shape{32, 40, 10}); + auto M = make_shared(element::f32, Shape{32, 2, 10}); + + // Set up the cell body, a function from (Xi, Yi) -> (Zo) + // Body parameters + auto Xi = make_shared(element::f32, Shape{32, 2, 10}); + auto Yi = make_shared(element::f32, Shape{32, 2, 10}); + auto M_body = make_shared(element::f32, Shape{32, 2, 10}); + + // Body + auto Zo = (Xi + Yi) * M_body; + auto body = make_shared(OutputVector{Zo}, ParameterVector{Xi, Yi, M_body}); + + auto tensor_iterator = make_shared(); + tensor_iterator->set_body(body); + // The Xi are the elements of Xseq + // start=0, stride=2, part_size=2, end=39, axis=1 + tensor_iterator->set_sliced_input(Xi, X, 0, 2, 2, 39, 1); + // The Yi are the elements of Yseq + // start=0, stride=2, part_size=2, end=-1, axis=1 + tensor_iterator->set_sliced_input(Yi, Y, 0, 2, 2, -1, 1); + tensor_iterator->set_invariant_input(M_body, M); + + // Output 0 is last Zo + auto out0 = tensor_iterator->get_iter_value(Zo, -1); + // Output 1 is concat of Zos + // start=0, stride=2, part_size=2, end=39, axis=1 + auto out1 = tensor_iterator->get_concatenated_slices(Zo, 0, 2, 2, 39, 1); + + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); + Shape out0_shape{32, 2, 10}; + Shape out1_shape{32, 40, 10}; + + auto results = ResultVector{result0, result1}; + auto f = make_shared(results, ParameterVector{X, Y, M}); + 
EXPECT_EQ(result0->get_output_shape(0), out0_shape); + EXPECT_EQ(result1->get_output_shape(0), out1_shape); +} + +TEST(type_prop, tensor_iterator_2_slice_inputs_part_size_2_dynamic) +{ + // That which we iterate over + auto X = make_shared(element::f32, Shape{32, 40, 10}); + auto Y = make_shared(element::f32, Shape{32, 40, 10}); + auto M = make_shared(element::f32, Shape{32, 2, 10}); + + // Set up the cell body, a function from (Xi, Yi) -> (Zo) + // Body parameters + auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); + + // Body + auto Zo = (Xi + Yi) * M_body; + auto body = make_shared(OutputVector{Zo}, ParameterVector{Xi, Yi, M_body}); + + auto tensor_iterator = make_shared(); + tensor_iterator->set_body(body); + // The Xi are the elements of Xseq + // start=0, stride=2, part_size=2, end=38, axis=1 + tensor_iterator->set_sliced_input(Xi, X, 0, 2, 2, 38, 1); + // The Yi are the elements of Yseq + // start=0, stride=2, part_size=2, end=-2, axis=1 + tensor_iterator->set_sliced_input(Yi, Y, 0, 2, 2, -2, 1); + tensor_iterator->set_invariant_input(M_body, M); + + // check input descriptors + for (auto& desc : tensor_iterator->get_input_descriptions()) + { + auto type_info = desc->get_type_info(); + if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) + { + auto input_desc = + as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) + { + auto input_desc = as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) + { + auto input_desc = as_type_ptr(desc); + EXPECT_NE(input_desc, nullptr); + } + } + + // Output 0 is last Zo + auto out0 = tensor_iterator->get_iter_value(Zo, -1); + // Output 1 is concat of Zos + // start=0, stride=2, part_size=2, end=38, axis=1 + auto out1 = 
tensor_iterator->get_concatenated_slices(Zo, 0, 2, 2, 38, 1); + + // check output descriptors + for (auto& desc : tensor_iterator->get_output_descriptions()) + { + auto type_info = desc->get_type_info(); + if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) + { + auto output_desc = + as_type_ptr(desc); + EXPECT_NE(output_desc, nullptr); + } + else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) + { + auto output_desc = as_type_ptr(desc); + EXPECT_NE(output_desc, nullptr); + } + } + + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); + Shape out0_shape{32, 2, 10}; + Shape out1_shape{32, 38, 10}; + + auto results = ResultVector{result0, result1}; + auto f = make_shared(results, ParameterVector{X, Y, M}); + EXPECT_EQ(result0->get_output_shape(0), out0_shape); + EXPECT_EQ(result1->get_output_shape(0), out1_shape); + + EXPECT_EQ(body->get_results()[0]->get_output_shape(0), out0_shape); +} From 5965010bec08da471abe1da7495e87a6d7b3f6da Mon Sep 17 00:00:00 2001 From: Mateusz Tabaka Date: Mon, 19 Oct 2020 07:40:04 +0200 Subject: [PATCH 24/35] Revise LRN reference implementation (#2672) * fix typo in LRN docs * fix link to reference in LRN doc * LRN, LRN_IE types alignment with spec * align LRN ref implementation to plugins behavior * update LRN docs * Improve LRN reference implementation performance * restore LRN constructor with no axes in the input * apply code format * revert double->float size_t->int change * small fix to example in doc * revert double->float size_t->int in onnx_importer and backend tests * Changes to docs after review --- docs/ops/normalization/LRN_1.md | 31 +++- .../single_layer_tests/lrn.cpp | 4 +- .../single_layer_tests/lrn.cpp | 4 +- .../include/ngraph/runtime/reference/lrn.hpp | 118 ++++++------ ngraph/test/backend/lrn.in.cpp | 173 +++++++++--------- .../generate_lrn_across_axes.py | 16 +- ngraph/test/runtime/ie/unit_test.manifest | 2 - 7 files changed, 195 insertions(+), 153 deletions(-) diff --git 
a/docs/ops/normalization/LRN_1.md b/docs/ops/normalization/LRN_1.md index 989b40bc521bc6..c03abdd4ec1715 100644 --- a/docs/ops/normalization/LRN_1.md +++ b/docs/ops/normalization/LRN_1.md @@ -26,7 +26,7 @@ * *bias* - * **Description**: *beta* represents the offset. Usually positive number to avoid dividing by zero. + * **Description**: *bias* represents the offset. Usually positive number to avoid dividing by zero. * **Range of values**: no restrictions * **Type**: float * **Default value**: None @@ -50,13 +50,26 @@ * **1**: Output tensor of the same shape and type as the `data` input tensor. -**Detailed description**: [Reference](http://yeephycho.github.io/2016/08/03/Normalizations-in-neural-networks/#Local-Response-Normalization-LRN) - -Here is an example for 4D `data` input tensor and `axes` = `[1]`: - - sqr_sum[a, b, c, d] = - sum(input[a, b - local_size : b + local_size + 1, c, d] ** 2) - output = input / (bias + alpha * sqr_sum) ** beta +**Detailed description**: +Local Response Normalization performs a normalization over local input regions. +Each input value is divided by +\f[ (bias + \frac{alpha}{{size}^{len(axes)}} \cdot \sum_{i} data_{i})^{beta} \f] +The sum is taken over a region of a side length `size` and number of dimensions equal to number of axes. +The region is centered at the input value that's being normalized (with zero padding added if needed). 
+ +Here is an example for 4D `data` input tensor and `axes = [1]`: +``` +sqr_sum[a, b, c, d] = + sum(data[a, max(0, b - size / 2) : min(data.shape[1], b + size / 2 + 1), c, d] ** 2) +output = data / (bias + (alpha / size ** len(axes)) * sqr_sum) ** beta +``` + +Example for 4D `data` input tensor and `axes = [2, 3]`: +``` +sqr_sum[a, b, c, d] = + sum(data[a, b, max(0, c - size / 2) : min(data.shape[2], c + size / 2 + 1), max(0, d - size / 2) : min(data.shape[3], d + size / 2 + 1)] ** 2) +output = data / (bias + (alpha / size ** len(axes)) * sqr_sum) ** beta +``` **Example** @@ -83,4 +96,4 @@ Here is an example for 4D `data` input tensor and `axes` = `[1]`: -``` \ No newline at end of file +``` diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/lrn.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/lrn.cpp index 51edd93c978dbc..67a5b1939d5221 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/lrn.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/lrn.cpp @@ -17,6 +17,8 @@ namespace { const std::vector netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16}; +const std::vector> axes = {{1}, {2, 3}}; + const double alpha = 9.9e-05; const double beta = 2; const double bias = 1.0; @@ -27,7 +29,7 @@ INSTANTIATE_TEST_CASE_P(smoke_LrnCheck, LrnLayerTest, ::testing::Values(beta), ::testing::Values(bias), ::testing::Values(size), - ::testing::Values(std::vector({1})), + ::testing::ValuesIn(axes), ::testing::ValuesIn(netPrecisions), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/lrn.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/lrn.cpp index 
e7d0828931ec1c..572e897a6f96c9 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/lrn.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/lrn.cpp @@ -15,6 +15,8 @@ namespace { const std::vector netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16}; +const std::vector> axes = {{1}, {2, 3}}; + const double alpha = 9.9e-05; const double beta = 2; const double bias = 1.0; @@ -25,7 +27,7 @@ INSTANTIATE_TEST_CASE_P(smoke_LrnCheck, LrnLayerTest, ::testing::Values(beta), ::testing::Values(bias), ::testing::Values(size), - ::testing::Values(std::vector({1})), + ::testing::ValuesIn(axes), ::testing::ValuesIn(netPrecisions), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/lrn.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/lrn.hpp index 2494c6a30235d0..8232a5d78e9221 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/lrn.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/lrn.hpp @@ -29,38 +29,51 @@ namespace ngraph { namespace reference { - template - void sum_region_across_axes(const T* arg, - size_t current_axis_index, - const std::vector& axes, - Coordinate& sum_coord, - T& square_sum, - const std::vector& begin_area, - const std::vector& end_area, - const CoordinateTransform& input_transform) + static size_t point_to_flat_idx(const Shape& shape, const std::vector& point) { - // all nested axes were visited - if (current_axis_index == axes.size()) + size_t idx = point[0]; + for (int i = 1; i < point.size(); i++) { - square_sum += arg[input_transform.index(sum_coord)] * - arg[input_transform.index(sum_coord)]; - return; + idx *= shape[i]; + idx += point[i]; } - auto current_axis = axes[current_axis_index]; - for (auto current_axis_coord = begin_area[current_axis]; - 
current_axis_coord < end_area[current_axis]; - ++current_axis_coord) + return idx; + } + + static std::vector slice_indices(const Shape& full_shape, + const std::vector& begin, + const Shape& slice_shape) + { + size_t begin_idx = begin[0]; + size_t slice_size = shape_size(slice_shape); + size_t rank = begin.size(); + auto coord = begin; + std::vector indices; + indices.reserve(slice_size); + indices.push_back(point_to_flat_idx(full_shape, coord)); + for (int i = 0; i < slice_size - 1; i++) { - sum_coord.at(current_axis) = current_axis_coord; - sum_region_across_axes(arg, - current_axis_index + 1, - axes, - sum_coord, - square_sum, - begin_area, - end_area, - input_transform); + for (int r = rank - 1; r >= 0; r--) + { + coord[r]++; + if (coord[r] < (begin[r] + slice_shape[r])) + break; + coord[r] = begin[r]; + } + indices.push_back(point_to_flat_idx(full_shape, coord)); } + return indices; + } + + template + static T sum_region_across_axes(const T* arg, const std::vector& indices) + { + T square_sum = 0; + for (auto index : indices) + { + square_sum += arg[index] * arg[index]; + } + return square_sum; } template @@ -76,39 +89,42 @@ namespace ngraph T alpha = static_cast(dalpha); T beta = static_cast(dbeta); T bias = static_cast(dbias); + T scale = alpha / std::pow(size, axes.size()); std::vector begin_area(arg_shape.size()); - std::vector end_area(arg_shape.size()); + Shape area_shape(arg_shape.size(), 1); + std::vector axes_map(arg_shape.size(), false); + for (const auto& axis_coord : axes) + { + axes_map[axis_coord] = true; + } CoordinateTransform input_transform(arg_shape); for (const Coordinate& in_coord : input_transform) { // area determined by in_coord local neighborhood - for (const auto& axis_coord : axes) + for (size_t i = 0; i < axes_map.size(); i++) { - begin_area[axis_coord] = - std::max(0, in_coord.at(axis_coord) - (size - 1) / 2); - end_area[axis_coord] = std::min( - arg_shape.at(axis_coord), in_coord.at(axis_coord) + (size - 1) / 2 + 1); + if 
(axes_map[i]) + { + begin_area[i] = std::max(0, in_coord.at(i) - (size - 1) / 2); + area_shape[i] = std::min(arg_shape.at(i), + in_coord.at(i) + (size - 1) / 2 + 1) - + begin_area[i]; + } + else + { + begin_area[i] = in_coord.at(i); + } } - T square_sum = 0; - auto sum_coord = in_coord; - auto axes_vec = std::vector(axes.begin(), axes.end()); - sum_region_across_axes(arg, - 0, - axes_vec, - sum_coord, - square_sum, - begin_area, - end_area, - input_transform); - - T x = arg[input_transform.index(in_coord)]; - out[input_transform.index(in_coord)] = - x / (std::pow(bias + (alpha / size) * square_sum, beta)); + T square_sum = sum_region_across_axes( + arg, slice_indices(arg_shape, begin_area, area_shape)); + auto index = input_transform.index(in_coord); + T x = arg[index]; + out[index] = x / (std::pow(bias + scale * square_sum, beta)); } } - } - } -} + } // namespace reference + } // namespace runtime +} // namespace ngraph diff --git a/ngraph/test/backend/lrn.in.cpp b/ngraph/test/backend/lrn.in.cpp index a3902be70877b1..3c568e76d041d3 100644 --- a/ngraph/test/backend/lrn.in.cpp +++ b/ngraph/test/backend/lrn.in.cpp @@ -46,7 +46,6 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_channel) double beta = 0.5; double bias = 1; size_t size = 3; - // lrn is performed across channel as default auto lrn = make_shared(A, alpha, beta, bias, size); auto f = make_shared(lrn, ParameterVector{A}); @@ -55,11 +54,11 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_channel) auto test_case = test::TestCase(f); test_case.add_input(shape, a); test_case.add_expected_output(shape, - {0.f, + {0.0000000f, 0.3015113f, - 0.4364357f, - 0.5f, - 0.8728715f, + 0.4364358f, + 0.5000000f, + 0.8728716f, 0.8451542f, 0.5970223f, 0.6115928f, @@ -67,6 +66,7 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_channel) 0.5669467f, 0.7784989f, 0.7720487f}); + test_case.run(); } @@ -87,7 +87,7 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_h) auto test_case = test::TestCase(f); test_case.add_input(shape, a); 
test_case.add_expected_output(shape, - {0.0f, + {0.0000000f, 0.7071068f, 0.5345225f, 0.8017837f, @@ -97,8 +97,9 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_h) 0.7548294f, 0.6620847f, 0.7448453f, - 0.671156f, + 0.6711560f, 0.7382717f}); + test_case.run(); } @@ -119,18 +120,19 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_hw) auto test_case = test::TestCase(f); test_case.add_input(shape, a); test_case.add_expected_output(shape, - {0.0f, - 0.7071068f, - 0.5345225f, - 0.8017837f, - 0.6172134f, - 0.7715167f, - 0.6469966f, - 0.7548294f, - 0.6620847f, - 0.7448453f, - 0.671156f, - 0.7382717f}); + {0.0000000f, + 0.8660254f, + 0.8660254f, + 1.2990381f, + 1.0444659f, + 1.3055824f, + 1.1078234f, + 1.2924607f, + 1.1389896f, + 1.2813632f, + 1.1572751f, + 1.2730026f}); + test_case.run(); } @@ -151,18 +153,19 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_all_dims) auto test_case = test::TestCase(f); test_case.add_input(shape, a); test_case.add_expected_output(shape, - {0.0f, - 0.0638877f, - 0.0888231f, - 0.1332347f, - 0.1949481f, - 0.2436851f, - 0.3833259f, - 0.4472136f, - 0.3552925f, - 0.399704f, - 0.4873702f, - 0.5361072f}); + {0.0000000f, + 0.3156438f, + 0.4501407f, + 0.6752110f, + 0.9830783f, + 1.2288479f, + 1.8938627f, + 2.2095065f, + 1.8005627f, + 2.0256331f, + 2.4576957f, + 2.7034652f}); + test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1); } @@ -183,18 +186,19 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_nw) auto test_case = test::TestCase(f); test_case.add_input(shape, a); test_case.add_expected_output(shape, - {0.0f, - 0.140028f, - 0.2407717f, - 0.3144855f, - 0.3698001f, - 0.4123931f, - 0.9863939f, - 0.9801961f, - 0.9630868f, - 0.9434564f, - 0.9245003f, - 0.9072647f}); + {0.0000000f, + 0.2379155f, + 0.4111132f, + 0.5388159f, + 0.6351073f, + 0.7094756f, + 1.6641006f, + 1.6654084f, + 1.6444529f, + 1.6164477f, + 1.5877683f, + 1.5608464f}); + test_case.run(); } @@ -215,18 +219,19 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_empty) auto test_case = test::TestCase(f); 
test_case.add_input(shape, a); test_case.add_expected_output(shape, - {0.0f, - 0.7071068f, - 0.8944272f, - 0.9486833f, - 0.9701425f, - 0.9805807f, - 0.9863939f, - 0.9899495f, - 0.9922779f, - 0.9938837f, - 0.9950372f, - 0.9958932f}); + {0.0000000f, + 0.5000000f, + 0.5547002f, + 0.5669467f, + 0.5714286f, + 0.5735393f, + 0.5746958f, + 0.5753965f, + 0.5758526f, + 0.5761660f, + 0.5763904f, + 0.5765567f}); + test_case.run(); } @@ -248,10 +253,11 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_6D_across_2_axes) auto test_case = test::TestCase(f); test_case.add_input(shape, a); test_case.add_expected_output( - shape, {0.0f, 0.2581989f, 0.5163978f, 0.7745967f, 0.3549426f, 0.4436783f, - 0.5324139f, 0.6211495f, 0.4175966f, 0.4697962f, 0.5219957f, 0.5741953f, - 0.4426267f, 0.4795122f, 0.5163978f, 0.5532833f, 0.4560274f, 0.4845291f, - 0.5130308f, 0.5415326f, 0.4643635f, 0.4875816f, 0.5107998f, 0.534018f}); + shape, {0.0000000f, 0.4200840f, 0.8401681f, 1.2602521f, 0.6099943f, 0.7624928f, + 0.9149914f, 1.0674900f, 0.7213357f, 0.8115027f, 0.9016696f, 0.9918366f, + 0.7656109f, 0.8294119f, 0.8932127f, 0.9570137f, 0.7892218f, 0.8385482f, + 0.8878745f, 0.9372009f, 0.8038679f, 0.8440613f, 0.8842546f, 0.9244481f}); + test_case.run(); } @@ -272,18 +278,18 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_2d_across_empty) auto test_case = test::TestCase(f); test_case.add_input(shape, a); test_case.add_expected_output(shape, - {0.0f, - 0.7071068f, - 0.8944272f, - 0.9486833f, - 0.9701425f, - 0.9805807f, - 0.9863939f, - 0.9899495f, - 0.9922779f, - 0.9938837f, - 0.9950372f, - 0.9958932f}); + {0.0000000f, + 0.5000000f, + 0.5547002f, + 0.5669467f, + 0.5714286f, + 0.5735393f, + 0.5746958f, + 0.5753964f, + 0.5758526f, + 0.5761660f, + 0.5763904f, + 0.5765566f}); test_case.run(); } @@ -315,17 +321,18 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_2d_across_outermost_axis) auto test_case = test::TestCase(f); test_case.add_input(shape, a); test_case.add_expected_output(shape, - {0.45900404f, - 0.14999892f, - -1.04828012f, - 
-0.99727529f, - 0.41144446f, - 0.08083449f, - -0.16259004f, - -0.09422511f, - -0.02180192f, - -0.34259823f, - 0.35597473f, - -0.70393407f}); + {0.4590040f, + 0.1499989f, + -1.0482801f, + -0.9972753f, + 0.4114444f, + 0.0808345f, + -0.1625900f, + -0.0942251f, + -0.0218018f, + -0.3425926f, + 0.3559732f, + -0.7039225f}); + test_case.run(23); } diff --git a/ngraph/test/ref_generators/generate_lrn_across_axes.py b/ngraph/test/ref_generators/generate_lrn_across_axes.py index e352471d9835ff..0920481e10a0dc 100644 --- a/ngraph/test/ref_generators/generate_lrn_across_axes.py +++ b/ngraph/test/ref_generators/generate_lrn_across_axes.py @@ -25,20 +25,24 @@ def LRN(input, size=3, bias=1.0, alpha=3.0, beta=0.5): H = input.shape[2] W = input.shape[3] for n in range(N): + begin_n = max(0, n - (size-1)//2) + end_n = min(N, n + (size-1)//2 + 1) for c in range(C): + begin_c = max(0, c - (size-1)//2) + end_c = min(C, c + (size-1)//2 + 1) for h in range(H): - begin_h = max(0, h - (size-1)/2) - end_h = min(H, h + (size-1)/2 + 1) + begin_h = max(0, h - (size-1)//2) + end_h = min(H, h + (size-1)//2 + 1) for w in range(W): - begin_w = max(0, w - (size-1)/2) - end_w = min(W, w + (size-1)/2 + 1) + begin_w = max(0, w - (size-1)//2) + end_w = min(W, w + (size-1)//2 + 1) patch = input[n, c, begin_h:end_h, begin_w:end_w] output[n, c, h, w] /= ( - np.power(bias + (alpha/size) * np.sum(patch * patch), beta)) + np.power(bias + (alpha/(size**2)) * np.sum(patch * patch), beta)) return output input = np.arange(0, 12, 1).reshape(2, 3, 2, 1).astype(np.float32) result = LRN(input) for elem in np.nditer(result): - print(str(round(elem, 7)) + "f, ") + print("{:.7f}f,".format(elem)) diff --git a/ngraph/test/runtime/ie/unit_test.manifest b/ngraph/test/runtime/ie/unit_test.manifest index d565eab733ddf9..abee8acb08fee2 100644 --- a/ngraph/test/runtime/ie/unit_test.manifest +++ b/ngraph/test/runtime/ie/unit_test.manifest @@ -452,7 +452,6 @@ max_pool_3d 
avg_pool_2d_2channel_2image_padded_only_above_include_in_computation avg_pool_3d_uneven_strided_padded multiple_result -lrn_across_hw lrn_across_all_dims elu elu_negative_alpha @@ -1331,7 +1330,6 @@ IE_GPU.max_3d_to_matrix_least_sig IE_GPU.max_3d_to_vector IE_GPU.max_3d_to_scalar IE_GPU.max_3d_to_scalar_int32 -IE_GPU.lrn_across_channel IE_GPU.log IE_GPU.gather_4d_indices_no_axis_2d_input IE_GPU.gather_3d_indices_no_axis_2d_input From 3c5aefb4275aa7a025e6915a9e03f5a9d8461d5f Mon Sep 17 00:00:00 2001 From: Vitaliy Urusovskij Date: Mon, 19 Oct 2020 09:48:38 +0300 Subject: [PATCH 25/35] Remove `memcheck_pregen_irs_tests` MemCheck configs due obsolescence (#2693) --- .../desktop_references_config.xml | 533 ------------------ .../nightly_configs/desktop_test_config.xml | 155 ----- .../desktop_references_config.xml | 533 ------------------ .../weekly_configs/desktop_test_config.xml | 155 ----- 4 files changed, 1376 deletions(-) delete mode 100644 tests/stress_tests/.automation/memcheck_pregen_irs_tests/nightly_configs/desktop_references_config.xml delete mode 100644 tests/stress_tests/.automation/memcheck_pregen_irs_tests/nightly_configs/desktop_test_config.xml delete mode 100644 tests/stress_tests/.automation/memcheck_pregen_irs_tests/weekly_configs/desktop_references_config.xml delete mode 100644 tests/stress_tests/.automation/memcheck_pregen_irs_tests/weekly_configs/desktop_test_config.xml diff --git a/tests/stress_tests/.automation/memcheck_pregen_irs_tests/nightly_configs/desktop_references_config.xml b/tests/stress_tests/.automation/memcheck_pregen_irs_tests/nightly_configs/desktop_references_config.xml deleted file mode 100644 index 32ef748d0a8875..00000000000000 --- a/tests/stress_tests/.automation/memcheck_pregen_irs_tests/nightly_configs/desktop_references_config.xml +++ /dev/null @@ -1,533 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/tests/stress_tests/.automation/memcheck_pregen_irs_tests/nightly_configs/desktop_test_config.xml b/tests/stress_tests/.automation/memcheck_pregen_irs_tests/nightly_configs/desktop_test_config.xml deleted file mode 100644 index 6601460da837d5..00000000000000 --- a/tests/stress_tests/.automation/memcheck_pregen_irs_tests/nightly_configs/desktop_test_config.xml +++ /dev/null @@ -1,155 +0,0 @@ - - - CPU - GPU - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/tests/stress_tests/.automation/memcheck_pregen_irs_tests/weekly_configs/desktop_references_config.xml b/tests/stress_tests/.automation/memcheck_pregen_irs_tests/weekly_configs/desktop_references_config.xml deleted file mode 100644 index 32ef748d0a8875..00000000000000 --- 
a/tests/stress_tests/.automation/memcheck_pregen_irs_tests/weekly_configs/desktop_references_config.xml +++ /dev/null @@ -1,533 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/tests/stress_tests/.automation/memcheck_pregen_irs_tests/weekly_configs/desktop_test_config.xml b/tests/stress_tests/.automation/memcheck_pregen_irs_tests/weekly_configs/desktop_test_config.xml deleted file mode 100644 index 6601460da837d5..00000000000000 --- a/tests/stress_tests/.automation/memcheck_pregen_irs_tests/weekly_configs/desktop_test_config.xml +++ /dev/null @@ -1,155 +0,0 @@ - - - CPU - GPU - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file From 
e1428ecf1d74d6b1fb88c610a49b39b7eba5f2f8 Mon Sep 17 00:00:00 2001 From: Krzysztof Bruniecki Date: Mon, 19 Oct 2020 11:21:01 +0200 Subject: [PATCH 26/35] Improve GNA MT sychronization (#2553) * Sync GNA lib calls to avoid multi threads and plugins crash * Remove TODO * Enable sync for GNA1 * Fix GNA1 sync * Add core_threading_tests to GNA Plugin to address story 31709 * Disable and change test description --- .../src/gna_plugin/gna_device.cpp | 7 ++++++ .../src/gna_plugin/gna_device.hpp | 3 +++ .../inference_engine/core_threading_tests.cpp | 2 +- .../behavior/core_threading_tests.cpp | 23 +++++++++++++++++++ 4 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/core_threading_tests.cpp diff --git a/inference-engine/src/gna_plugin/gna_device.cpp b/inference-engine/src/gna_plugin/gna_device.cpp index 97d4026066a1b3..ce15f2b895bc31 100644 --- a/inference-engine/src/gna_plugin/gna_device.cpp +++ b/inference-engine/src/gna_plugin/gna_device.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #if GNA_LIB_VER == 2 @@ -24,6 +25,8 @@ #include "details/ie_exception.hpp" #include "gna_plugin_log.hpp" +std::mutex GNADeviceHelper::acrossPluginsSync{}; + uint8_t* GNADeviceHelper::alloc(uint32_t size_requested, uint32_t *size_granted) { void * memPtr = nullptr; #if GNA_LIB_VER == 1 @@ -62,6 +65,7 @@ uint32_t GNADeviceHelper::propagate(const intel_nnet_type_t *pNeuralNetwork, return reqId; } #else + void GNADeviceHelper::setUpActiveList(const uint32_t requestConfigId, uint32_t layerIndex, uint32_t* ptr_active_indices, uint32_t num_active_indices) { const auto status = Gna2RequestConfigEnableActiveList(requestConfigId, layerIndex, num_active_indices, ptr_active_indices); checkGna2Status(status); @@ -363,6 +367,7 @@ void GNADeviceHelper::checkStatus() const { #endif void GNADeviceHelper::open(uint8_t n_threads) { + std::unique_lock lockGnaCalls{ acrossPluginsSync }; #if 
GNA_LIB_VER == 1 nGNAHandle = GNADeviceOpenSetThreads(&nGNAStatus, n_threads); checkStatus(); @@ -379,6 +384,7 @@ void GNADeviceHelper::open(uint8_t n_threads) { } void GNADeviceHelper::close() { + std::unique_lock lockGnaCalls{ acrossPluginsSync }; #if GNA_LIB_VER == 1 GNADeviceClose(nGNAHandle); nGNAHandle = 0; @@ -398,6 +404,7 @@ void GNADeviceHelper::close() { } void GNADeviceHelper::setOMPThreads(uint8_t const n_threads) { + std::unique_lock lockGnaCalls{ acrossPluginsSync }; #if GNA_LIB_VER == 1 gmmSetThreads(n_threads); #else diff --git a/inference-engine/src/gna_plugin/gna_device.hpp b/inference-engine/src/gna_plugin/gna_device.hpp index 7b35f3c4a64cfb..0f71772d62e975 100644 --- a/inference-engine/src/gna_plugin/gna_device.hpp +++ b/inference-engine/src/gna_plugin/gna_device.hpp @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -37,6 +38,7 @@ enum GnaWaitStatus : int { * holds gna - style handle in RAII way */ class GNADeviceHelper { + static std::mutex acrossPluginsSync; #if GNA_LIB_VER == 1 intel_gna_status_t nGNAStatus = GNA_NOERROR; intel_gna_handle_t nGNAHandle = 0; @@ -168,6 +170,7 @@ class GNADeviceHelper { void setOMPThreads(uint8_t const n_threads); void initGnaPerfCounters() { + std::unique_lock lockGnaCalls{ acrossPluginsSync }; #if GNA_LIB_VER == 1 nGNAPerfResults = {{0, 0, 0, 0, 0, 0, 0}, {0, 0}, {0, 0, 0}, {0, 0}}; nGNAPerfResultsTotal = {{0, 0, 0, 0, 0, 0, 0}, {0, 0}, {0, 0, 0}, {0, 0}}; diff --git a/inference-engine/tests/functional/inference_engine/core_threading_tests.cpp b/inference-engine/tests/functional/inference_engine/core_threading_tests.cpp index 576f11219b1e8f..2271ddd037e46c 100644 --- a/inference-engine/tests/functional/inference_engine/core_threading_tests.cpp +++ b/inference-engine/tests/functional/inference_engine/core_threading_tests.cpp @@ -117,7 +117,7 @@ TEST_F(CoreThreadingTests, RegisterPlugins) { } // tested function: GetAvailableDevices, UnregisterPlugin -// TODO: some plugins initialization 
(e.g. GNA) failed during such stress-test scenario +// TODO: some initialization (e.g. thread/dlopen) sporadically fails during such stress-test scenario TEST_F(CoreThreadingTests, DISABLED_GetAvailableDevices) { InferenceEngine::Core ie; runParallel([&] () { diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/core_threading_tests.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/core_threading_tests.cpp new file mode 100644 index 00000000000000..139bd9f512c4c7 --- /dev/null +++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/behavior/core_threading_tests.cpp @@ -0,0 +1,23 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +namespace { + +Params params[] = { + std::tuple{ CommonTestUtils::DEVICE_GNA, {{ CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES) }}}, + std::tuple{ CommonTestUtils::DEVICE_HETERO, {{ "TARGET_FALLBACK", CommonTestUtils::DEVICE_GNA }}}, + std::tuple{ CommonTestUtils::DEVICE_MULTI, {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES), CommonTestUtils::DEVICE_GNA }}}, +}; + +} // namespace + +INSTANTIATE_TEST_CASE_P(GNA, CoreThreadingTests, testing::ValuesIn(params), CoreThreadingTests::getTestCaseName); + +INSTANTIATE_TEST_CASE_P(DISABLED_GNA, CoreThreadingTestsWithIterations, + testing::Combine(testing::ValuesIn(params), + testing::Values(2), + testing::Values(2)), + CoreThreadingTestsWithIterations::getTestCaseName); From 8715b60d8810fcfea46703d47fb78fbac2aaf8b8 Mon Sep 17 00:00:00 2001 From: Anton Potapov Date: Mon, 19 Oct 2020 12:35:59 +0300 Subject: [PATCH 27/35] [PP GAPI] Extended plug-ins shared precision conversion tests to use (#2677) `GetBlob()` as well - test were extended to cover case when input tensors are copied into Blob return by `InferRequest::GetBlob` - channel number of input tensor is made a test parameter --- .../behavior/preprocessing.cpp | 13 +++++- .../behavior/preprocessing.cpp | 12 +++++- 
.../shared/include/behavior/preprocessing.hpp | 40 +++++++++++++++++-- 3 files changed, 60 insertions(+), 5 deletions(-) diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/preprocessing.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/preprocessing.cpp index 747f286efbd962..344ceacf4da865 100644 --- a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/preprocessing.cpp +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/preprocessing.cpp @@ -19,9 +19,20 @@ const std::vector> configs = { {} }; -INSTANTIATE_TEST_CASE_P(PreprocessingPrecisionConvertTests, PreprocessingPrecisionConvertTest, +INSTANTIATE_TEST_CASE_P(PreprocessingPrecisionConvertTestsViaSetInput, PreprocessingPrecisionConvertTest, ::testing::Combine( ::testing::ValuesIn(inputPrecisions), + ::testing::Values(1, 2, 3, 4, 5), // Number of input tensor channels + ::testing::Values(true), // Use SetInput + ::testing::Values("TEMPLATE"), + ::testing::ValuesIn(configs)), + PreprocessingPrecisionConvertTest::getTestCaseName); + +INSTANTIATE_TEST_CASE_P(PreprocessingPrecisionConvertTestsViaGetBlob, PreprocessingPrecisionConvertTest, + ::testing::Combine( + ::testing::ValuesIn(inputPrecisions), + ::testing::Values(4, 5), // Number of input tensor channels (blob_copy only supports 4d and 5d tensors) + ::testing::Values(false), // use GetBlob ::testing::Values("TEMPLATE"), ::testing::ValuesIn(configs)), PreprocessingPrecisionConvertTest::getTestCaseName); diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/preprocessing.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/preprocessing.cpp index 769cf8fe0f8f21..a0106b02350a92 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/preprocessing.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/preprocessing.cpp @@ -19,11 +19,21 
@@ const std::vector> configs = { {} }; -INSTANTIATE_TEST_CASE_P(smoke_BehaviourPreprocessingTests, PreprocessingPrecisionConvertTest, +INSTANTIATE_TEST_CASE_P(BehaviourPreprocessingTestsViaSetInput, PreprocessingPrecisionConvertTest, ::testing::Combine( ::testing::ValuesIn(inputPrecisions), + ::testing::Values(1, 2, 3, 4, 5), // Number of input tensor channels + ::testing::Values(true), // Use SetInput ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::ValuesIn(configs)), PreprocessingPrecisionConvertTest::getTestCaseName); +INSTANTIATE_TEST_CASE_P(BehaviourPreprocessingTestsViaGetBlob, PreprocessingPrecisionConvertTest, + ::testing::Combine( + ::testing::ValuesIn(inputPrecisions), + ::testing::Values(4, 5), // Number of input tensor channels (blob_copy only supports 4d and 5d tensors) + ::testing::Values(false), // use GetBlob + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::ValuesIn(configs)), + PreprocessingPrecisionConvertTest::getTestCaseName); } // namespace diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/preprocessing.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/preprocessing.hpp index 7d9e773bbc4bf3..bb27da54654b18 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/preprocessing.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/preprocessing.hpp @@ -27,6 +27,8 @@ namespace BehaviorTestsDefinitions { using PreprocessingPrecisionConvertParams = std::tuple< InferenceEngine::Precision, // Input precision + unsigned, // channels number + bool, // Use normal (i.e. SetInput() or unusal i.e. 
GetBlob()) inut method std::string, // Device name std::map // Config >; @@ -37,11 +39,15 @@ struct PreprocessingPrecisionConvertTest : public: static std::string getTestCaseName(testing::TestParamInfo obj) { InferenceEngine::Precision inPrc; + bool useSetInput; + unsigned channels; std::string targetDevice; std::map configuration; - std::tie(inPrc, targetDevice, configuration) = obj.param; + std::tie(inPrc, channels, useSetInput, targetDevice, configuration) = obj.param; std::ostringstream result; result << "inPRC=" << inPrc.name() << "_"; + result << channels << "Ch" << "_"; + result << (useSetInput ? "SetInput" : "GetBlob") << "_"; result << "targetDevice=" << targetDevice; if (!configuration.empty()) { for (auto& configItem : configuration) { @@ -51,6 +57,32 @@ struct PreprocessingPrecisionConvertTest : return result.str(); } + // Need to override Infer() due to usage of GetBlob() as input method. + // Mostly a copy of LayerTestsCommon::Infer() + void Infer() override { + inferRequest = executableNetwork.CreateInferRequest(); + inputs.clear(); + + for (const auto &input : executableNetwork.GetInputsInfo()) { + const auto &info = input.second; + auto blob = GenerateInput(*info); + if (!use_set_input) { + InferenceEngine::Blob::Ptr input = inferRequest.GetBlob(info->name()); + blob_copy(blob, input); + } else { + inferRequest.SetBlob(info->name(), blob); + } + + inputs.push_back(blob); + } + if (configuration.count(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED) && + configuration.count(InferenceEngine::PluginConfigParams::YES)) { + auto batchSize = executableNetwork.GetInputsInfo().begin()->second->getTensorDesc().getDims()[0] / 2; + inferRequest.SetBatch(batchSize); + } + inferRequest.Infer(); + } + void SetUp() override { // This test: // - Strive to test the plugin internal preprocessing (precision conversion) only. 
@@ -60,11 +92,11 @@ struct PreprocessingPrecisionConvertTest : SetRefMode(LayerTestsUtils::RefMode::INTERPRETER); - std::tie(inPrc, targetDevice, configuration) = this->GetParam(); + std::tie(inPrc, channels, use_set_input, targetDevice, configuration) = this->GetParam(); bool specialZero = true; - std::vector inputShape {4, 4}; + std::vector inputShape(channels, 4); auto make_ngraph = [&](bool with_extra_conv) { auto in_prec = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(with_extra_conv ? inPrc : decltype(inPrc)(InferenceEngine::Precision::FP32)); @@ -95,6 +127,8 @@ struct PreprocessingPrecisionConvertTest : public: std::shared_ptr ie = PluginCache::get().ie(); std::shared_ptr reference_function; + bool use_set_input = true; + unsigned channels = 0; }; From ff7fc01c7614b5b50442410b9dbd4f66d22fbd47 Mon Sep 17 00:00:00 2001 From: Nikolay Shchegolev Date: Mon, 5 Oct 2020 11:58:54 +0300 Subject: [PATCH 28/35] [CPU] CTCLoss performance improvement. --- .../src/mkldnn_plugin/nodes/ctc_loss.cpp | 405 ++++++++---------- 1 file changed, 179 insertions(+), 226 deletions(-) diff --git a/inference-engine/src/mkldnn_plugin/nodes/ctc_loss.cpp b/inference-engine/src/mkldnn_plugin/nodes/ctc_loss.cpp index f29b7ce7bd2a09..1453a1498108d8 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/ctc_loss.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/ctc_loss.cpp @@ -60,6 +60,8 @@ class CTCLossImpl : public ExtLayerBase { StatusCode execute(std::vector& inputs, std::vector& outputs, ResponseDesc *resp) noexcept override { + StatusCode returnCode = OK; + const float* logits = inputs[0]->cbuffer().as() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); const int* logitsLength = inputs[1]->cbuffer().as() + @@ -72,257 +74,210 @@ class CTCLossImpl : public ExtLayerBase { outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); const auto& logitsShape = inputs[0]->getTensorDesc().getDims(); - const auto batchNum = logitsShape[0]; - const auto maxTime = 
logitsShape[1]; - const auto classesNum = logitsShape[2]; + const size_t batchNum = logitsShape[0]; + const size_t maxTime = logitsShape[1]; + const size_t classesNum = logitsShape[2]; int blankIndex = classesNum - 1; if (inputs.size() > 4) { blankIndex = inputs[4]->cbuffer().as()[0]; } - std::vector targetD(maxTime); - - const size_t TC = maxTime * classesNum; - - for (size_t b = 0; b < batchNum; b++) { - const int actualLogitLen = logitsLength[b]; - const int actualTargetLen = labelsLength[b]; - if (actualLogitLen < 0 || actualTargetLen < 0 || actualLogitLen > maxTime || actualTargetLen > maxTime - || actualTargetLen > actualLogitLen) { - std::string errorMsg = _logPrefix + ". Logit or label length cannot be greater than max sequence length. " - + "Also a label length cannot be greater than a logit length" - + " and both cannot be negative.\nMaxSeqLen: " - + std::to_string(maxTime) + "; Logit len: " + std::to_string(actualLogitLen) - + "; Label len: " + std::to_string(actualTargetLen); - errorMsg.copy(resp->msg, sizeof(resp->msg) - 1); - return GENERAL_ERROR; - } - - const int* target = &labels[b * maxTime]; - // Decoding target: merge repeated characters if preprocess_collapse_repeated == True, - // find unique elemnts if unique == True - size_t decodedTargetLen = 0lu; - if (_unique) { - std::unordered_set uniqVals; - for (size_t t = 0lu; t < actualTargetLen; t++) { - if (uniqVals.find(target[t]) != uniqVals.end()) { - continue; - } - uniqVals.insert(target[t]); - targetD[decodedTargetLen++] = target[t]; + std::vector decodedTargetLenB(batchNum, 0); + std::vector> targetDB(batchNum); + std::vector>> logProbabilitiesB(batchNum); + size_t workAmount2 = 0lu; + std::vector errorMsgB(parallel_get_max_threads()); + + auto threadBody_1 = [&](const int ithr, const int nthr) { + size_t start(0lu), end(0lu); + splitter(batchNum, nthr, ithr, start, end); + if (start >= end) + return; + + for (size_t b = start; b < end; b++) { + if (logitsLength[b] < 0 || labelsLength[b] < 
0 || logitsLength[b] > maxTime || labelsLength[b] > logitsLength[b]) { + errorMsgB[ithr] = _logPrefix + ". Logit length cannot be greater than max sequence length. " + + "Label length cannot be greater than a logit length" + + " and both cannot be negative.\nMaxSeqLen: " + + std::to_string(maxTime) + "; Logit len: " + std::to_string(logitsLength[b]) + + "; Label len: " + std::to_string(labelsLength[b]); + returnCode = GENERAL_ERROR; + return; } - } else if (_preprocessCollapseRepeated) { - int prevValue = target[0]; - targetD[decodedTargetLen++] = target[0]; - for (size_t t = 1lu; t < actualTargetLen; t++) { - if (target[t] == prevValue) { - continue; + const size_t actualLogitLen = logitsLength[b]; + const size_t actualTargetLen = labelsLength[b]; + size_t decodedTargetLen = 0lu; + + // Decoding target: merge repeated characters if preprocess_collapse_repeated == True, + // find unique elemnts if unique == True. + // Inserts blanks before each index and a blank at the end. + const int* target = &labels[b * maxTime]; + targetDB[b].resize(actualTargetLen * 2 + 1); + auto& targetD = targetDB[b]; + if (_unique) { + std::unordered_set uniqVals; + for (size_t t = 0lu; t < actualTargetLen; t++) { + if (uniqVals.find(target[t]) != uniqVals.end()) { + continue; + } + uniqVals.insert(target[t]); + targetD[decodedTargetLen++] = blankIndex; + targetD[decodedTargetLen++] = target[t]; + } + targetD[decodedTargetLen++] = blankIndex; + } else if (_preprocessCollapseRepeated) { + auto prevValue = target[0]; + targetD[decodedTargetLen++] = blankIndex; + targetD[decodedTargetLen++] = target[0]; + for (size_t t = 1lu; t < actualTargetLen; t++) { + if (target[t] == prevValue) { + continue; + } + targetD[decodedTargetLen++] = blankIndex; + targetD[decodedTargetLen++] = prevValue = target[t]; } - targetD[decodedTargetLen++] = target[t]; - prevValue = target[t]; + targetD[decodedTargetLen++] = blankIndex; + } else { + for (size_t t = 0lu; t < actualTargetLen; t++) { + 
targetD[decodedTargetLen++] = blankIndex; + targetD[decodedTargetLen++] = target[t]; + } + targetD[decodedTargetLen++] = blankIndex; } - } else { - std::copy(target, target + actualTargetLen, targetD.data()); - decodedTargetLen = actualTargetLen; - } - - const size_t BTC = b * TC; + decodedTargetLenB[b] = decodedTargetLen; - std::vector> logProbabilities(actualLogitLen); - float logProb = 0.f, kExp = 0.f; - for (size_t t = 0; t < actualLogitLen; t++) { - kExp = 0.f; - const size_t btcT = BTC + classesNum * t; - for (size_t c = 0; c < classesNum; c++) { - kExp += std::exp(logits[btcT + c]); + auto& logProbabilities = logProbabilitiesB[b]; + logProbabilities.resize(actualLogitLen); + for (size_t ll = 0; ll < actualLogitLen; ll++) { + logProbabilities[ll].resize(decodedTargetLen); } - for (size_t s = 0; s < decodedTargetLen; s++) { - logProb = logits[btcT + targetD[s]] - std::log(kExp); - logProbabilities[t].insert({targetD[s], logProb}); - } - logProb = logits[btcT + blankIndex] - std::log(kExp); - logProbabilities[t].insert({blankIndex, logProb}); + workAmount2 += actualLogitLen; + } // for batch + }; // threadBody_1 + + parallel_nt(0, threadBody_1); + if (returnCode != OK) { + std::string resErr(""); + for (auto& err : errorMsgB) { + if (!err.empty()) + resErr += err + "\n"; + resErr.copy(resp->msg, sizeof(resp->msg) - 1); } + return returnCode; + } - const auto float_inf = std::numeric_limits::infinity(); - size_t work_amount = actualLogitLen - decodedTargetLen + 1lu; - std::vector sumPerThread(parallel_get_max_threads(), -float_inf); + const size_t TC = maxTime * classesNum; - // Looking for aligned paths - auto thread_body = [&](const int ithr, const int nthr) { - size_t start0(0lu), end0(0lu); - splitter(work_amount, nthr, ithr, start0, end0); - if (start0 >= end0) - return; - if (ithr >= sumPerThread.size()) - sumPerThread.push_back(-float_inf); - - std::function findPaths = - [&](size_t targetIdx, size_t start, size_t end, float prevLogProb) { - if (end > 
actualLogitLen) { - if (sumPerThread[ithr] == -float_inf) { - sumPerThread[ithr] = prevLogProb; - } else if (prevLogProb != -float_inf) { - if (sumPerThread[ithr] > prevLogProb) - sumPerThread[ithr] = sumPerThread[ithr] + std::log1pf(std::exp(prevLogProb - sumPerThread[ithr])); - else - sumPerThread[ithr] = prevLogProb + std::log1pf(std::exp(sumPerThread[ithr] - prevLogProb)); - } - return; + auto threadBody_2 = [&](const int ithr, const int nthr) { + size_t start(0lu), end(0lu); + size_t sB(0lu), sT(0lu); + splitter(workAmount2, nthr, ithr, start, end); + if (start >= end) + return; + int64_t cw = 0, st = start; + for (; sB < batchNum; sB++) { + cw += logitsLength[sB]; + if (cw >= st) { + sT = logitsLength[sB] + st - cw; + break; + } + } + size_t workCounter = start; + + for (size_t b = sB; b < batchNum; b++) { + const size_t actualLogitLen = logitsLength[b]; + const size_t decodedTargetLen = decodedTargetLenB[b]; + auto& logProbabilities = logProbabilitiesB[b]; + auto& targetD = targetDB[b]; + + double expSum = 0.0; + size_t btcT = b * TC + sT * classesNum; + // logProbabilities = logSoftmax = logits[b][t][c] - ln(sum_c(exp(logits[b][t]))) + for (size_t t = sT; t < actualLogitLen; t++) { + expSum = 0.0; + for (size_t c = 0lu; c < classesNum; c++) { + expSum += std::exp(logits[btcT + c]); } - - size_t nextIdx = targetIdx + 1; - int64_t st64 = start; - float newLogProb = prevLogProb; - if (!_ctcMergeRepeated) { - for (size_t pos = start; pos < end; pos++) { - newLogProb = prevLogProb; - for (size_t bl = start; bl < pos; bl++) { - auto lnProbIt = logProbabilities[bl].find(blankIndex); - if (lnProbIt != logProbabilities[bl].end()) - newLogProb += lnProbIt->second; - } - auto lnProbIt = logProbabilities[pos].find(targetD[targetIdx]); - if (lnProbIt != logProbabilities[pos].end()) - newLogProb += lnProbIt->second; - if (end == actualLogitLen) { - for (int64_t ble = pos + 1; ble < actualLogitLen; ble++) { - auto lnProbIt = logProbabilities[ble].find(blankIndex); - if 
(lnProbIt != logProbabilities[ble].end()) - newLogProb += lnProbIt->second; - } - } - findPaths(nextIdx, pos + 1, end + 1, newLogProb); - } - } else { - for (size_t pos = start; pos < end; pos++) { - newLogProb = prevLogProb; - size_t next_start = pos + 1; - for (size_t bl = start; bl < pos; bl++) { - auto lnProbIt = logProbabilities[bl].find(blankIndex); - if (lnProbIt != logProbabilities[bl].end()) - newLogProb += lnProbIt->second; - } - if (end == actualLogitLen) { - for (int64_t ble = pos + 1; ble < actualLogitLen; ble++) { - auto lnProbIt = logProbabilities[ble].find(blankIndex); - if (lnProbIt != logProbabilities[ble].end()) - newLogProb += lnProbIt->second; - } - } - if (targetIdx < decodedTargetLen - 1 - && targetD[targetIdx] == targetD[targetIdx + 1]) { - auto lnProbIt = logProbabilities[next_start++].find(blankIndex); - if (lnProbIt != logProbabilities[next_start].end()) - newLogProb += lnProbIt->second; - } - for (int64_t bl = pos; bl >= st64; bl--) { - newLogProb += logProbabilities[bl].find(targetD[targetIdx])->second; - findPaths(nextIdx, next_start, end + 1, newLogProb); - if (bl > 0) { - auto lnProbIt = logProbabilities[bl - 1].find(blankIndex); - if (lnProbIt != logProbabilities[bl - 1].end()) - newLogProb -= lnProbIt->second; - } - } - } + for (size_t s = 0lu; s < decodedTargetLen; s++) { + logProbabilities[t][s] = logits[btcT + targetD[s]] - std::log(expSum); } - }; // findPaths - - // First tartget symbol - int64_t st64 = start0; - float newLogProb = 0.f; - if (!_ctcMergeRepeated) { - for (size_t pos = start0; pos < end0; pos++) { - newLogProb = 0.f; - for (size_t bl = 0; bl < pos; bl++) { - auto lnProbIt = logProbabilities[bl].find(blankIndex); - if (lnProbIt != logProbabilities[bl].end()) - newLogProb += lnProbIt->second; - } - auto lnProbIt = logProbabilities[pos].find(targetD[0]); - if (lnProbIt != logProbabilities[pos].end()) - newLogProb += lnProbIt->second; - if (work_amount == actualLogitLen) { - for (int64_t ble = pos + 1; ble < 
actualLogitLen; ble++) { - auto lnProbIt = logProbabilities[ble].find(blankIndex); - if (lnProbIt != logProbabilities[ble].end()) - newLogProb += lnProbIt->second; - } - } - if (decodedTargetLen > 1) { - findPaths(1, pos + 1, work_amount + 1, newLogProb); - } else { - if (sumPerThread[ithr] == -float_inf) - sumPerThread[ithr] = newLogProb; - else if (newLogProb != -float_inf) - sumPerThread[ithr] = sumPerThread[ithr] + std::log1pf(std::exp(newLogProb - sumPerThread[ithr])); - } + btcT += classesNum; + if (++workCounter >= end) { + return; } - } else { - for (size_t pos = start0; pos < end0; pos++) { - newLogProb = 0.f; - size_t next_start = pos + 1; - for (size_t bl = 0; bl < pos; bl++) { - auto lnProbIt = logProbabilities[bl].find(blankIndex); - if (lnProbIt != logProbabilities[bl].end()) - newLogProb += lnProbIt->second; - } - if (work_amount == actualLogitLen) { - for (int64_t ble = pos + 1; ble < actualLogitLen; ble++) { - auto lnProbIt = logProbabilities[ble].find(blankIndex); - if (lnProbIt != logProbabilities[ble].end()) - newLogProb += lnProbIt->second; - } + } + sT = 0lu; + } // for batch + }; // threadBody_2 + + parallel_nt(0, threadBody_2); + + const auto float_inf = std::numeric_limits::infinity(); + + auto sumLogs = [&float_inf](float log1, float log2) { + if (log1 == -float_inf) { + return log2; + } else if (log2 == -float_inf) { + return log1; + } else { + if (log1 > log2) + return log1 + std::log1pf(std::exp(log2 - log1)); + else + return log2 + std::log1pf(std::exp(log1 - log2)); + } + }; + + auto threadBody_3 = [&](const int ithr, const int nthr) { + size_t start(0lu), end(0lu); + splitter(batchNum, nthr, ithr, start, end); + if (start >= end) + return; + + // As per Connectionist Temporal Classification - Labeling Unsegmented Sequence Data with Recurrent Neural Networks: + // Graves et al., 2016, paragraph 4.1 (10) + for (size_t b = start; b < end; b++) { + auto& targetD = targetDB[b]; + auto& logProbabilities = logProbabilitiesB[b]; + const int 
actualLogitLen = logitsLength[b]; + const int decodedTargetLen = decodedTargetLenB[b]; + std::vector> logBwd(decodedTargetLen, std::vector(actualLogitLen, -float_inf)); + for (int s = decodedTargetLen - 2; s < decodedTargetLen; s++) + logBwd[s][actualLogitLen - 1] = 0.f; + + for (int t = actualLogitLen - 2; t >= 0; t--) { + const int t_1 = t + 1; + for (int s = std::max(0, decodedTargetLen - (2 * (actualLogitLen - t))); + s < std::min(decodedTargetLen, 2 * (t_1)); s++) { + if (_ctcMergeRepeated || targetD[s] == blankIndex) { + logBwd[s][t] = sumLogs(logBwd[s][t], + logBwd[s][t_1] + logProbabilities[t_1][s]); } - if (decodedTargetLen > 1 - && targetD[0] == targetD[1]) { - auto lnProbIt = logProbabilities[next_start++].find(blankIndex); - if (lnProbIt != logProbabilities[next_start].end()) - newLogProb += lnProbIt->second; + + if (s + 1 < decodedTargetLen) { + logBwd[s][t] = sumLogs(logBwd[s][t], + logBwd[s + 1][t_1] + logProbabilities[t_1][s + 1]); } - for (int64_t bl = pos; bl >= 0; bl--) { - auto lnProbIt = logProbabilities[bl].find(targetD[0]); - if (lnProbIt != logProbabilities[bl].end()) - newLogProb += lnProbIt->second; - if (decodedTargetLen > 1) { - findPaths(1, next_start, work_amount + 1, newLogProb); - } else { - if (sumPerThread[ithr] == -float_inf) - sumPerThread[ithr] = newLogProb; - else if (newLogProb != -float_inf) - sumPerThread[ithr] = sumPerThread[ithr] + std::log1pf(std::exp(newLogProb - sumPerThread[ithr])); - } - if (bl > 0) { - auto lnProbIt = logProbabilities[bl - 1].find(blankIndex); - if (lnProbIt != logProbabilities[bl - 1].end()) - newLogProb -= lnProbIt->second; + + if (s + 2 < decodedTargetLen) { + if (targetD[s] != blankIndex && (!_ctcMergeRepeated || (targetD[s] != targetD[s + 2]))) { + logBwd[s][t] = sumLogs(logBwd[s][t], + logBwd[s + 2][t_1] + logProbabilities[t_1][s + 2]); } } } } - }; // thread_body - - parallel_nt(0, thread_body); - float res = -float_inf; + logBwd[0][0] += logProbabilities[0][0]; + logBwd[1][0] += 
logProbabilities[0][(decodedTargetLen > 1) ? 1 : 0]; - for (auto sum : sumPerThread) { - if (res == -float_inf) { - res = sum; - } else if (sum != -float_inf) { - if (res > sum) - res = res + std::log1pf(std::exp(sum - res)); - else - res = sum + std::log1pf(std::exp(res - sum)); - } - } + dstData[b] = -sumLogs(logBwd[0][0], logBwd[1][0]); + } // for batch + }; // threadBody_3 - dstData[b] = -res; - } // for (size_t b = 0; b < batchNum; b++) + parallel_nt(0, threadBody_3); - return OK; + return returnCode; } // execute protected: @@ -334,8 +289,6 @@ class CTCLossImpl : public ExtLayerBase { }; REG_FACTORY_FOR(CTCLossImpl, CTCLoss); - } // namespace Cpu } // namespace Extensions } // namespace InferenceEngine - From 9367266ed5e55954de1c4737d10752aa86a5abbd Mon Sep 17 00:00:00 2001 From: Vladimir Paramuzov Date: Mon, 19 Oct 2020 18:45:05 +0300 Subject: [PATCH 29/35] [IE CLDNN] DispatchData refactoring (#2508) --- .../activation/activation_kernel_base.cpp | 36 +++---- .../activation/activation_kernel_base.h | 2 +- .../activation/activation_kernel_opt.cpp | 22 ++-- .../activation/activation_kernel_opt.h | 2 +- .../activation/activation_kernel_ref.cpp | 4 +- .../activation/activation_kernel_ref.h | 2 +- .../arg_max_min/arg_max_min_kernel_axis.cpp | 19 +--- .../arg_max_min/arg_max_min_kernel_base.cpp | 22 ++-- .../arg_max_min/arg_max_min_kernel_opt.cpp | 25 ++--- .../average_unpooling_kernel_base.cpp | 40 ++++--- .../batch_to_space_kernel_base.cpp | 27 ++--- .../binary_convolution_kernel_1x1.cpp | 26 +++-- .../binary_convolution_kernel_1x1.h | 4 +- ...y_convolution_kernel_1x1_b_fs_yx_fsv16.cpp | 20 ++-- ...ary_convolution_kernel_1x1_b_fs_yx_fsv16.h | 4 +- .../binary_convolution_kernel_base.cpp | 73 ++++++------- .../binary_convolution_kernel_base.h | 4 +- .../binary_convolution_kernel_generic.cpp | 27 +++-- .../binary_convolution_kernel_generic.h | 4 +- .../binary_convolution_kernel_ref.cpp | 27 +++-- .../binary_convolution_kernel_ref.h | 4 +- 
.../border/border_kernel_base.cpp | 24 ++--- .../broadcast/broadcast_kernel_base.cpp | 24 ++--- .../concatenation_kernel_b_fs_yx_fsv16.cpp | 18 ++-- .../concatenation_kernel_base.cpp | 30 +++--- ...ncatenation_kernel_depth_bfyx_no_pitch.cpp | 21 ++-- .../concatenation_kernel_fs_b_yx_fsv32.cpp | 26 ++--- .../concatenation_kernel_simple_ref.cpp | 24 ++--- .../convolution_kernel_b_fs_yx_fsv16.cpp | 38 +++---- .../convolution_kernel_b_fs_yx_fsv16.h | 2 +- .../convolution_kernel_b_fs_yx_fsv16_1x1.cpp | 34 +++--- .../convolution_kernel_b_fs_yx_fsv16_1x1.h | 2 +- ...olution_kernel_b_fs_yx_fsv16_depthwise.cpp | 29 ++--- ...nvolution_kernel_b_fs_yx_fsv16_depthwise.h | 2 +- ...volution_kernel_b_fs_yx_fsv16_imad_1x1.cpp | 42 ++++---- ...onvolution_kernel_b_fs_yx_fsv16_imad_1x1.h | 2 +- .../convolution_kernel_b_fs_yx_fsv4_int8.cpp | 26 ++--- .../convolution_kernel_b_fs_yx_fsv4_int8.h | 2 +- ...ution_kernel_b_fs_yx_fsv_16_32_imad_dw.cpp | 40 +++---- ...ution_kernel_b_fs_yx_fsv_16_32_imad_dw.hpp | 2 +- .../convolution_kernel_b_fs_zyx_fsv16.cpp | 76 ++++++------- .../convolution_kernel_b_fs_zyx_fsv16.h | 2 +- ...convolution_kernel_b_fs_zyx_fsv16_imad.cpp | 36 +++---- .../convolution_kernel_b_fs_zyx_fsv16_imad.h | 4 +- .../convolution/convolution_kernel_base.cpp | 88 +++++++-------- .../convolution/convolution_kernel_base.h | 4 +- .../convolution_kernel_bfyx_1x1.cpp | 22 ++-- .../convolution/convolution_kernel_bfyx_1x1.h | 4 +- .../convolution_kernel_bfyx_1x1_gemm_buf.cpp | 22 ++-- .../convolution_kernel_bfyx_1x1_gemm_buf.h | 4 +- .../convolution_kernel_bfyx_1x1_opt.cpp | 24 ++--- .../convolution_kernel_bfyx_1x1_opt.h | 4 +- .../convolution_kernel_bfyx_3x3_dw_opt.cpp | 41 ++++--- .../convolution_kernel_bfyx_3x3_dw_opt.h | 4 +- ...tion_kernel_bfyx_depthwise_weights_lwg.cpp | 27 ++--- ...lution_kernel_bfyx_depthwise_weights_lwg.h | 4 +- ...onvolution_kernel_bfyx_direct_10_12_16.cpp | 41 ++++--- .../convolution_kernel_bfyx_direct_10_12_16.h | 4 +- 
.../convolution_kernel_bfyx_gemm_like.cpp | 46 ++++---- .../convolution_kernel_bfyx_gemm_like.h | 4 +- .../convolution_kernel_bfyx_iyxo.cpp | 24 ++--- .../convolution_kernel_bfyx_iyxo.h | 2 +- .../convolution_kernel_bfyx_os_iyx_osv16.cpp | 51 +++++---- .../convolution_kernel_bfyx_os_iyx_osv16.h | 2 +- ...volution_kernel_bfyx_os_iyx_osv16_2_sg.cpp | 49 +++++---- ...onvolution_kernel_bfyx_os_iyx_osv16_2_sg.h | 4 +- ...nvolution_kernel_bfyx_to_b_fs_yx_fsv16.cpp | 28 ++--- ...convolution_kernel_bfyx_to_b_fs_yx_fsv16.h | 2 +- ...on_kernel_bfyx_to_bs_fs_yx_bsv16_fsv16.cpp | 8 +- ...onvolution_kernel_bfyx_to_fs_byx_fsv32.cpp | 34 +++--- .../convolution_kernel_bfyx_to_fs_byx_fsv32.h | 2 +- .../convolution_kernel_fs_byx_fsv32.cpp | 32 +++--- .../convolution_kernel_fs_byx_fsv32.h | 2 +- .../convolution_kernel_fs_byx_fsv32_1x1.cpp | 30 +++--- .../convolution_kernel_fs_byx_fsv32_1x1.h | 2 +- ...volution_kernel_fs_byx_fsv32_depthwise.cpp | 40 +++---- ...onvolution_kernel_fs_byx_fsv32_depthwise.h | 2 +- .../convolution/convolution_kernel_imad.cpp | 36 +++---- .../convolution/convolution_kernel_imad.h | 2 +- ...nvolution_kernel_imad_b_fs_yx_fsv4_1x1.cpp | 48 ++++----- ...convolution_kernel_imad_b_fs_yx_fsv4_1x1.h | 2 +- ...onvolution_kernel_imad_b_fs_yx_fsv4_dw.cpp | 54 ++++------ ...onvolution_kernel_imad_b_fs_yx_fsv4_dw.hpp | 2 +- ...n_kernel_imad_bs_fs_yx_bsv16_fsv16_1x1.cpp | 26 ++--- ...ion_kernel_imad_bs_fs_yx_bsv16_fsv16_1x1.h | 2 +- ...n_kernel_imad_bs_fs_yx_bsv16_fsv16_3x3.cpp | 26 ++--- ...ion_kernel_imad_bs_fs_yx_bsv16_fsv16_3x3.h | 2 +- .../convolution_kernel_mmad_b_fs_yx_fsv32.cpp | 38 +++---- .../convolution_kernel_mmad_b_fs_yx_fsv32.h | 2 +- ...nvolution_kernel_mmad_b_fs_yx_fsv32_dw.cpp | 22 ++-- ...convolution_kernel_mmad_b_fs_yx_fsv32_dw.h | 2 +- ...tion_kernel_mmad_bfyx_to_b_fs_yx_fsv32.cpp | 52 ++++----- ...lution_kernel_mmad_bfyx_to_b_fs_yx_fsv32.h | 2 +- ...ution_kernel_mmad_bfyx_to_b_fs_yx_fsv4.cpp | 38 +++---- ...olution_kernel_mmad_bfyx_to_b_fs_yx_fsv4.h | 
2 +- .../convolution/convolution_kernel_ref.cpp | 23 ++-- .../convolution/convolution_kernel_ref.h | 2 +- .../convolution_kernel_winograd_2x3_s1.cpp | 27 +++-- .../convolution_kernel_winograd_2x3_s1.h | 4 +- ...nvolution_kernel_winograd_2x3_s1_fused.cpp | 30 +++--- ...convolution_kernel_winograd_2x3_s1_fused.h | 4 +- ...nvolution_kernel_winograd_6x3_s1_fused.cpp | 22 ++-- ...convolution_kernel_winograd_6x3_s1_fused.h | 4 +- .../convolution_kernel_yxfb_yxio_b16.cpp | 20 ++-- .../convolution_kernel_yxfb_yxio_b16.h | 2 +- .../convolution_kernel_yxfb_yxio_b1_block.cpp | 12 +-- .../convolution_kernel_yxfb_yxio_b1_block.h | 4 +- ...n_kernel_yxfb_yxio_b1_block_multiple_x.cpp | 26 ++--- ...ion_kernel_yxfb_yxio_b1_block_multiple_x.h | 4 +- .../convolution_kernel_yxfb_yxio_b8.cpp | 24 ++--- .../convolution_kernel_yxfb_yxio_b8.h | 4 +- ...eformable_convolution_kernel_bfyx_conv.cpp | 20 ++-- .../deformable_convolution_kernel_bfyx_conv.h | 2 +- ...ormable_convolution_kernel_bfyx_interp.cpp | 22 ++-- .../ctc_greedy_decoder_kernel_base.cpp | 29 ++--- .../ctc_greedy_decoder_kernel_base.h | 2 +- .../cum_sum/cum_sum_kernel_base.cpp | 27 ++--- .../cum_sum/cum_sum_kernel_base.h | 2 +- .../cum_sum/cum_sum_kernel_partial_sum.cpp | 50 ++++----- .../cum_sum/cum_sum_kernel_partial_sum.h | 2 +- .../cum_sum/cum_sum_kernel_ref.cpp | 4 +- .../cum_sum/cum_sum_kernel_ref.h | 2 +- .../deconvolution_kernel_b_fs_zyx_fsv16.cpp | 76 ++++++------- ...deconvolution_kernel_b_fs_zyx_fsv16_dw.cpp | 18 ++-- .../deconvolution_kernel_base.cpp | 28 ++--- .../deconvolution_kernel_bfyx_opt.cpp | 21 ++-- ...nvolution_kernel_imad_along_f_tile_bfx.cpp | 20 ++-- .../deconvolution_kernel_imad_ref.cpp | 18 +--- .../deconvolution_kernel_ref.cpp | 18 ++-- .../depth_to_space_kernel_base.cpp | 24 ++--- .../depth_to_space_kernel_block2_opt.cpp | 21 ++-- .../detection_output_kernel_base.cpp | 26 ++--- .../detection_output_kernel_ref.cpp | 22 ++-- .../detection_output_kernel_sort.cpp | 22 ++-- 
.../eltwise/eltwise_kernel_b_fs_yx_fsv16.cpp | 32 +++--- .../eltwise/eltwise_kernel_base.cpp | 79 +++++++------- ...se_kernel_mixed_byxf_and_fs_b_yx_fsv32.cpp | 20 ---- .../embedding_bag_kernel_ref.cpp | 25 ++--- .../extract_image_patches_kernel_base.cpp | 25 ++--- .../fully_connected_block_kernel_base.h | 2 +- .../fully_connected_kernel_base.cpp | 20 ++-- .../fully_connected_kernel_base.h | 4 +- .../fully_connected_kernel_bf_io_gemm.cpp | 22 ++-- .../fully_connected_kernel_bf_io_gemm.h | 4 +- ...y_connected_kernel_bf_io_input_spatial.cpp | 21 ++-- .../fully_connected_kernel_bf_io_ref.cpp | 4 +- .../fully_connected_kernel_bf_io_ref.h | 2 +- .../fully_connected_kernel_bf_tiled.cpp | 60 +++++------ .../fully_connected_kernel_bf_tiled.h | 2 +- .../fully_connected_kernel_bfyx_ref.cpp | 22 ++-- .../fully_connected_kernel_bfyx_ref.h | 2 +- .../fully_connected_kernel_bs_f_bsv16_af8.cpp | 14 +-- .../fully_connected_kernel_bs_f_bsv16_b1.cpp | 44 ++++---- .../fully_connected_kernel_bs_f_bsv16_b1.h | 4 +- .../fully_connected_kernel_bs_f_bsv8_af8.cpp | 14 +-- .../fully_connected_kernel_fb_io_b8_f8.cpp | 14 +-- .../fully_connected_kernel_fb_io_block.cpp | 56 +++++----- .../fully_connected_kernel_fb_io_block.h | 4 +- .../fully_connected_kernel_fb_io_ref.cpp | 4 +- .../fully_connected_kernel_fb_io_ref.h | 2 +- .../fully_connected_kernel_fb_oi_b8_ref.cpp | 14 +-- .../fully_connected_kernel_fb_oi_ref.cpp | 4 +- .../fully_connected_kernel_fb_oi_ref.h | 2 +- .../fully_connected_kernel_fs_byx_fsv32.cpp | 22 ++-- .../fully_connected_kernel_fs_byx_fsv32.h | 2 +- .../fully_connected_kernel_imad.cpp | 20 ++-- .../fully_connected_kernel_imad.h | 2 +- .../fully_connected_kernel_mmad.cpp | 20 ++-- .../fully_connected_kernel_mmad.h | 2 +- .../fully_connected_kernel_yxfb_ref.cpp | 4 +- .../fully_connected_kernel_yxfb_ref.h | 2 +- .../fused_conv_eltwise_kernel_base.cpp | 85 +++++++-------- .../fused_conv_eltwise_kernel_base.h | 2 +- ...fused_conv_eltwise_kernel_bfyx_1x1_opt.cpp | 24 ++--- 
.../fused_conv_eltwise_kernel_bfyx_1x1_opt.h | 4 +- .../fused_conv_eltwise_kernel_bfyx_iyxo.cpp | 24 ++--- .../fused_conv_eltwise_kernel_bfyx_iyxo.h | 2 +- ..._conv_eltwise_kernel_bfyx_os_iyx_osv16.cpp | 51 +++++---- ...ed_conv_eltwise_kernel_bfyx_os_iyx_osv16.h | 4 +- ...used_conv_eltwise_kernel_yxfb_yxio_b16.cpp | 20 ++-- .../fused_conv_eltwise_kernel_yxfb_yxio_b16.h | 4 +- .../gather/gather_kernel_ref.cpp | 33 ++---- .../gather_tree/gather_tree_kernel_base.cpp | 45 ++++---- .../actual_kernels/gemm/gemm_kernel_base.cpp | 25 ++--- .../actual_kernels/gemm/gemm_kernel_base.h | 2 +- .../gemm/gemm_kernel_mmad_int8.cpp | 25 ++--- .../gemm/gemm_kernel_mmad_int8_slm.cpp | 20 ++-- .../gemm/gemm_kernel_tiled_opt.cpp | 16 +-- .../actual_kernels/grn/grn_kernel_base.cpp | 26 ++--- .../core/actual_kernels/grn/grn_kernel_base.h | 2 +- ...ernel_across_channel_multiple_features.cpp | 30 +++--- ..._kernel_across_channel_multiple_features.h | 2 +- ...across_channel_multiple_features_fsv16.cpp | 27 ++--- ...l_across_channel_multiple_features_fsv16.h | 2 +- .../lrn/lrn_kernel_across_channel_opt_b8.cpp | 12 +-- .../lrn/lrn_kernel_across_channel_opt_b8.h | 2 +- .../lrn/lrn_kernel_across_channel_ref.cpp | 20 ++-- .../lrn/lrn_kernel_across_channel_ref.h | 2 +- .../actual_kernels/lrn/lrn_kernel_base.cpp | 39 ++++--- .../core/actual_kernels/lrn/lrn_kernel_base.h | 2 +- .../actual_kernels/lrn/lrn_kernel_ref.cpp | 22 ++-- .../core/actual_kernels/lrn/lrn_kernel_ref.h | 2 +- .../lrn_kernel_within_channel_byxf_opt.cpp | 25 ++--- .../lrn/lrn_kernel_within_channel_byxf_opt.h | 2 +- .../lrn/lrn_kernel_within_channel_ref.cpp | 22 ++-- .../lrn/lrn_kernel_within_channel_ref.h | 2 +- .../lrn/lrn_kernel_within_channel_ref_opt.cpp | 22 ++-- .../lrn/lrn_kernel_within_channel_ref_opt.h | 2 +- .../lstm_dynamic_input_bfyx_opt.cpp | 22 ++-- .../lstm_dynamic_input_kernel_base.cpp | 23 ++-- .../lstm_dynamic_timeloop_kernel_base.cpp | 25 ++--- .../max_unpooling_kernel_base.cpp | 38 +++---- 
.../mvn/mvn_kernel_b_fs_yx_fsv16_imad.cpp | 100 ++++++++--------- .../mvn/mvn_kernel_b_fs_yx_fsv16_imad.hpp | 2 +- .../actual_kernels/mvn/mvn_kernel_base.cpp | 31 ++---- .../core/actual_kernels/mvn/mvn_kernel_base.h | 2 +- .../mvn/mvn_kernel_bfyx_opt.cpp | 58 +++++----- .../actual_kernels/mvn/mvn_kernel_bfyx_opt.h | 2 +- .../actual_kernels/mvn/mvn_kernel_ref.cpp | 4 +- .../core/actual_kernels/mvn/mvn_kernel_ref.h | 2 +- .../normalize/normalize_kernel_base.cpp | 29 ++--- .../one_hot/one_hot_kernel_base.cpp | 30 ++---- .../pooling/pooling_kernel_base.cpp | 58 +++++----- .../pooling/pooling_kernel_base.h | 2 +- .../pooling_kernel_gpu_b_fs_yx_fsv16.cpp | 22 ++-- .../pooling_kernel_gpu_b_fs_yx_fsv16.h | 2 +- .../pooling_kernel_gpu_b_fs_yx_fsv4.cpp | 21 ++-- .../pooling/pooling_kernel_gpu_b_fs_yx_fsv4.h | 2 +- ...pooling_kernel_gpu_b_fs_zyx_fsv16_imad.cpp | 21 ++-- .../pooling_kernel_gpu_b_fs_zyx_fsv16_imad.h | 2 +- .../pooling_kernel_gpu_bfyx_block_opt.cpp | 10 +- .../pooling_kernel_gpu_bfyx_block_opt.h | 2 +- ...ooling_kernel_gpu_bs_fs_yx_bsv16_fsv16.cpp | 22 ++-- .../pooling_kernel_gpu_bs_fs_yx_bsv16_fsv16.h | 2 +- .../pooling_kernel_gpu_bsv16_fsv16.cpp | 22 ++-- .../pooling/pooling_kernel_gpu_bsv16_fsv16.h | 2 +- .../pooling/pooling_kernel_gpu_byxf_opt.cpp | 10 +- .../pooling/pooling_kernel_gpu_byxf_opt.h | 2 +- .../pooling_kernel_gpu_byxf_padding_opt.cpp | 10 +- .../pooling_kernel_gpu_byxf_padding_opt.h | 2 +- .../pooling_kernel_gpu_fs_b_yx_fsv32.cpp | 20 ++-- .../pooling_kernel_gpu_fs_b_yx_fsv32.h | 2 +- .../pooling/pooling_kernel_gpu_int8_ref.cpp | 4 +- .../pooling/pooling_kernel_gpu_int8_ref.h | 2 +- .../pooling/pooling_kernel_gpu_ref.cpp | 4 +- .../pooling/pooling_kernel_gpu_ref.h | 2 +- .../pyramid_roi_align_kernel_base.cpp | 28 ++--- .../pyramid_roi_align_kernel_ref.cpp | 18 +--- .../quantize/quantize_kernel_base.cpp | 16 +-- .../quantize/quantize_kernel_base.h | 2 +- .../quantize/quantize_kernel_ref.cpp | 38 +++---- .../quantize/quantize_kernel_ref.h | 2 +- 
.../quantize_kernel_scale_shift_opt.cpp | 36 +++---- .../quantize_kernel_scale_shift_opt.h | 2 +- .../reduce/reduce_kernel_b_fs_yx_fsv16.cpp | 19 ++-- .../reduce/reduce_kernel_base.cpp | 4 +- .../reduce/reduce_kernel_ref.cpp | 21 ++-- .../region_yolo/region_yolo_kernel_ref.cpp | 28 ++--- .../reorder_from_winograd_2x3_kernel.cpp | 18 ++-- .../reorder/reorder_kernel_base.cpp | 62 ++++------- .../reorder/reorder_kernel_binary.cpp | 18 +--- .../reorder/reorder_kernel_fast_b1.cpp | 18 ++-- .../reorder_kernel_fs_b_yx_fsv32_to_bfyx.cpp | 18 ++-- .../reorder_kernel_to_yxfb_batched.cpp | 18 ++-- .../reorder_to_winograd_2x3_kernel.cpp | 18 ++-- .../reorder/reorder_weights_binary_kernel.cpp | 18 +--- .../reorder_weights_image_fyx_b_kernel.cpp | 23 ++-- ...rder_weights_image_winograd_6x3_kernel.cpp | 18 ++-- .../reorder/reorder_weights_opt.cpp | 23 ++-- .../reorder_weights_winograd_2x3_kernel.cpp | 18 ++-- .../reorder_weights_winograd_6x3_kernel.cpp | 18 ++-- .../reorg_yolo/reorg_yolo_kernel_ref.cpp | 28 ++--- .../resample/resample_kernel_base.cpp | 39 +++---- .../resample/resample_kernel_opt.cpp | 19 ++-- .../resample/resample_kernel_ref.cpp | 21 +--- .../reverse_sequence_kernel_ref.cpp | 26 ++--- .../roi_pooling/roi_pooling_kernel_base.cpp | 28 +++-- .../scatter_update_kernel_ref.cpp | 65 +++++------ .../select/select_kernel_base.cpp | 24 ++--- .../shuffle_channels_kernel_ref.cpp | 25 ++--- .../softmax_items_class_kernel_base.cpp | 4 +- .../softmax/softmax_items_class_kernel_base.h | 2 +- .../softmax/softmax_kernel_base.cpp | 61 ++++++----- .../softmax/softmax_kernel_base.h | 4 +- .../softmax/softmax_kernel_bf.cpp | 32 +++--- .../softmax/softmax_kernel_fb.cpp | 35 +++--- .../softmax_kernel_items_class_optimized.cpp | 31 +++--- .../softmax_kernel_items_class_optimized.h | 4 +- .../softmax/softmax_kernel_ref.cpp | 20 ++-- .../space_to_batch_kernel_base.cpp | 27 ++--- .../space_to_depth_kernel_ref.cpp | 25 ++--- .../strided_slice_kernel_ref.cpp | 23 ++-- 
.../actual_kernels/tile/tile_kernel_ref.cpp | 36 +++---- .../core/common/common_kernel_base.cpp | 35 +----- .../core/common/common_kernel_base.h | 12 +-- .../kernel_selector/core/common/jitter.cpp | 102 +++++++++--------- .../kernel_selector/core/kernel_base.cpp | 41 +++++++ .../clDNN/kernel_selector/core/kernel_base.h | 15 +++ 298 files changed, 2529 insertions(+), 3223 deletions(-) diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_base.cpp index 31b20418efb697..751278c1f75670 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_base.cpp @@ -23,31 +23,21 @@ namespace kernel_selector { ActivationKernelBase::DispatchData ActivationKernelBase::SetDefault(const activation_params& arg) const { const auto& out = arg.output; - DispatchData runInfo; - std::vector global; - std::vector local; + DispatchData dispatchData; if (out.GetLayout() == DataLayout::yxfb) { - global = {out.Feature().v * out.Batch().v, out.X().v, out.Y().v}; - local = GetOptimalLocalWorkGroupSizes(global, arg.engineInfo); + dispatchData.gws = {out.Feature().v * out.Batch().v, out.X().v, out.Y().v}; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, arg.engineInfo); } else if (out.GetLayout() == DataLayout::b_fs_yx_fsv16) { - global = {Align(out.Feature().v, 16) * out.Batch().v, out.X().v, out.Y().v}; - local = {16, 1, 1}; + dispatchData.gws = {Align(out.Feature().v, 16) * out.Batch().v, out.X().v, out.Y().v}; + dispatchData.lws = {16, 1, 1}; } else { - global = {out.X().v, out.Y().v * out.Z().v, out.Feature().v * out.Batch().v}; - local = GetOptimalLocalWorkGroupSizes(global, arg.engineInfo); + dispatchData.gws = {out.X().v, out.Y().v * 
out.Z().v, out.Feature().v * out.Batch().v}; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, arg.engineInfo); } - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; + dispatchData.efficiency = DONT_USE_IF_HAVE_SOMETHING_ELSE; - runInfo.efficiency = DONT_USE_IF_HAVE_SOMETHING_ELSE; - runInfo.fp16UnitUsed = out.GetDType() == Datatype::F16; - - return runInfo; + return dispatchData; } JitConstants ActivationKernelBase::GetJitConstants(const activation_params& params, DispatchData) const { @@ -94,20 +84,20 @@ KernelsData ActivationKernelBase::GetCommonKernelsData(const Params& params, con activation_params& newParams = *static_cast(kd.params.get()); const std::string kernel_id = GetEntryPoint(kernelName, params.layerID, options); - auto runInfo = SetDefault(newParams); - auto cldnn_jit = GetJitConstants(newParams, runInfo); + auto dispatchData = SetDefault(newParams); + auto cldnn_jit = GetJitConstants(newParams, dispatchData); auto entry_point = GetEntryPoint(kernelName, newParams.layerID, options); auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point, + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point, DEFAULT, false, false, 1, GetFusedPrimitiveInputsCount(params)); if (!newParams.inputActivationParams.empty()) { kernel.arguments.push_back({ArgumentDescriptor::Types::SLOPE, 0}); } - kd.estimatedTime = runInfo.efficiency; + kd.estimatedTime = dispatchData.efficiency; return {kd}; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_base.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_base.h index 2ae92447f93e62..3059e5d99dd9d9 100644 --- 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_base.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_base.h @@ -65,7 +65,7 @@ class ActivationKernelBase : public common_kernel_base { protected: bool Validate(const Params& p, const optional_params& o) const override; - virtual JitConstants GetJitConstants(const activation_params& params, DispatchData kd) const; + virtual JitConstants GetJitConstants(const activation_params& params, DispatchData dispatchData) const; virtual DispatchData SetDefault(const activation_params& arg) const; KernelsData GetCommonKernelsData(const Params& params, const optional_params& options) const; }; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_opt.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_opt.cpp index cbe17079757aa4..57aaba87cfc741 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_opt.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_opt.cpp @@ -38,24 +38,16 @@ ParamsKey ActivationKernelOpt::GetSupportedKey() const { } ActivationKernelOpt::Parent::DispatchData ActivationKernelOpt::SetDefault(const activation_params& params) const { - auto runInfo = Parent::SetDefault(params); + auto dispatchData = Parent::SetDefault(params); const auto totalSize = params.inputs[0].LogicalSize(); - std::vector global = {totalSize / NUM_COLS_WI}; - std::vector local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + dispatchData.gws = { totalSize / NUM_COLS_WI, 1, 1 }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - runInfo.gws0 = global[0]; - runInfo.gws1 = 1; - runInfo.gws2 = 1; + dispatchData.efficiency = FORCE_PRIORITY_6; - runInfo.lws0 = 
local[0]; - runInfo.lws1 = 1; - runInfo.lws2 = 1; - - runInfo.efficiency = FORCE_PRIORITY_6; - - return runInfo; + return dispatchData; } bool ActivationKernelOpt::Validate(const Params& p, const optional_params& o) const { @@ -87,8 +79,8 @@ bool ActivationKernelOpt::Validate(const Params& p, const optional_params& o) co return true; } -JitConstants ActivationKernelOpt::GetJitConstants(const activation_params& params, DispatchData kd) const { - auto jit = ActivationKernelBase::GetJitConstants(params, kd); +JitConstants ActivationKernelOpt::GetJitConstants(const activation_params& params, DispatchData dispatchData) const { + auto jit = ActivationKernelBase::GetJitConstants(params, dispatchData); auto input_dt = params.inputs[0].GetDType(); jit.AddConstant(MakeJitConstant("NUM_COLS_WI", NUM_COLS_WI)); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_opt.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_opt.h index 51545bed5fa466..7a4a9bcedec6ad 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_opt.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_opt.h @@ -33,7 +33,7 @@ class ActivationKernelOpt : public ActivationKernelBase { static const int NUM_COLS_WI = 4; DispatchData SetDefault(const activation_params& arg) const override; bool Validate(const Params& p, const optional_params& o) const override; - JitConstants GetJitConstants(const activation_params& params, DispatchData kd) const override; + JitConstants GetJitConstants(const activation_params& params, DispatchData dispatchData) const override; std::vector GetSupportedFusedOps() const override { return {FusedOpType::QUANTIZE, FusedOpType::SCALE, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_ref.cpp 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_ref.cpp index 89f019c7af6e66..9e35b7bfc174f5 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_ref.cpp @@ -41,8 +41,8 @@ ParamsKey ActivationKernelRef::GetSupportedKey() const { return k; } -JitConstants ActivationKernelRef::GetJitConstants(const activation_params& params, DispatchData kd) const { - auto jit = ActivationKernelBase::GetJitConstants(params, kd); +JitConstants ActivationKernelRef::GetJitConstants(const activation_params& params, DispatchData dispatchData) const { + auto jit = ActivationKernelBase::GetJitConstants(params, dispatchData); auto input_dt = params.inputs[0].GetDType(); if (!params.fused_ops.empty()) { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_ref.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_ref.h index e8e170be277acf..0f946221094ee3 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_ref.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/activation/activation_kernel_ref.h @@ -27,7 +27,7 @@ class ActivationKernelRef : public ActivationKernelBase { KernelsData GetKernelsData(const Params& params, const optional_params& options) const override; ParamsKey GetSupportedKey() const override; - JitConstants GetJitConstants(const activation_params& params, DispatchData kd) const override; + JitConstants GetJitConstants(const activation_params& params, DispatchData dispatchData) const override; std::vector GetSupportedFusedOps() const override { return {FusedOpType::QUANTIZE, FusedOpType::SCALE, diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/arg_max_min/arg_max_min_kernel_axis.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/arg_max_min/arg_max_min_kernel_axis.cpp index 15fc570e30d336..7e2aff5883988c 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/arg_max_min/arg_max_min_kernel_axis.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/arg_max_min/arg_max_min_kernel_axis.cpp @@ -86,22 +86,11 @@ KernelsData ArgMaxMinKernelAxis::GetKernelsData(const Params& params, const opti } const arg_max_min_params& orgParams = static_cast(params); - DispatchData runInfo; - runInfo.fp16UnitUsed = orgParams.inputs[0].GetDType() == Datatype::F16; - size_t sort_size = orgParams.argMaxMinSortType == ArgMaxMinSortType::VALUE ? getSortSize(orgParams) : 1; - std::vector local, global; - global = { Align(getOperationNumber(orgParams), 32), sort_size, 1 }; - local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; - - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; + DispatchData dispatchData; + dispatchData.gws = { Align(getOperationNumber(orgParams), 32), sort_size, 1 }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); KernelData kd = KernelData::Default(params); @@ -110,7 +99,7 @@ KernelsData ArgMaxMinKernelAxis::GetKernelsData(const Params& params, const opti auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point); if (orgParams.outputs_num == 2) { kernel.arguments.push_back({ArgumentDescriptor::Types::INPUT, 1}); diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/arg_max_min/arg_max_min_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/arg_max_min/arg_max_min_kernel_base.cpp index 257e1cb2eae680..2d0c3e57a3ee2f 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/arg_max_min/arg_max_min_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/arg_max_min/arg_max_min_kernel_base.cpp @@ -1,5 +1,5 @@ /* -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -37,20 +37,12 @@ JitConstants ArgMaxMinKernelBase::GetJitConstants(const arg_max_min_params& para } ArgMaxMinKernelBase::DispatchData ArgMaxMinKernelBase::SetDefault(const arg_max_min_params& params) const { - DispatchData kd; + DispatchData dispatchData; - kd.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; + dispatchData.gws = { 128, params.inputs[0].Batch().v, 1 }; + dispatchData.lws = { 128, 1, 1 }; - // Determine global work sizes. 
- kd.gws0 = 128; - kd.gws1 = params.inputs[0].Batch().v; - kd.gws2 = 1; - - kd.lws0 = 128; - kd.lws1 = 1; - kd.lws2 = 1; - - return kd; + return dispatchData; } KernelsData ArgMaxMinKernelBase::GetCommonKernelsData(const Params& params, const optional_params& options, float estimatedTime) const { @@ -60,7 +52,7 @@ KernelsData ArgMaxMinKernelBase::GetCommonKernelsData(const Params& params, cons const arg_max_min_params& orgParams = static_cast(params); - DispatchData runInfo = SetDefault(orgParams); + DispatchData dispatchData = SetDefault(orgParams); KernelData kd = KernelData::Default(params); @@ -69,7 +61,7 @@ KernelsData ArgMaxMinKernelBase::GetCommonKernelsData(const Params& params, cons auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point); kd.estimatedTime = estimatedTime; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/arg_max_min/arg_max_min_kernel_opt.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/arg_max_min/arg_max_min_kernel_opt.cpp index ac03f6f218245b..30938a2acdb941 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/arg_max_min/arg_max_min_kernel_opt.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/arg_max_min/arg_max_min_kernel_opt.cpp @@ -1,5 +1,5 @@ /* -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -37,11 +37,11 @@ KernelsData ArgMaxMinKernelOpt::GetKernelsData(const Params& params, const optio const arg_max_min_params& orgParams = static_cast(params); - int topK = orgParams.topK; - long size = (long)(orgParams.inputs[0].X().v * orgParams.inputs[0].Y().v * orgParams.inputs[0].Feature().v) / 8; - long outSize = size / 16 * topK; + size_t topK = orgParams.topK; + size_t size = (size_t)(orgParams.inputs[0].X().v * orgParams.inputs[0].Y().v * orgParams.inputs[0].Feature().v) / 8; + size_t outSize = size / 16 * topK; int kernelAmount = 1; - for (; outSize > 128; outSize = (long)((outSize / 128 + 1) * topK)) { + for (; outSize > 128; outSize = (size_t)((outSize / 128 + 1) * topK)) { kernelAmount++; } KernelData kd = KernelData::Default(params, kernelAmount); @@ -57,22 +57,15 @@ KernelsData ArgMaxMinKernelOpt::GetKernelsData(const Params& params, const optio newParams.inputs[0] = input; auto& kernel = kd.kernels[i]; - DispatchData runInfo = SetDefault(newParams); + DispatchData dispatchData = SetDefault(newParams); auto cldnnJit = GetJitConstants(newParams); auto entryPoint = GetEntryPoint(kernelName, newParams.layerID, options); auto jit = CreateJit(kernelName, cldnnJit, entryPoint); - runInfo.fp16UnitUsed = orgParams.inputs[0].GetDType() == Datatype::F16; + dispatchData.gws = { Align(size, 16), orgParams.inputs[0].Batch().v, 1 }; + dispatchData.lws = { 16, 1, 1 }; - runInfo.gws0 = Align(size, 16); - runInfo.gws1 = orgParams.inputs[0].Batch().v; // B - runInfo.gws2 = 1; - - runInfo.lws0 = 16; - runInfo.lws1 = 1; - runInfo.lws2 = 1; - - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entryPoint); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entryPoint); size = (size / 128 + 1) * topK; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/average_unpooling/average_unpooling_kernel_base.cpp 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/average_unpooling/average_unpooling_kernel_base.cpp index b73ce22f00510f..7b6a475afb0f55 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/average_unpooling/average_unpooling_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/average_unpooling/average_unpooling_kernel_base.cpp @@ -1,5 +1,5 @@ /* -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -38,32 +38,30 @@ AverageUnpoolingKernelBase::DispatchData AverageUnpoolingKernelBase::SetDefault( const average_unpooling_params& params) const { const auto& input = params.inputs[0]; - DispatchData kd; + DispatchData dispatchData; if (input.GetLayout() == DataLayout::bfyx || input.GetLayout() == DataLayout::byxf) { // Determine global work sizes. - kd.gws2 = input.Batch().v * input.Feature().v; // B, F - kd.gws0 = Align(input.X().v, 32); // X - kd.gws1 = input.Y().v; // Y + dispatchData.gws = { Align(input.X().v, 32), // X + input.Y().v, // Y + input.Batch().v * input.Feature().v, // B, F + }; - kd.lws0 = 32; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws = { 32, 1, 1 }; } else { // Determine global work sizes. 
- kd.gws0 = input.Batch().v * input.Feature().v; // B, F - kd.gws1 = input.X().v; // X - kd.gws2 = input.Y().v; // Y - - kd.lws0 = std::min(std::max(kd.gws0, static_cast(1)), static_cast(32)); - while (kd.gws0 % kd.lws0 != 0) { - --kd.lws0; + dispatchData.gws = { input.Batch().v * input.Feature().v, // B, F + input.X().v, // X + input.Y().v }; // Y + + dispatchData.lws = {1, 1, 1}; + dispatchData.lws[0] = std::min(std::max(dispatchData.gws[0], static_cast(1)), static_cast(32)); + while (dispatchData.gws[0] % dispatchData.lws[0] != 0) { + --dispatchData.lws[0]; } - kd.lws1 = 1; - kd.lws2 = 1; } - return kd; + return dispatchData; } KernelsData AverageUnpoolingKernelBase::GetCommonKernelsData(const Params& params, @@ -75,7 +73,7 @@ KernelsData AverageUnpoolingKernelBase::GetCommonKernelsData(const Params& param const average_unpooling_params& orgParams = static_cast(params); - DispatchData runInfo = SetDefault(orgParams); + DispatchData dispatchData = SetDefault(orgParams); KernelData kd = KernelData::Default(params); @@ -84,10 +82,10 @@ KernelsData AverageUnpoolingKernelBase::GetCommonKernelsData(const Params& param auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point); kd.estimatedTime = estimatedTime; return {kd}; } -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/batch_to_space/batch_to_space_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/batch_to_space/batch_to_space_kernel_base.cpp index ffbeb872a78b90..1b7f0bc5ce3c24 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/batch_to_space/batch_to_space_kernel_base.cpp +++ 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/batch_to_space/batch_to_space_kernel_base.cpp @@ -41,27 +41,16 @@ bool BatchToSpaceKernelBase::Validate(const Params& p, const optional_params& o) CommonDispatchData BatchToSpaceKernelBase::SetDefault(const batch_to_space_params& params, const optional_params&) const { const auto& out = params.output; - CommonDispatchData runInfo; - std::vector global; - std::vector local; - + CommonDispatchData dispatchData; if (out.GetLayout() == DataLayout::b_fs_yx_fsv16 && out.Feature().v % 16 == 0) { - global = { out.Batch().v, out.Feature().v, out.Y().v * out.X().v }; - local = {1, 16, 1}; + dispatchData.gws = { out.Batch().v, out.Feature().v, out.Y().v * out.X().v }; + dispatchData.lws = { 1, 16, 1 }; } else { - global = { out.Batch().v, out.Feature().v, out.W().v * out.Z().v * out.Y().v * out.X().v }; - local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + dispatchData.gws = { out.Batch().v, out.Feature().v, out.W().v * out.Z().v * out.Y().v * out.X().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); } - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; - - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; - - return runInfo; + return dispatchData; } JitConstants BatchToSpaceKernelBase::GetJitConstants(const batch_to_space_params& params) const { @@ -101,14 +90,14 @@ KernelsData BatchToSpaceKernelBase::GetCommonKernelsData(const Params& params, c return {}; } - auto runInfo = SetDefault(newParams, options); + auto dispatchData = SetDefault(newParams, options); auto entry_point = GetEntryPoint(kernelName, newParams.layerID, options); auto cldnn_jit = GetJitConstants(newParams); std::string jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point, + FillCLKernelData(kernel, 
dispatchData, params.engineInfo, kernelName, jit, entry_point, "", false, false, 1, GetFusedPrimitiveInputsCount(params)); kd.estimatedTime = estimatedTime; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_1x1.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_1x1.cpp index 15e6c483b592a8..9fb6b259a261aa 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_1x1.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_1x1.cpp @@ -43,10 +43,8 @@ ParamsKey BinaryConvolutionKernel1x1::GetSupportedKey() const { return k; } -BinaryConvolutionKernelBase::DispatchData BinaryConvolutionKernel1x1::SetDefault( - const binary_convolution_params& params, - int) const { - DispatchData kd = BinaryConvolutionKernelBase::SetDefault(params); +BinaryConvolutionKernelBase::DispatchData BinaryConvolutionKernel1x1::SetDefault(const binary_convolution_params& params, int) const { + DispatchData dispatchData = BinaryConvolutionKernelBase::SetDefault(params); const auto& out = params.output; @@ -55,17 +53,17 @@ BinaryConvolutionKernelBase::DispatchData BinaryConvolutionKernel1x1::SetDefault auto f = out.Feature().v; auto b = out.Batch().v; - kd.gws0 = Align(x * y, sub_group_size); - kd.gws1 = CeilDiv(f, 2 * sub_group_size); // 1 WI calcs 32 OC - kd.gws2 = b; + dispatchData.gws[0] = Align(x * y, sub_group_size); + dispatchData.gws[1] = CeilDiv(f, 2 * sub_group_size); // 1 WI calcs 32 OC + dispatchData.gws[2] = b; - kd.lws0 = sub_group_size; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = sub_group_size; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - kd.efficiency = FORCE_PRIORITY_1; + dispatchData.efficiency = FORCE_PRIORITY_1; - return kd; + return dispatchData; } bool 
BinaryConvolutionKernel1x1::Validate(const Params& p, const optional_params& o) const { @@ -89,8 +87,8 @@ bool BinaryConvolutionKernel1x1::Validate(const Params& p, const optional_params } JitConstants BinaryConvolutionKernel1x1::GetJitConstants(const binary_convolution_params& params, - const DispatchData& runInfo) const { - auto jit = Parent::GetJitConstants(params, runInfo); + const DispatchData& dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", sub_group_size)); jit.AddConstant(MakeJitConstant("INPUT0_FEATURE_NUM_PACKED", CeilDiv(params.inputs[0].Feature().v, ic_pack_size))); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_1x1.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_1x1.h index 7be1117b29dc60..fe47b1bac891b0 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_1x1.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_1x1.h @@ -35,9 +35,9 @@ class BinaryConvolutionKernel1x1 : public BinaryConvolutionKernelBase { return WeightsLayout::os_is_yx_osv32_isv32p; } JitConstants GetFusedPrimitivesJitConstants(const binary_convolution_params& params, - const DispatchData& kd) const override; + const DispatchData& dispatchData) const override; bool Validate(const Params& p, const optional_params& o) const override; DispatchData SetDefault(const binary_convolution_params& arg, int autoTuneIndex = -1) const override; - JitConstants GetJitConstants(const binary_convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const binary_convolution_params& params, const DispatchData& dispatchData) const override; }; } // namespace kernel_selector diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_1x1_b_fs_yx_fsv16.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_1x1_b_fs_yx_fsv16.cpp index bf680a496a366f..ccf6420efec861 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_1x1_b_fs_yx_fsv16.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_1x1_b_fs_yx_fsv16.cpp @@ -46,7 +46,7 @@ ParamsKey BinaryConvolutionKernel1x1_b_fs_yx_fsv16::GetSupportedKey() const { BinaryConvolutionKernelBase::DispatchData BinaryConvolutionKernel1x1_b_fs_yx_fsv16::SetDefault( const binary_convolution_params& params, int) const { - DispatchData kd = BinaryConvolutionKernelBase::SetDefault(params); + DispatchData dispatchData = BinaryConvolutionKernelBase::SetDefault(params); const auto& out = params.output; @@ -55,17 +55,15 @@ BinaryConvolutionKernelBase::DispatchData BinaryConvolutionKernel1x1_b_fs_yx_fsv auto f = out.Feature().v; auto b = out.Batch().v; - kd.gws0 = Align(x * y, sub_group_size); - kd.gws1 = CeilDiv(f, sub_group_size); // 1 WI calcs 16 OC - kd.gws2 = b; + dispatchData.gws[0] = Align(x * y, sub_group_size); + dispatchData.gws[1] = CeilDiv(f, sub_group_size); // 1 WI calcs 16 OC + dispatchData.gws[2] = b; - kd.lws0 = sub_group_size; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws = { sub_group_size, 1, 1 }; - kd.efficiency = FORCE_PRIORITY_1; + dispatchData.efficiency = FORCE_PRIORITY_1; - return kd; + return dispatchData; } bool BinaryConvolutionKernel1x1_b_fs_yx_fsv16::Validate(const Params& p, const optional_params& o) const { @@ -89,8 +87,8 @@ bool BinaryConvolutionKernel1x1_b_fs_yx_fsv16::Validate(const Params& p, const o } JitConstants BinaryConvolutionKernel1x1_b_fs_yx_fsv16::GetJitConstants(const binary_convolution_params& 
params, - const DispatchData& runInfo) const { - auto jit = Parent::GetJitConstants(params, runInfo); + const DispatchData& dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", sub_group_size)); jit.AddConstant(MakeJitConstant("INPUT0_FEATURE_NUM_PACKED", CeilDiv(params.inputs[0].Feature().v, ic_pack_size))); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_1x1_b_fs_yx_fsv16.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_1x1_b_fs_yx_fsv16.h index 182267fd3c6e33..74cc9b9fa0f89b 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_1x1_b_fs_yx_fsv16.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_1x1_b_fs_yx_fsv16.h @@ -35,9 +35,9 @@ class BinaryConvolutionKernel1x1_b_fs_yx_fsv16 : public BinaryConvolutionKernelB return WeightsLayout::os_is_yx_osv32_isv32p; } JitConstants GetFusedPrimitivesJitConstants(const binary_convolution_params& params, - const DispatchData& kd) const override; + const DispatchData& dispatchData) const override; bool Validate(const Params& p, const optional_params& o) const override; DispatchData SetDefault(const binary_convolution_params& arg, int autoTuneIndex = -1) const override; - JitConstants GetJitConstants(const binary_convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const binary_convolution_params& params, const DispatchData& dispatchData) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_base.cpp 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_base.cpp index 3ee6895ef35b60..fe6f3495200a8c 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_base.cpp @@ -43,9 +43,9 @@ bool BinaryConvolutionKernelBase::Validate(const Params& p, const optional_param } JitConstants BinaryConvolutionKernelBase::GetJitConstants(const binary_convolution_params& params, - const DispatchData& kd) const { + const DispatchData& dispatchData) const { JitConstants jit = WeightBiasKernelBase::GetJitConstants(params); - jit.Merge(GetFusedPrimitivesJitConstants(params, kd)); + jit.Merge(GetFusedPrimitivesJitConstants(params, dispatchData)); jit.AddConstants({ MakeJitConstant("STRIDE", params.stride), @@ -63,25 +63,25 @@ JitConstants BinaryConvolutionKernelBase::GetFusedPrimitivesJitConstants(const b return {}; } -bool BinaryConvolutionKernelBase::CheckWorkGroups(const BinaryConvolutionKernelBase::DispatchData& kd) { - if (kd.gws0 == 0 || kd.gws1 == 0 || kd.gws2 == 0 || kd.lws0 == 0 || kd.lws1 == 0 || kd.lws2 == 0) { +bool BinaryConvolutionKernelBase::CheckWorkGroups(const BinaryConvolutionKernelBase::DispatchData& dispatchData) { + if (dispatchData.gws.size() != 3 || dispatchData.lws.size() != 3) return false; - } - if ((kd.gws0 % kd.lws0) != 0 || (kd.gws1 % kd.lws1) != 0 || (kd.gws2 % kd.lws2) != 0) { - return false; + for (size_t i = 0; i < dispatchData.gws.size(); i++) { + if (dispatchData.gws[i] == 0 || dispatchData.lws[i] == 0) + return false; + if ((dispatchData.gws[i] % dispatchData.lws[i]) != 0) + return false; } return true; } -BinaryConvolutionKernelBase::DispatchData BinaryConvolutionKernelBase::SetDefault( - const binary_convolution_params& params, - int) const { - DispatchData kd; 
+BinaryConvolutionKernelBase::DispatchData BinaryConvolutionKernelBase::SetDefault(const binary_convolution_params& params, + int) const { + DispatchData dispatchData; const auto& out = params.output; - kd.fp16UnitUsed = out.GetDType() == Datatype::F16; std::vector global; if (params.output.GetLayout() == DataLayout::bfyx || params.output.GetLayout() == DataLayout::byxf) { global = {out.X().v, out.Y().v, out.Feature().v * out.Batch().v}; @@ -91,28 +91,23 @@ BinaryConvolutionKernelBase::DispatchData BinaryConvolutionKernelBase::SetDefaul auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - kd.cldnnStyle.blockWidth = 1; - kd.cldnnStyle.blockHeight = 1; - kd.cldnnStyle.prefetch = 0; - kd.cldnnStyle.inputBlockArraySize = 0; - kd.cldnnStyle.inputBlockWidth = 0; - - kd.gemmStyle.globalWorkSizeDX = 1; - kd.gemmStyle.globalWorkSizeDY = 1; - kd.gemmStyle.globalWorkSizeDZ = 1; - kd.gemmStyle.subBlockDimK = 1; - kd.gemmStyle.subBlockDimM = 0; - kd.gemmStyle.subBlockDimN = 0; - kd.efficiency = DONT_USE_IF_HAVE_SOMETHING_ELSE; - return kd; + dispatchData.gws = global; + dispatchData.lws = local; + + dispatchData.cldnnStyle.blockWidth = 1; + dispatchData.cldnnStyle.blockHeight = 1; + dispatchData.cldnnStyle.prefetch = 0; + dispatchData.cldnnStyle.inputBlockArraySize = 0; + dispatchData.cldnnStyle.inputBlockWidth = 0; + + dispatchData.gemmStyle.globalWorkSizeDX = 1; + dispatchData.gemmStyle.globalWorkSizeDY = 1; + dispatchData.gemmStyle.globalWorkSizeDZ = 1; + dispatchData.gemmStyle.subBlockDimK = 1; + dispatchData.gemmStyle.subBlockDimM = 0; + dispatchData.gemmStyle.subBlockDimN = 0; + dispatchData.efficiency = DONT_USE_IF_HAVE_SOMETHING_ELSE; + return dispatchData; } KernelsData BinaryConvolutionKernelBase::GetCommonKernelsData(const Params& params, @@ -129,9 +124,9 @@ KernelsData 
BinaryConvolutionKernelBase::GetCommonKernelsData(const Params& para if (NeedPaddedInput()) { kd.reorderInput = CovolutionBinaryUpdateInputParams(newParams); } - DispatchData runInfo = SetDefault(newParams, autoTuneIndex); + DispatchData dispatchData = SetDefault(newParams, autoTuneIndex); - if (!CheckWorkGroups(runInfo)) { + if (!CheckWorkGroups(dispatchData)) { // Internal Error - wrong calculation of global/local work group sizes return {}; } @@ -147,7 +142,7 @@ KernelsData BinaryConvolutionKernelBase::GetCommonKernelsData(const Params& para } auto finalKernelName = GetKernelName(newParams); - auto cldnnJit = GetJitConstants(newParams, runInfo); + auto cldnnJit = GetJitConstants(newParams, dispatchData); auto entryPoint = GetEntryPoint(finalKernelName, newParams.layerID, options); auto jit = CreateJit(finalKernelName, cldnnJit, entryPoint); @@ -161,7 +156,7 @@ KernelsData BinaryConvolutionKernelBase::GetCommonKernelsData(const Params& para } FillCLKernelData(kernel, - runInfo, + dispatchData, params.engineInfo, finalKernelName, jit, @@ -173,7 +168,7 @@ KernelsData BinaryConvolutionKernelBase::GetCommonKernelsData(const Params& para fused_deps_total); kernel.arguments.push_back({ArgumentDescriptor::Types::SPLIT, 0}); - kd.estimatedTime = runInfo.efficiency; + kd.estimatedTime = dispatchData.efficiency; kd.autoTuneIndex = autoTuneIndex; return {kd}; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_base.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_base.h index ffa92e0cb9c8c6..b8ff2d38b81f5a 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_base.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_base.h @@ -66,9 +66,9 @@ class BinaryConvolutionKernelBase : public 
WeightBiasKernelBase { virtual std::string GetKernelName(const binary_convolution_params&) const { return kernelName; } virtual bool NeedPaddedInput() const { return false; } bool Validate(const Params& p, const optional_params& o) const override; - virtual JitConstants GetJitConstants(const binary_convolution_params& params, const DispatchData& kd) const; + virtual JitConstants GetJitConstants(const binary_convolution_params& params, const DispatchData& dispatchData) const; virtual JitConstants GetFusedPrimitivesJitConstants(const binary_convolution_params& params, - const DispatchData& kd) const; + const DispatchData& dispatchData) const; virtual DispatchData SetDefault(const binary_convolution_params& params, int autoTuneIndex = -1) const; static bool CheckWorkGroups(const DispatchData&); KernelsData GetCommonKernelsData(const Params& params, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_generic.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_generic.cpp index dbbd4bc270b8ac..85535b94d87b5e 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_generic.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_generic.cpp @@ -43,10 +43,9 @@ ParamsKey BinaryConvolutionKernelGeneric::GetSupportedKey() const { return k; } -BinaryConvolutionKernelBase::DispatchData BinaryConvolutionKernelGeneric::SetDefault( - const binary_convolution_params& params, - int) const { - DispatchData kd = BinaryConvolutionKernelBase::SetDefault(params); +BinaryConvolutionKernelBase::DispatchData BinaryConvolutionKernelGeneric::SetDefault(const binary_convolution_params& params, + int) const { + DispatchData dispatchData = BinaryConvolutionKernelBase::SetDefault(params); const auto& out = 
params.output; @@ -55,17 +54,17 @@ BinaryConvolutionKernelBase::DispatchData BinaryConvolutionKernelGeneric::SetDef auto f = out.Feature().v; auto b = out.Batch().v; - kd.gws0 = Align(x, sub_group_size) * y; - kd.gws1 = CeilDiv(f, 2 * sub_group_size); // 1 WI calc 2 OC x 16 X - kd.gws2 = b; + dispatchData.gws[0] = Align(x, sub_group_size) * y; + dispatchData.gws[1] = CeilDiv(f, 2 * sub_group_size); // 1 WI calc 2 OC x 16 X + dispatchData.gws[2] = b; - kd.lws0 = sub_group_size; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = sub_group_size; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - kd.efficiency = FORCE_PRIORITY_2; + dispatchData.efficiency = FORCE_PRIORITY_2; - return kd; + return dispatchData; } bool BinaryConvolutionKernelGeneric::Validate(const Params& p, const optional_params& o) const { @@ -81,8 +80,8 @@ bool BinaryConvolutionKernelGeneric::Validate(const Params& p, const optional_pa } JitConstants BinaryConvolutionKernelGeneric::GetJitConstants(const binary_convolution_params& params, - const DispatchData& runInfo) const { - auto jit = Parent::GetJitConstants(params, runInfo); + const DispatchData& dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); auto input = params.inputs[0]; auto output = params.output; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_generic.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_generic.h index fdbc1532b93ccc..62f086340cfff0 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_generic.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_generic.h @@ -35,9 +35,9 @@ class BinaryConvolutionKernelGeneric : public BinaryConvolutionKernelBase { return WeightsLayout::os_is_yx_osv32_isv32p; } 
JitConstants GetFusedPrimitivesJitConstants(const binary_convolution_params& params, - const DispatchData& kd) const override; + const DispatchData& dispatchData) const override; bool Validate(const Params& p, const optional_params& o) const override; DispatchData SetDefault(const binary_convolution_params& arg, int autoTuneIndex = -1) const override; - JitConstants GetJitConstants(const binary_convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const binary_convolution_params& params, const DispatchData& dispatchData) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_ref.cpp index 47870bb678421b..fad9ce3170c9dc 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_ref.cpp @@ -38,10 +38,9 @@ ParamsKey BinaryConvolutionKernelRef::GetSupportedKey() const { return k; } -BinaryConvolutionKernelBase::DispatchData BinaryConvolutionKernelRef::SetDefault( - const binary_convolution_params& params, - int) const { - DispatchData kd = BinaryConvolutionKernelBase::SetDefault(params); +BinaryConvolutionKernelBase::DispatchData BinaryConvolutionKernelRef::SetDefault(const binary_convolution_params& params, + int) const { + DispatchData dispatchData = BinaryConvolutionKernelBase::SetDefault(params); const auto& out = params.output; @@ -50,22 +49,22 @@ BinaryConvolutionKernelBase::DispatchData BinaryConvolutionKernelRef::SetDefault auto y = out.Y().v; auto x = out.X().v; - kd.gws0 = b; - kd.gws1 = f; - kd.gws2 = x * y; + dispatchData.gws[0] = b; + dispatchData.gws[1] = f; + 
dispatchData.gws[2] = x * y; - kd.lws0 = 1; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - kd.efficiency = DONT_USE_IF_HAVE_SOMETHING_ELSE; + dispatchData.efficiency = DONT_USE_IF_HAVE_SOMETHING_ELSE; - return kd; + return dispatchData; } JitConstants BinaryConvolutionKernelRef::GetJitConstants(const binary_convolution_params& params, - const DispatchData& runInfo) const { - auto jit = Parent::GetJitConstants(params, runInfo); + const DispatchData& dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); int pad_physical_val = params.pad_value == -1.0f ? 0x00000000 : 0xFFFFFFFF; int leftovers_mask = (0xFFFFFFFF >> (32 - params.inputs[0].Feature().v % 32)); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_ref.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_ref.h index 7ce702f8cabdc3..092318684d437e 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_ref.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/binary_convolution/binary_convolution_kernel_ref.h @@ -35,9 +35,9 @@ class BinaryConvolutionKernelRef : public BinaryConvolutionKernelBase { return WeightsLayout::os_is_yx_osv32_isv32p; } JitConstants GetFusedPrimitivesJitConstants(const binary_convolution_params& params, - const DispatchData& kd) const override; + const DispatchData& dispatchData) const override; bool Validate(const Params& p, const optional_params& o) const override; DispatchData SetDefault(const binary_convolution_params& arg, int autoTuneIndex = -1) const override; - JitConstants GetJitConstants(const binary_convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const binary_convolution_params& params, const 
DispatchData& dispatchData) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/border/border_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/border/border_kernel_base.cpp index 5f5e414b513305..16e1c386d821e3 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/border/border_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/border/border_kernel_base.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -31,22 +31,12 @@ JitConstants BorderKernelBase::GetJitConstants(const border_params& params) cons BorderKernelBase::DispatchData BorderKernelBase::SetDefault(const border_params& params) const { const auto& output = params.output; - DispatchData kd; + DispatchData dispatchData; - kd.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; + dispatchData.gws = { output.X().v * output.Z().v, output.Y().v * output.W().v, output.Batch().v * output.Feature().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - std::vector global{output.X().v * output.Z().v, output.Y().v * output.W().v, output.Batch().v * output.Feature().v}; - const auto& local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - return kd; + return dispatchData; } KernelsData BorderKernelBase::GetCommonKernelsData(const Params& params, @@ -57,7 +47,7 @@ KernelsData BorderKernelBase::GetCommonKernelsData(const Params& params, const auto& prim_params = static_cast(params); - auto run_info = SetDefault(prim_params); + auto 
dispatchData = SetDefault(prim_params); KernelData k_data = KernelData::Default(params); auto cldnn_jit = GetJitConstants(prim_params); @@ -65,7 +55,7 @@ KernelsData BorderKernelBase::GetCommonKernelsData(const Params& params, auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = k_data.kernels[0]; - FillCLKernelData(kernel, run_info, params.engineInfo, kernelName, jit, entry_point); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point); k_data.estimatedTime = estimated_time; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/broadcast/broadcast_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/broadcast/broadcast_kernel_base.cpp index 6c8c69ff58fc7c..b0b7ce1a0fd181 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/broadcast/broadcast_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/broadcast/broadcast_kernel_base.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2019 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -28,22 +28,12 @@ JitConstants BroadcastKernelBase::GetJitConstants(const broadcast_params& params BroadcastKernelBase::DispatchData BroadcastKernelBase::SetDefault(const broadcast_params& params) { const auto& output = params.output; - DispatchData kd; + DispatchData dispatchData; - kd.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; + dispatchData.gws = { output.X().v, output.Y().v * output.Z().v, output.Batch().v * output.Feature().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - std::vector global{output.X().v, output.Y().v * output.Z().v, output.Batch().v * output.Feature().v}; - const auto& local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - return kd; + return dispatchData; } KernelsData BroadcastKernelBase::GetCommonKernelsData(const Params& params, @@ -54,7 +44,7 @@ KernelsData BroadcastKernelBase::GetCommonKernelsData(const Params& params, const auto& prim_params = static_cast(params); - auto run_info = SetDefault(prim_params); + auto dispatchData = SetDefault(prim_params); KernelData k_data = KernelData::Default(params); auto cldnn_jit = GetJitConstants(prim_params); @@ -62,7 +52,7 @@ KernelsData BroadcastKernelBase::GetCommonKernelsData(const Params& params, auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = k_data.kernels[0]; - FillCLKernelData(kernel, run_info, params.engineInfo, kernelName, jit, entry_point); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point); k_data.estimatedTime = estimated_time; return {k_data}; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/concatenation/concatenation_kernel_b_fs_yx_fsv16.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/concatenation/concatenation_kernel_b_fs_yx_fsv16.cpp index 
57fc05002494ef..1cc98111416661 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/concatenation/concatenation_kernel_b_fs_yx_fsv16.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/concatenation/concatenation_kernel_b_fs_yx_fsv16.cpp @@ -109,23 +109,23 @@ bool ConcatenationKernel_b_fs_yx_fsv16::Validate(const Params& p, const optional } ConcatenationKernelBase::DispatchData ConcatenationKernel_b_fs_yx_fsv16::SetDefault(const concatenation_params& params) const { - DispatchData runInfo = ConcatenationKernelBase::SetDefault(params); + DispatchData dispatchData = ConcatenationKernelBase::SetDefault(params); const auto& input = params.inputs[0]; auto tileXY = getTileXY(params); size_t tileF = params.misalignment == 0 ? 1 : 2; - runInfo.gws0 = CeilDiv(input.X().v * input.Y().v, tileXY); - runInfo.gws1 = Align(input.Feature().v, 16 * tileF) / tileF; - runInfo.gws2 = input.Batch().v; + dispatchData.gws[0] = CeilDiv(input.X().v * input.Y().v, tileXY); + dispatchData.gws[1] = Align(input.Feature().v, 16 * tileF) / tileF; + dispatchData.gws[2] = input.Batch().v; - runInfo.lws0 = 1; - runInfo.lws1 = 16; - runInfo.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 16; + dispatchData.lws[2] = 1; - runInfo.efficiency = FORCE_PRIORITY_1; + dispatchData.efficiency = FORCE_PRIORITY_1; - return runInfo; + return dispatchData; } JitConstants ConcatenationKernel_b_fs_yx_fsv16::GetJitConstants(const concatenation_params& params) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/concatenation/concatenation_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/concatenation/concatenation_kernel_base.cpp index 0eb3fb2074164c..b70ac9f914cd81 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/concatenation/concatenation_kernel_base.cpp +++ 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/concatenation/concatenation_kernel_base.cpp @@ -69,7 +69,7 @@ JitConstants ConcatenationKernelBase::GetJitConstants(const concatenation_params } ConcatenationKernelBase::DispatchData ConcatenationKernelBase::SetDefault(const concatenation_params& params) const { - DispatchData kd; + DispatchData dispatchData; const auto& dims = params.inputs[0].GetDims(); auto layout = params.inputs[0].GetLayout(); @@ -80,19 +80,19 @@ ConcatenationKernelBase::DispatchData ConcatenationKernelBase::SetDefault(const DataTensor::Channelndex(layout, Tensor::DataChannelName::X) }; // Determine global work sizes. - kd.gws0 = idx[2] != -1 ? dims[idx[2]].v : 1; // Y - kd.gws1 = idx[1] != -1 ? dims[idx[1]].v : 1; // F - kd.gws2 = idx[0] != -1 ? dims[idx[0]].v : 1; // B + dispatchData.gws[0] = idx[2] != -1 ? dims[idx[2]].v : 1; // Y + dispatchData.gws[1] = idx[1] != -1 ? dims[idx[1]].v : 1; // F + dispatchData.gws[2] = idx[0] != -1 ? dims[idx[0]].v : 1; // B - kd.lws0 = std::min(std::max(kd.gws0, static_cast(1)), static_cast(32)); - while (kd.gws0 % kd.lws0 != 0) { - --kd.lws0; + dispatchData.lws[0] = std::min(std::max(dispatchData.gws[0], static_cast(1)), static_cast(32)); + while (dispatchData.gws[0] % dispatchData.lws[0] != 0) { + --dispatchData.lws[0]; } - kd.lws1 = 1; - kd.lws2 = 1; - kd.efficiency = DONT_USE_IF_HAVE_SOMETHING_ELSE; - return kd; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; + dispatchData.efficiency = DONT_USE_IF_HAVE_SOMETHING_ELSE; + return dispatchData; } KernelsData ConcatenationKernelBase::GetCommonKernelsData(const Params& params, const optional_params& options) const { @@ -120,13 +120,13 @@ KernelsData ConcatenationKernelBase::GetCommonKernelsData(const Params& params, ifm_offset += ifm; auto& kernel = kd.kernels[i]; - DispatchData runInfo = SetDefault(newParams); + DispatchData dispatchData = SetDefault(newParams); auto cldnnJit = GetJitConstants(newParams); auto entryPoint = 
GetEntryPoint(kernelName, newParams.layerID, options); auto jit = CreateJit(kernelName, cldnnJit, entryPoint); - kernel.workGroups.global = {runInfo.gws0, runInfo.gws1, runInfo.gws2}; - kernel.workGroups.local = {runInfo.lws0, runInfo.lws1, runInfo.lws2}; + kernel.workGroups.global = dispatchData.gws; + kernel.workGroups.local = dispatchData.lws; kernel.kernelString = GetKernelString(kernelName, jit, entryPoint, params.engineInfo); kernel.arguments.push_back({ArgumentDescriptor::Types::INPUT, (uint32_t)i }); kernel.arguments.push_back({ArgumentDescriptor::Types::OUTPUT, 0}); @@ -138,7 +138,7 @@ KernelsData ConcatenationKernelBase::GetCommonKernelsData(const Params& params, kernel.arguments.push_back({ArgumentDescriptor::Types::SCALAR, 0}); lastOffset += (uint32_t)input.GetDims()[concatChannelIndex].v; - efficiency = std::max(efficiency, runInfo.efficiency); + efficiency = std::max(efficiency, dispatchData.efficiency); } kd.estimatedTime = efficiency; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/concatenation/concatenation_kernel_depth_bfyx_no_pitch.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/concatenation/concatenation_kernel_depth_bfyx_no_pitch.cpp index 62e5a65acc76ce..b5046fff01adbe 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/concatenation/concatenation_kernel_depth_bfyx_no_pitch.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/concatenation/concatenation_kernel_depth_bfyx_no_pitch.cpp @@ -67,22 +67,21 @@ bool ConcatenationKernel_depth_bfyx_no_pitch::Validate(const Params& p, const op return true; } -ConcatenationKernelBase::DispatchData ConcatenationKernel_depth_bfyx_no_pitch::SetDefault( - const concatenation_params& params) const { - DispatchData runInfo = ConcatenationKernelBase::SetDefault(params); +ConcatenationKernelBase::DispatchData ConcatenationKernel_depth_bfyx_no_pitch::SetDefault(const concatenation_params& 
params) const { + DispatchData dispatchData = ConcatenationKernelBase::SetDefault(params); const auto& input = params.inputs[0]; const auto batch = input.Batch().v; - runInfo.gws0 = batch; - runInfo.gws1 = Align(std::max((size_t)1, input.LogicalSize() / batch), 16 * 8) / 8; - runInfo.gws2 = 1; + dispatchData.gws[0] = batch; + dispatchData.gws[1] = Align(std::max((size_t)1, input.LogicalSize() / batch), 16 * 8) / 8; + dispatchData.gws[2] = 1; - runInfo.lws0 = 1; - runInfo.lws1 = 16; - runInfo.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 16; + dispatchData.lws[2] = 1; - runInfo.efficiency = FORCE_PRIORITY_9; + dispatchData.efficiency = FORCE_PRIORITY_9; - return runInfo; + return dispatchData; } KernelsData ConcatenationKernel_depth_bfyx_no_pitch::GetKernelsData(const Params& params, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/concatenation/concatenation_kernel_fs_b_yx_fsv32.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/concatenation/concatenation_kernel_fs_b_yx_fsv32.cpp index 7eb9e19d3b759a..4285f5280ba4eb 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/concatenation/concatenation_kernel_fs_b_yx_fsv32.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/concatenation/concatenation_kernel_fs_b_yx_fsv32.cpp @@ -62,20 +62,20 @@ bool ConcatenationKernel_fs_b_yx_fsv32::Validate(const Params& p, const optional } ConcatenationKernelBase::DispatchData ConcatenationKernel_fs_b_yx_fsv32::SetDefault(const concatenation_params& params) const { - DispatchData runInfo = ConcatenationKernelBase::SetDefault(params); + DispatchData dispatchData = ConcatenationKernelBase::SetDefault(params); const auto& input = params.inputs[0]; - runInfo.gws0 = input.X().v; - runInfo.gws1 = input.Y().v; - runInfo.gws2 = CeilDiv(input.Feature().v, fsv) * subGroupSize * input.Batch().v; + dispatchData.gws[0] = input.X().v; + dispatchData.gws[1] = 
input.Y().v; + dispatchData.gws[2] = CeilDiv(input.Feature().v, fsv) * subGroupSize * input.Batch().v; - runInfo.lws0 = 1; - runInfo.lws1 = 1; - runInfo.lws2 = subGroupSize; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = subGroupSize; - runInfo.efficiency = FORCE_PRIORITY_1; + dispatchData.efficiency = FORCE_PRIORITY_1; - return runInfo; + return dispatchData; } JitConstants ConcatenationKernel_fs_b_yx_fsv32::GetJitConstants(const concatenation_params& params) const { @@ -113,13 +113,13 @@ KernelsData ConcatenationKernel_fs_b_yx_fsv32::GetKernelsData(const Params& para ifm_offset += ifm; auto& kernel = kd.kernels[i]; - DispatchData runInfo = SetDefault(newParams); + DispatchData dispatchData = SetDefault(newParams); auto cldnnJit = GetJitConstants(newParams); auto entryPoint = GetEntryPoint(kernelName, newParams.layerID, optParams); auto jit = CreateJit(kernelName, cldnnJit, entryPoint); - kernel.workGroups.global = {runInfo.gws0, runInfo.gws1, runInfo.gws2}; - kernel.workGroups.local = {runInfo.lws0, runInfo.lws1, runInfo.lws2}; + kernel.workGroups.global = dispatchData.gws; + kernel.workGroups.local = dispatchData.lws; kernel.kernelString = GetKernelString(kernelName, jit, entryPoint, params.engineInfo); kernel.arguments.push_back({ArgumentDescriptor::Types::INPUT, (uint32_t)i}); kernel.arguments.push_back({ArgumentDescriptor::Types::OUTPUT, 0}); @@ -131,7 +131,7 @@ KernelsData ConcatenationKernel_fs_b_yx_fsv32::GetKernelsData(const Params& para kernel.arguments.push_back({ArgumentDescriptor::Types::SCALAR, 0}); lastOffset += (uint32_t)input.GetDims()[concatChannelIndex].v; - efficiency = std::max(efficiency, runInfo.efficiency); + efficiency = std::max(efficiency, dispatchData.efficiency); } kd.estimatedTime = efficiency; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/concatenation/concatenation_kernel_simple_ref.cpp 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/concatenation/concatenation_kernel_simple_ref.cpp index 36abefe1eae157..9d1fcfad284c9a 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/concatenation/concatenation_kernel_simple_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/concatenation/concatenation_kernel_simple_ref.cpp @@ -88,27 +88,17 @@ bool ConcatenationKernel_simple_Ref::Validate(const Params& p, const optional_pa } ConcatenationKernelBase::DispatchData ConcatenationKernel_simple_Ref::SetDefault(const concatenation_params& params) const { - DispatchData kd; + DispatchData dispatchData; const auto& input = params.inputs[0]; - std::vector global; - global = { - input.X().v * input.Y().v, - input.Z().v * input.W().v, - input.Feature().v * input.Batch().v}; - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + dispatchData.gws = { input.X().v * input.Y().v, + input.Z().v * input.W().v, + input.Feature().v * input.Batch().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - kd.gws0 = global[0]; // X * Y - kd.gws1 = global[1]; // Z * W - kd.gws2 = global[2]; // F * B + dispatchData.efficiency = FORCE_PRIORITY_9; - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - kd.efficiency = FORCE_PRIORITY_9; - - return kd; + return dispatchData; } KernelsData ConcatenationKernel_simple_Ref::GetKernelsData(const Params& params, const optional_params& optParams) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16.cpp index 5ea9e20e148d69..8bf50831b7835b 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16.cpp +++ 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16.cpp @@ -34,7 +34,7 @@ ConvolutionKernel_b_fs_yx_fsv16::ConvolutionKernel_b_fs_yx_fsv16() : Convolution } ConvolutionKernel_b_fs_yx_fsv16::AutoTuneOption ConvolutionKernel_b_fs_yx_fsv16::GetAutoTuneOptions(const Params& params, - int /*autoTuneIndex*/) const { + int /*autoTuneIndex*/) const { const convolution_params& cp = static_cast(params); auto x = cp.output.X().v; auto f = cp.output.Feature().v; @@ -89,33 +89,33 @@ ParamsKey ConvolutionKernel_b_fs_yx_fsv16::GetSupportedKey() const { } ConvolutionKernelBase::DispatchData ConvolutionKernel_b_fs_yx_fsv16::SetDefault(const convolution_params& params, - int autoTuneIndex) const { - DispatchData kd = ConvolutionKernelBase::SetDefault(params); + int autoTuneIndex) const { + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(params); const auto& out = params.output; auto autoTune = GetAutoTuneOptions(params, autoTuneIndex); - kd.cldnnStyle.blockWidth = autoTune.blockWidth; + dispatchData.cldnnStyle.blockWidth = autoTune.blockWidth; auto x = out.X().v; auto y = out.Y().v; auto f = out.Feature().v; auto b = out.Batch().v; - kd.gws0 = CeilDiv(x, autoTune.blockWidth) * y; - kd.gws1 = Align(f, sub_group_size); - kd.gws2 = b; + dispatchData.gws[0] = CeilDiv(x, autoTune.blockWidth) * y; + dispatchData.gws[1] = Align(f, sub_group_size); + dispatchData.gws[2] = b; - kd.lws0 = 1; - kd.lws1 = sub_group_size; - kd.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = sub_group_size; + dispatchData.lws[2] = 1; if (b == 1) - kd.efficiency = FORCE_PRIORITY_2; + dispatchData.efficiency = FORCE_PRIORITY_2; else - kd.efficiency = FORCE_PRIORITY_7; + dispatchData.efficiency = FORCE_PRIORITY_7; - return kd; + return dispatchData; } bool ConvolutionKernel_b_fs_yx_fsv16::Validate(const Params& p, const optional_params& o) const { @@ -155,12 +155,12 @@ bool ConvolutionKernel_b_fs_yx_fsv16::Validate(const 
Params& p, const optional_p } JitConstants ConvolutionKernel_b_fs_yx_fsv16::GetJitConstants(const convolution_params& params, - const DispatchData& runInfo) const { + const DispatchData& dispatchData) const { auto input = params.inputs[0]; auto output = params.output; - auto jit = Parent::GetJitConstants(params, runInfo); + auto jit = Parent::GetJitConstants(params, dispatchData); - auto blockWidth = runInfo.cldnnStyle.blockWidth; + auto blockWidth = dispatchData.cldnnStyle.blockWidth; if (!params.fused_ops.empty()) { auto input_dt = GetActivationType(params); FusedOpsConfiguration conf_vec = { "_VEC", @@ -213,8 +213,8 @@ JitConstants ConvolutionKernel_b_fs_yx_fsv16::GetJitConstants(const convolution_ } KernelsData ConvolutionKernel_b_fs_yx_fsv16::GetTunedKernelsDataByIndex(const Params& params, - const optional_params& options, - const int autoTuneIndex) const { + const optional_params& options, + const int autoTuneIndex) const { auto tuneOptions = GetAutoTuneOptions(params, autoTuneIndex); return GetCommonKernelsData(params, options, tuneOptions.exeMode, autoTuneIndex); } @@ -224,7 +224,7 @@ KernelsData ConvolutionKernel_b_fs_yx_fsv16::GetKernelsData(const Params& params } KernelsData ConvolutionKernel_b_fs_yx_fsv16::GetKernelsDataForAutoTune(const Params& params, - const optional_params& options) const { + const optional_params& options) const { if (!Validate(params, options)) { return {}; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16.h index b371a023fae103..ca6a78482a76cd 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16.h @@ -49,7 +49,7 @@ class 
ConvolutionKernel_b_fs_yx_fsv16 : public ConvolutionKernelBase { bool NeedPaddedInput() const override { return false; } bool Validate(const Params& p, const optional_params& o) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; private: struct AutoTuneOption { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_1x1.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_1x1.cpp index c3b10842ad2e7e..7d9a70aa73a03b 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_1x1.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_1x1.cpp @@ -34,7 +34,7 @@ ConvolutionKernel_b_fs_yx_fsv16_1x1::ConvolutionKernel_b_fs_yx_fsv16_1x1() : Con } ConvolutionKernel_b_fs_yx_fsv16_1x1::AutoTuneOption ConvolutionKernel_b_fs_yx_fsv16_1x1::GetAutoTuneOptions(const Params& params, - int /*autoTuneIndex*/) const { + int /*autoTuneIndex*/) const { const convolution_params& cp = static_cast(params); auto x = cp.output.X().v; auto f = cp.output.Feature().v; @@ -73,10 +73,10 @@ ParamsKey ConvolutionKernel_b_fs_yx_fsv16_1x1::GetSupportedKey() const { ConvolutionKernelBase::DispatchData ConvolutionKernel_b_fs_yx_fsv16_1x1::SetDefault(const convolution_params& params, int autoTuneIndex) const { - DispatchData kd = ConvolutionKernelBase::SetDefault(params); + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(params); auto autoTune = GetAutoTuneOptions(params, autoTuneIndex); - kd.cldnnStyle.blockWidth = autoTune.blockWidth; + 
dispatchData.cldnnStyle.blockWidth = autoTune.blockWidth; const auto& input = params.inputs[0]; const auto& out = params.output; @@ -85,29 +85,29 @@ ConvolutionKernelBase::DispatchData ConvolutionKernel_b_fs_yx_fsv16_1x1::SetDefa auto f = out.Feature().v; auto b = out.Batch().v; - kd.gws0 = CeilDiv(x * y, autoTune.blockWidth); - kd.gws1 = Align(f, feature_block_size); - kd.gws2 = b; + dispatchData.gws[0] = CeilDiv(x * y, autoTune.blockWidth); + dispatchData.gws[1] = Align(f, feature_block_size); + dispatchData.gws[2] = b; - kd.lws0 = 1; - kd.lws1 = sub_group_size; - kd.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = sub_group_size; + dispatchData.lws[2] = 1; auto bBlockSizeX = x % autoTune.blockWidth == 0; auto bBlockSizeXY = out.X().pad.Total() + out.Y().pad.Total() == 0; auto bInputPad = input.X().pad.Total() + input.Y().pad.Total() != 0; - + if (b == 1) { if ((bBlockSizeX || bBlockSizeXY) && !bInputPad) { - kd.efficiency = FORCE_PRIORITY_1; + dispatchData.efficiency = FORCE_PRIORITY_1; } else { - kd.efficiency = FORCE_PRIORITY_3; + dispatchData.efficiency = FORCE_PRIORITY_3; } } else { - kd.efficiency = FORCE_PRIORITY_7; + dispatchData.efficiency = FORCE_PRIORITY_7; } - return kd; + return dispatchData; } bool ConvolutionKernel_b_fs_yx_fsv16_1x1::Validate(const Params& p, const optional_params& o) const { @@ -134,10 +134,10 @@ bool ConvolutionKernel_b_fs_yx_fsv16_1x1::Validate(const Params& p, const option } JitConstants ConvolutionKernel_b_fs_yx_fsv16_1x1::GetJitConstants(const convolution_params& params, - const DispatchData& runInfo) const { - auto jit = Parent::GetJitConstants(params, runInfo); + const DispatchData& dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); - auto blockWidth = runInfo.cldnnStyle.blockWidth; + auto blockWidth = dispatchData.cldnnStyle.blockWidth; if (!params.fused_ops.empty()) { auto input_dt = GetUnitType(params); FusedOpsConfiguration conf_vec = { "_VEC", diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_1x1.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_1x1.h index ff547e40534993..e51475143e8d8f 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_1x1.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_1x1.h @@ -43,7 +43,7 @@ class ConvolutionKernel_b_fs_yx_fsv16_1x1 : public ConvolutionKernelBase { } bool Validate(const Params& p, const optional_params& o) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; struct AutoTuneOption { size_t blockWidth; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_depthwise.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_depthwise.cpp index e2766a5163055b..82a92fa3ebb00d 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_depthwise.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_depthwise.cpp @@ -70,28 +70,29 @@ bool ConvolutionKernel_b_fs_yx_fsv16_depthwise::Validate(const Params& p, const } ConvolutionKernelBase::DispatchData ConvolutionKernel_b_fs_yx_fsv16_depthwise::SetDefault(const convolution_params& params, - int) const { - DispatchData runInfo = Parent::SetDefault(params); + int) const { + DispatchData dispatchData = 
Parent::SetDefault(params); const auto& out = params.output; - runInfo.gws0 = CeilDiv(out.X().v, x_block_size) * out.Y().v; - runInfo.gws1 = Align(out.Feature().v, feature_block_size); - runInfo.gws2 = out.Batch().v; - runInfo.lws0 = 1; - runInfo.lws1 = sub_group_size; - runInfo.lws2 = 1; + dispatchData.gws[0] = CeilDiv(out.X().v, x_block_size) * out.Y().v; + dispatchData.gws[1] = Align(out.Feature().v, feature_block_size); + dispatchData.gws[2] = out.Batch().v; + + dispatchData.lws[0] = 1; + dispatchData.lws[1] = sub_group_size; + dispatchData.lws[2] = 1; if (out.Batch().v == 1) - runInfo.efficiency = FORCE_PRIORITY_1; + dispatchData.efficiency = FORCE_PRIORITY_1; else - runInfo.efficiency = FORCE_PRIORITY_7; + dispatchData.efficiency = FORCE_PRIORITY_7; - return runInfo; + return dispatchData; } JitConstants ConvolutionKernel_b_fs_yx_fsv16_depthwise::GetJitConstants(const convolution_params& params, - const DispatchData& kd) const { - auto jit = ConvolutionKernelBase::GetJitConstants(params, kd); + const DispatchData& dispatchData) const { + auto jit = ConvolutionKernelBase::GetJitConstants(params, dispatchData); const size_t block_width = 8; @@ -129,7 +130,7 @@ JitConstants ConvolutionKernel_b_fs_yx_fsv16_depthwise::GetJitConstants(const co } KernelsData ConvolutionKernel_b_fs_yx_fsv16_depthwise::GetKernelsData(const Params& params, - const optional_params& options) const { + const optional_params& options) const { return GetCommonKernelsData(params, options); } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_depthwise.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_depthwise.h index 69a4073ea5979c..d2d1b3d1947b9a 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_depthwise.h +++ 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_depthwise.h @@ -41,7 +41,7 @@ class ConvolutionKernel_b_fs_yx_fsv16_depthwise : public ConvolutionKernelBase { } bool NeedPaddedInput() const override { return true; } - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& params, int autoTuneIndex = -1) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_imad_1x1.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_imad_1x1.cpp index 64144f2f930409..148d91b26842b6 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_imad_1x1.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_imad_1x1.cpp @@ -76,11 +76,11 @@ ParamsKey Convolution_kernel_b_fs_yx_fsv16_imad_1x1::GetSupportedKey() const { } JitConstants Convolution_kernel_b_fs_yx_fsv16_imad_1x1::GetJitConstants(const convolution_params& params, - const DispatchData& kd) const { - auto mem_consts = Parent::GetJitConstants(params, kd); - mem_consts.AddConstant(MakeJitConstant("OUT_BLOCK_SPATIAL", kd.cldnnStyle.blockWidth)); - mem_consts.AddConstant(MakeJitConstant("OUT_BLOCK_FEATURES", kd.cldnnStyle.blockHeight)); - mem_consts.AddConstant(MakeJitConstant("FEATURE_SLM_SPLIT", kd.cldnnStyle.prefetch)); + const DispatchData& dispatchData) const { + auto mem_consts = Parent::GetJitConstants(params, dispatchData); + mem_consts.AddConstant(MakeJitConstant("OUT_BLOCK_SPATIAL", dispatchData.cldnnStyle.blockWidth)); 
+ mem_consts.AddConstant(MakeJitConstant("OUT_BLOCK_FEATURES", dispatchData.cldnnStyle.blockHeight)); + mem_consts.AddConstant(MakeJitConstant("FEATURE_SLM_SPLIT", dispatchData.cldnnStyle.prefetch)); mem_consts.Merge(MakeTypeJitConstants(GetAccumulatorType(params), "ACCUMULATOR")); mem_consts.Merge(MakeTypeJitConstants(GetActivationType(params), "ACTIVATION")); @@ -106,27 +106,27 @@ JitConstants Convolution_kernel_b_fs_yx_fsv16_imad_1x1::GetJitConstants(const co ConvolutionKernelBase::DispatchData Convolution_kernel_b_fs_yx_fsv16_imad_1x1::SetDefault(const convolution_params& params, int index) const { - DispatchData kd; + DispatchData dispatchData; const auto& output = params.output; auto tune_params = GetAutoTuneParams(params, index); size_t k_slices = tune_params.feature_slm_split; - kd.gws0 = CeilDiv(output.X().v * output.Y().v, tune_params.out_block_spatial); - kd.gws1 = CeilDiv(output.Feature().v, tune_params.out_block_features * simd) * simd * k_slices; - kd.gws2 = output.Batch().v; + dispatchData.gws[0] = CeilDiv(output.X().v * output.Y().v, tune_params.out_block_spatial); + dispatchData.gws[1] = CeilDiv(output.Feature().v, tune_params.out_block_features * simd) * simd * k_slices; + dispatchData.gws[2] = output.Batch().v; - kd.lws0 = 1; - kd.lws1 = simd * k_slices; - kd.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = simd * k_slices; + dispatchData.lws[2] = 1; - kd.cldnnStyle = {0, 0, 0, 0, 0}; - kd.gemmStyle = {0, 0, 0, 0, 0, 0}; + dispatchData.cldnnStyle = {0, 0, 0, 0, 0}; + dispatchData.gemmStyle = {0, 0, 0, 0, 0, 0}; - kd.cldnnStyle.blockWidth = tune_params.out_block_spatial; - kd.cldnnStyle.blockHeight = tune_params.out_block_features; - kd.cldnnStyle.prefetch = k_slices; + dispatchData.cldnnStyle.blockWidth = tune_params.out_block_spatial; + dispatchData.cldnnStyle.blockHeight = tune_params.out_block_features; + dispatchData.cldnnStyle.prefetch = k_slices; - kd.efficiency = FORCE_PRIORITY_2; + dispatchData.efficiency = FORCE_PRIORITY_2; 
auto in_f = params.weights.IFM().v; auto out_f = params.weights.OFM().v; @@ -158,14 +158,14 @@ ConvolutionKernelBase::DispatchData Convolution_kernel_b_fs_yx_fsv16_imad_1x1::S general_is_faster |= in_f == 256 && out_f == 128 && out_x == 3 && out_y == 3 && batch == 1; if (general_is_faster && !x_strided) { - kd.efficiency = FORCE_PRIORITY_3; + dispatchData.efficiency = FORCE_PRIORITY_3; } // Better to use kernel with 4 input features in a loop if (static_cast(params.weights.IFM().v) / static_cast(Align(params.weights.IFM().v, fsv)) < 0.5f) - kd.efficiency = FORCE_PRIORITY_4; + dispatchData.efficiency = FORCE_PRIORITY_4; - return kd; + return dispatchData; } // SetDefault bool Convolution_kernel_b_fs_yx_fsv16_imad_1x1::Validate(const Params& params, const optional_params& options) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_imad_1x1.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_imad_1x1.h index 44f3f4ac82aeab..90c5da271ea048 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_imad_1x1.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16_imad_1x1.h @@ -35,7 +35,7 @@ class Convolution_kernel_b_fs_yx_fsv16_imad_1x1 : public ConvolutionKernelBase { protected: bool Validate(const Params& params, const optional_params& options) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& params, int autoTuneIndex = -1) const override; bool NeedPaddedInput() const override { return true; } WeightsLayout GetPreferredWeightsLayout(const 
convolution_params&) const override; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv4_int8.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv4_int8.cpp index 8b43b5914051ad..47f1fbe39ddc10 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv4_int8.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv4_int8.cpp @@ -47,20 +47,20 @@ ParamsKey ConvolutionKernel_b_fs_yx_fsv4_int8::GetSupportedKey() const { } ConvolutionKernelBase::DispatchData ConvolutionKernel_b_fs_yx_fsv4_int8::SetDefault(const convolution_params& cp, int) const { - DispatchData runInfo = ConvolutionKernelBase::SetDefault(cp); + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(cp); - runInfo.efficiency = FORCE_PRIORITY_9; + dispatchData.efficiency = FORCE_PRIORITY_9; if (cp.output.X().v > 512 && cp.filterSize.x == 5 && cp.filterSize.y == 5) - runInfo.efficiency = FORCE_PRIORITY_2; - runInfo.gws0 = CeilDiv(cp.output.X().v, sub_group_size) / 2; - runInfo.gws1 = cp.output.Y().v; - runInfo.gws2 = sub_group_size; + dispatchData.efficiency = FORCE_PRIORITY_2; + dispatchData.gws[0] = CeilDiv(cp.output.X().v, sub_group_size) / 2; + dispatchData.gws[1] = cp.output.Y().v; + dispatchData.gws[2] = sub_group_size; - runInfo.lws0 = 1; - runInfo.lws1 = 1; - runInfo.lws2 = sub_group_size; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = sub_group_size; - return runInfo; + return dispatchData; } bool ConvolutionKernel_b_fs_yx_fsv4_int8::Validate(const Params& p, const optional_params& o) const { @@ -85,10 +85,10 @@ bool ConvolutionKernel_b_fs_yx_fsv4_int8::Validate(const Params& p, const option return true; } -JitConstants ConvolutionKernel_b_fs_yx_fsv4_int8::GetJitConstants(const 
convolution_params& params, const DispatchData& runInfo) const { - auto jit = Parent::GetJitConstants(params, runInfo); +JitConstants ConvolutionKernel_b_fs_yx_fsv4_int8::GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); - jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", runInfo.lws2)); + jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", dispatchData.lws[2])); jit.Merge(MakeTypeJitConstants(GetAccumulatorType(params), "ACCUMULATOR")); jit.Merge(MakeTypeJitConstants(GetActivationType(params), "ACTIVATION")); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv4_int8.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv4_int8.h index 9cbc77559924f1..b4e8bbdd8846f2 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv4_int8.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv4_int8.h @@ -34,7 +34,7 @@ class ConvolutionKernel_b_fs_yx_fsv4_int8 : public ConvolutionKernelBase { return WeightsLayout::os_is_yx_osv16_isv4; } - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; bool Validate(const Params& p, const optional_params& o) const override; bool NeedPaddedInput() const override { return true; } DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv_16_32_imad_dw.cpp 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv_16_32_imad_dw.cpp index d3f3a47a4bcbff..4b8053e77ab839 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv_16_32_imad_dw.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv_16_32_imad_dw.cpp @@ -242,7 +242,7 @@ bool ConvolutionKernel_b_fs_yx_fsv_16_32_imad_dw::ValidateAutoTuneParams(const c ConvolutionKernel_b_fs_yx_fsv_16_32_imad_dw::DispatchData ConvolutionKernel_b_fs_yx_fsv_16_32_imad_dw::SetDefault(const convolution_params& params, int autoTuneIndex) const { - DispatchData kd; + DispatchData dispatchData; auto& out = params.output; auto tune_params = GetAutoTuneParams(params, autoTuneIndex); @@ -254,29 +254,21 @@ ConvolutionKernel_b_fs_yx_fsv_16_32_imad_dw::SetDefault(const convolution_params fsv = 32; } - std::vector global = { + dispatchData.gws = { Align(CeilDiv(out.X().v, tune_params.tile_x), tune_params.lws0), - Align(out.Y().v, tune_params.lws1), + Align(out.Y().v, tune_params.lws1), CeilDiv(out.Feature().v, fsv) * tune_params.simd * out.Batch().v }; - std::vector local = { tune_params.lws0, tune_params.lws1, tune_params.simd }; + dispatchData.lws = { tune_params.lws0, tune_params.lws1, tune_params.simd }; - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; + dispatchData.gemmStyle = { 0, 0, 0, 0, 0, 0 }; - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; + dispatchData.cldnnStyle.blockWidth = tune_params.tile_x; + dispatchData.cldnnStyle.prefetch = tune_params.preload_input_slm; - kd.gemmStyle = { 0, 0, 0, 0, 0, 0 }; + dispatchData.efficiency = params.stride.x == 1 ? FORCE_PRIORITY_1 : FORCE_PRIORITY_2; - kd.cldnnStyle.blockWidth = tune_params.tile_x; - kd.cldnnStyle.prefetch = tune_params.preload_input_slm; - - kd.efficiency = params.stride.x == 1 ? 
FORCE_PRIORITY_1 : FORCE_PRIORITY_2; - - return kd; + return dispatchData; } bool ConvolutionKernel_b_fs_yx_fsv_16_32_imad_dw::HasPaddedInput(const convolution_params& params) const { @@ -317,20 +309,20 @@ bool ConvolutionKernel_b_fs_yx_fsv_16_32_imad_dw::ParamsHavePadding(const convol return needs_pad; } -JitConstants ConvolutionKernel_b_fs_yx_fsv_16_32_imad_dw::GetJitConstants(const convolution_params& params, const DispatchData& kd) const { - auto mem_consts = Parent::GetJitConstants(params, kd); +JitConstants ConvolutionKernel_b_fs_yx_fsv_16_32_imad_dw::GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const { + auto mem_consts = Parent::GetJitConstants(params, dispatchData); constexpr size_t imad_width = 4; auto filter_spatial = params.weights.X().v * params.weights.Y().v; auto filter_blocked = filter_spatial / imad_width * imad_width; - mem_consts.AddConstant(MakeJitConstant("LWS0", kd.lws0)); - mem_consts.AddConstant(MakeJitConstant("LWS1", kd.lws1)); - mem_consts.AddConstant(MakeJitConstant("SIMD", kd.lws2)); + mem_consts.AddConstant(MakeJitConstant("LWS0", dispatchData.lws[0])); + mem_consts.AddConstant(MakeJitConstant("LWS1", dispatchData.lws[1])); + mem_consts.AddConstant(MakeJitConstant("SIMD", dispatchData.lws[2])); - mem_consts.AddConstant(MakeJitConstant("TILE_X", kd.cldnnStyle.blockWidth)); + mem_consts.AddConstant(MakeJitConstant("TILE_X", dispatchData.cldnnStyle.blockWidth)); mem_consts.AddConstant(MakeJitConstant("FILTER_BLOCKED", filter_blocked)); - mem_consts.AddConstant(MakeJitConstant("PRELOAD_INPUT_TO_SLM", kd.cldnnStyle.prefetch)); + mem_consts.AddConstant(MakeJitConstant("PRELOAD_INPUT_TO_SLM", dispatchData.cldnnStyle.prefetch)); auto needs_boundary_check = ParamsHavePadding(params) && (!HasPaddedInput(params) || diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv_16_32_imad_dw.hpp 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv_16_32_imad_dw.hpp index 31fe412602caf9..d191db2f373706 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv_16_32_imad_dw.hpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv_16_32_imad_dw.hpp @@ -43,7 +43,7 @@ class ConvolutionKernel_b_fs_yx_fsv_16_32_imad_dw : public ConvolutionKernelBase bool NeedPaddedInput() const override { return false; } bool HasPaddedInput(const convolution_params& params) const; bool ParamsHavePadding(const convolution_params& params) const; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& params, int autoTuneIndex = -1) const override; struct AutoTuneParams { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_zyx_fsv16.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_zyx_fsv16.cpp index 4011302ebccf84..19d0398ae8651e 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_zyx_fsv16.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_zyx_fsv16.cpp @@ -104,7 +104,7 @@ ParamsKey ConvolutionKernel_b_fs_zyx_fsv16::GetSupportedKey() const { ConvolutionKernelBase::DispatchData ConvolutionKernel_b_fs_zyx_fsv16::SetDefault(const convolution_params& params, int autoTuneIndex) const { - DispatchData kd = ConvolutionKernelBase::SetDefault(params, autoTuneIndex); + DispatchData dispatchData = 
ConvolutionKernelBase::SetDefault(params, autoTuneIndex); const auto& out = params.output; const auto& input = params.inputs[0]; @@ -130,36 +130,36 @@ ConvolutionKernelBase::DispatchData ConvolutionKernel_b_fs_zyx_fsv16::SetDefault else break; } - kd.cldnnStyle.blockWidth = ow_block; + dispatchData.cldnnStyle.blockWidth = ow_block; if (out.GetDType() == Datatype::F16) { - kd.lws0 = sub_group_size; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = sub_group_size; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - kd.gws0 = (f / 2); - kd.gws1 = CeilDiv(y, oh_block) * CeilDiv(x, ow_block) * z; - kd.gws2 = b % 2 == 0 ? b / 2 : b; // unroll mb by 2 + dispatchData.gws[0] = (f / 2); + dispatchData.gws[1] = CeilDiv(y, oh_block) * CeilDiv(x, ow_block) * z; + dispatchData.gws[2] = b % 2 == 0 ? b / 2 : b; // unroll mb by 2 } else { - kd.lws0 = sub_group_size; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = sub_group_size; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; auto ocb = (f % 32 == 0) ? 32 : 16; - kd.gws0 = 16; - kd.gws1 = CeilDiv(y, oh_block) * CeilDiv(x, ow_block) * z; - kd.gws2 = b * f / ocb; + dispatchData.gws[0] = 16; + dispatchData.gws[1] = CeilDiv(y, oh_block) * CeilDiv(x, ow_block) * z; + dispatchData.gws[2] = b * f / ocb; } } else if (ver_16mb16c) { f = (g > 1) ? f/g : Align(f, 16); - kd.lws0 = sub_group_size; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = sub_group_size; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - kd.gws0 = f; - kd.gws1 = x * y * z; - kd.gws2 = (out.GetDType() == Datatype::F16) ? b / 32 : b / 16; + dispatchData.gws[0] = f; + dispatchData.gws[1] = x * y * z; + dispatchData.gws[2] = (out.GetDType() == Datatype::F16) ? 
b / 32 : b / 16; - kd.cldnnStyle.blockWidth = 1; + dispatchData.cldnnStyle.blockWidth = 1; } else { auto oh_block = 1; f = Align(f / g, 16); @@ -180,22 +180,22 @@ ConvolutionKernelBase::DispatchData ConvolutionKernel_b_fs_zyx_fsv16::SetDefault ocb /= 2; } - kd.cldnnStyle.blockWidth = ow_block; + dispatchData.cldnnStyle.blockWidth = ow_block; - kd.gws0 = ocb; - kd.gws1 = CeilDiv(y, oh_block) * CeilDiv(x, ow_block) * z; - kd.gws2 = b * (f / ocb) * g; + dispatchData.gws[0] = ocb; + dispatchData.gws[1] = CeilDiv(y, oh_block) * CeilDiv(x, ow_block) * z; + dispatchData.gws[2] = b * (f / ocb) * g; - kd.lws0 = sub_group_size; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = sub_group_size; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; } if (b == 1) - kd.efficiency = FORCE_PRIORITY_2; + dispatchData.efficiency = FORCE_PRIORITY_2; else - kd.efficiency = FORCE_PRIORITY_7; + dispatchData.efficiency = FORCE_PRIORITY_7; - return kd; + return dispatchData; } bool ConvolutionKernel_b_fs_zyx_fsv16::Validate(const Params& p, const optional_params& o) const { @@ -231,10 +231,10 @@ bool ConvolutionKernel_b_fs_zyx_fsv16::Validate(const Params& p, const optional_ } JitConstants ConvolutionKernel_b_fs_zyx_fsv16::GetJitConstants(const convolution_params& params, - const DispatchData& runInfo) const { + const DispatchData& dispatchData) const { auto input = params.inputs[0]; auto output = params.output; - auto jit = Parent::GetJitConstants(params, runInfo); + auto jit = Parent::GetJitConstants(params, dispatchData); const bool is_1stconv = input.Feature().v == 3 && input.GetLayout() == DataLayout::bfzyx; const bool ver_16mb16c = !is_1stconv && ((output.GetDType() == Datatype::F16 && output.Batch().v % 32 == 0) || @@ -253,9 +253,9 @@ JitConstants ConvolutionKernel_b_fs_zyx_fsv16::GetJitConstants(const convolution else jit.AddConstant(MakeJitConstant("CASE_3D", 1)); - jit.AddConstant(MakeJitConstant("LWS_0", runInfo.lws0)); - jit.AddConstant(MakeJitConstant("LWS_1", 
runInfo.lws1)); - jit.AddConstant(MakeJitConstant("LWS_2", runInfo.lws2)); + jit.AddConstant(MakeJitConstant("LWS_0", dispatchData.lws[0])); + jit.AddConstant(MakeJitConstant("LWS_1", dispatchData.lws[1])); + jit.AddConstant(MakeJitConstant("LWS_2", dispatchData.lws[2])); if (is_1stconv) { if (output.GetDType() == Datatype::F16) { @@ -267,11 +267,11 @@ JitConstants ConvolutionKernel_b_fs_zyx_fsv16::GetJitConstants(const convolution } else if (ver_16mb16c) { jit.AddConstant(MakeJitConstant("OCB", 1)); } else { - jit.AddConstant(MakeJitConstant("OCB", runInfo.gws0)); + jit.AddConstant(MakeJitConstant("OCB", dispatchData.gws[0])); } jit.AddConstant(MakeJitConstant("SUM_SCALE", 1)); - auto blockWidth = runInfo.cldnnStyle.blockWidth; + auto blockWidth = dispatchData.cldnnStyle.blockWidth; if (ver_16mb16c) { jit.AddConstant(MakeJitConstant("MB_BLOCK", 16)); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_zyx_fsv16.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_zyx_fsv16.h index 19fa02c7a4eb85..cd947315c23458 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_zyx_fsv16.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_zyx_fsv16.h @@ -55,7 +55,7 @@ class ConvolutionKernel_b_fs_zyx_fsv16 : public ConvolutionKernelBase { } bool Validate(const Params& p, const optional_params& o) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; std::vector GetSupportedFusedOps() const override { return { FusedOpType::ELTWISE, diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_zyx_fsv16_imad.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_zyx_fsv16_imad.cpp index cfd6abde579f60..82b1252a826177 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_zyx_fsv16_imad.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_zyx_fsv16_imad.cpp @@ -204,7 +204,7 @@ Convolution_kernel_b_fs_zyx_fsv16_imad::GetBlockParams(const convolution_params& size_t in_block_depth = 1; bool break_external_loop = false; - + for (size_t d = 1; d < 16; ++d) { if (params.output.Z().v % d != 0) continue; @@ -283,7 +283,7 @@ float Convolution_kernel_b_fs_zyx_fsv16_imad::EstimateOccupancy(const convolutio } float Convolution_kernel_b_fs_zyx_fsv16_imad::EstimateSLMUsage(const convolution_params& params, const BlockParams& block) const { - size_t slm_elements = block.output_block_width * block.output_block_height * block.output_block_depth * + size_t slm_elements = block.output_block_width * block.output_block_height * block.output_block_depth * block.output_block_features * (block.feature_slm_split - 1); size_t slm_bytes = slm_elements * BytesPerElement(GetAccumulatorType(params)); @@ -331,8 +331,8 @@ KernelsData Convolution_kernel_b_fs_zyx_fsv16_imad::GetKernelsData(const Params& } JitConstants Convolution_kernel_b_fs_zyx_fsv16_imad::GetJitConstants(const convolution_params& params, - const DispatchData& kd) const { - auto mem_consts = Parent::GetJitConstants(params, kd); + const DispatchData& dispatchData) const { + auto mem_consts = Parent::GetJitConstants(params, dispatchData); auto block_params = GetBlockParams(params); @@ -369,7 +369,7 @@ JitConstants Convolution_kernel_b_fs_zyx_fsv16_imad::GetJitConstants(const convo idx_order[idx_order.size() - 3] = "out_z"; } } - + if 
(block_params.output_block_height != 1) { loop_axes.push_back(Tensor::DataChannelName::Y); } else { @@ -392,28 +392,28 @@ JitConstants Convolution_kernel_b_fs_zyx_fsv16_imad::GetJitConstants(const convo } // GetJitConstants ConvolutionKernelBase::DispatchData Convolution_kernel_b_fs_zyx_fsv16_imad::SetDefault(const convolution_params& params, - int) const { - DispatchData kd; + int) const { + DispatchData dispatchData; const auto& output = params.output; const auto& weights = params.weights; auto block_params = GetBlockParams(params); - kd.gws0 = CeilDiv(output.X().v, block_params.output_block_width); - kd.gws1 = CeilDiv(output.Y().v, block_params.output_block_height) * CeilDiv(output.Z().v, block_params.output_block_depth); - kd.gws2 = output.Batch().v * CeilDiv(weights.OFM().v, block_params.output_block_features) * params.groups * simd * block_params.feature_slm_split; + dispatchData.gws[0] = CeilDiv(output.X().v, block_params.output_block_width); + dispatchData.gws[1] = CeilDiv(output.Y().v, block_params.output_block_height) * CeilDiv(output.Z().v, block_params.output_block_depth); + dispatchData.gws[2] = output.Batch().v * CeilDiv(weights.OFM().v, block_params.output_block_features) * params.groups * simd * block_params.feature_slm_split; - kd.lws0 = 1; - kd.lws1 = 1; - kd.lws2 = simd * block_params.feature_slm_split; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = simd * block_params.feature_slm_split; - kd.cldnnStyle = {0, 0, 0, 0, 0}; - kd.gemmStyle = {0, 0, 0, 0, 0, 0}; + dispatchData.cldnnStyle = {0, 0, 0, 0, 0}; + dispatchData.gemmStyle = {0, 0, 0, 0, 0, 0}; - kd.efficiency = FORCE_PRIORITY_2; + dispatchData.efficiency = FORCE_PRIORITY_2; if (static_cast(params.weights.IFM().v) / static_cast(Align(params.weights.IFM().v, fsv)) < 0.5f) - kd.efficiency = FORCE_PRIORITY_4; + dispatchData.efficiency = FORCE_PRIORITY_4; - return kd; + return dispatchData; } // SetDefault bool Convolution_kernel_b_fs_zyx_fsv16_imad::Validate(const 
Params& params, const optional_params& options) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_zyx_fsv16_imad.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_zyx_fsv16_imad.h index bdde4a53259f6d..35427cb91cbba2 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_zyx_fsv16_imad.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_zyx_fsv16_imad.h @@ -32,7 +32,7 @@ class Convolution_kernel_b_fs_zyx_fsv16_imad : public ConvolutionKernelBase { protected: bool Validate(const Params& params, const optional_params& options) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& params, int autoTuneIndex = -1) const override; bool NeedPaddedInput() const override { return true; } WeightsLayout GetPreferredWeightsLayout(const convolution_params& p) const override { @@ -50,7 +50,7 @@ class Convolution_kernel_b_fs_zyx_fsv16_imad : public ConvolutionKernelBase { size_t output_block_width; size_t output_block_height; size_t output_block_depth; - + size_t output_block_features; size_t input_block_width; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_base.cpp index 11088e250a3b95..5386cc3be8a7c3 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_base.cpp +++ 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_base.cpp @@ -46,9 +46,9 @@ bool ConvolutionKernelBase::Validate(const Params& p, const optional_params& o) return true; } -JitConstants ConvolutionKernelBase::GetJitConstants(const convolution_params& params, const DispatchData& kd) const { +JitConstants ConvolutionKernelBase::GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const { JitConstants mem_consts = WeightBiasKernelBase::GetJitConstants(params); - mem_consts.Merge(GetFusedPrimitivesJitConstants(params, kd)); + mem_consts.Merge(GetFusedPrimitivesJitConstants(params, dispatchData)); const auto& padding = params.padding; const auto& input = params.inputs[0]; @@ -101,12 +101,12 @@ JitConstants ConvolutionKernelBase::GetJitConstants(const convolution_params& pa std::vector unrollLoopParams{params.filterSize.x, params.filterSize.y, - (uint32_t)kd.gemmStyle.globalWorkSizeDX, - (uint32_t)kd.gemmStyle.globalWorkSizeDY, - (uint32_t)kd.gemmStyle.globalWorkSizeDZ, - (uint32_t)kd.gemmStyle.subBlockDimM, - (uint32_t)kd.gemmStyle.subBlockDimK, - (uint32_t)kd.gemmStyle.subBlockDimN}; + (uint32_t)dispatchData.gemmStyle.globalWorkSizeDX, + (uint32_t)dispatchData.gemmStyle.globalWorkSizeDY, + (uint32_t)dispatchData.gemmStyle.globalWorkSizeDZ, + (uint32_t)dispatchData.gemmStyle.subBlockDimM, + (uint32_t)dispatchData.gemmStyle.subBlockDimK, + (uint32_t)dispatchData.gemmStyle.subBlockDimN}; auto loopCount = *std::max_element(unrollLoopParams.begin(), unrollLoopParams.end()); @@ -116,13 +116,15 @@ JitConstants ConvolutionKernelBase::GetJitConstants(const convolution_params& pa return mem_consts; } -bool ConvolutionKernelBase::CheckWorkGroups(const ConvolutionKernelBase::DispatchData& kd) { - if (kd.gws0 == 0 || kd.gws1 == 0 || kd.gws2 == 0 || kd.lws0 == 0 || kd.lws1 == 0 || kd.lws2 == 0) { +bool ConvolutionKernelBase::CheckWorkGroups(const ConvolutionKernelBase::DispatchData& dispatchData) { + 
if (dispatchData.gws.size() != 3 || dispatchData.lws.size() != 3) return false; - } - if ((kd.gws0 % kd.lws0) != 0 || (kd.gws1 % kd.lws1) != 0 || (kd.gws2 % kd.lws2) != 0) { - return false; + for (size_t i = 0; i < dispatchData.gws.size(); i++) { + if (dispatchData.gws[i] == 0 || dispatchData.lws[i] == 0) + return false; + if ((dispatchData.gws[i] % dispatchData.lws[i]) != 0) + return false; } return true; @@ -164,43 +166,33 @@ bool ConvolutionKernelBase::CheckPitchForSplitOnly(const convolution_params& par } ConvolutionKernelBase::DispatchData ConvolutionKernelBase::SetDefault(const convolution_params& params, int) const { - DispatchData kd; + DispatchData dispatchData; const auto& out = params.output; - kd.fp16UnitUsed = out.GetDType() == Datatype::F16; - std::vector global; if (params.output.GetLayout() == DataLayout::bfyx || params.output.GetLayout() == DataLayout::byxf) { - global = {out.X().v, out.Y().v, out.Feature().v * out.Batch().v}; + dispatchData.gws = {out.X().v, out.Y().v, out.Feature().v * out.Batch().v}; } else if (params.output.GetLayout() == DataLayout::bfzyx) { - global = {out.X().v, out.Y().v * out.Z().v, out.Feature().v * out.Batch().v}; + dispatchData.gws = {out.X().v, out.Y().v * out.Z().v, out.Feature().v * out.Batch().v}; } else { - global = {out.Feature().v * out.Batch().v, out.X().v, out.Y().v}; + dispatchData.gws = {out.Feature().v * out.Batch().v, out.X().v, out.Y().v}; } - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - kd.cldnnStyle.blockWidth = 1; - kd.cldnnStyle.blockHeight = 1; - kd.cldnnStyle.prefetch = 0; - kd.cldnnStyle.inputBlockArraySize = 0; - kd.cldnnStyle.inputBlockWidth = 0; - - kd.gemmStyle.globalWorkSizeDX = 1; - kd.gemmStyle.globalWorkSizeDY = 1; - kd.gemmStyle.globalWorkSizeDZ = 1; - kd.gemmStyle.subBlockDimK = 1; - kd.gemmStyle.subBlockDimM = 0; - 
kd.gemmStyle.subBlockDimN = 0; - kd.efficiency = DONT_USE_IF_HAVE_SOMETHING_ELSE; - return kd; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); + + dispatchData.cldnnStyle.blockWidth = 1; + dispatchData.cldnnStyle.blockHeight = 1; + dispatchData.cldnnStyle.prefetch = 0; + dispatchData.cldnnStyle.inputBlockArraySize = 0; + dispatchData.cldnnStyle.inputBlockWidth = 0; + + dispatchData.gemmStyle.globalWorkSizeDX = 1; + dispatchData.gemmStyle.globalWorkSizeDY = 1; + dispatchData.gemmStyle.globalWorkSizeDZ = 1; + dispatchData.gemmStyle.subBlockDimK = 1; + dispatchData.gemmStyle.subBlockDimM = 0; + dispatchData.gemmStyle.subBlockDimN = 0; + dispatchData.efficiency = DONT_USE_IF_HAVE_SOMETHING_ELSE; + return dispatchData; } KernelsData ConvolutionKernelBase::GetCommonKernelsData(const Params& params, @@ -232,21 +224,21 @@ KernelsData ConvolutionKernelBase::GetCommonKernelsData(const Params& params, if (kd.reorderInput && !options.allowInputReordering) return {}; } - DispatchData runInfo = SetDefault(newParams, autoTuneIndex); + DispatchData dispatchData = SetDefault(newParams, autoTuneIndex); - if (!CheckWorkGroups(runInfo)) { + if (!CheckWorkGroups(dispatchData)) { // Internal Error - wrong calculation of global/local work group sizes return {}; } auto finalKernelName = GetKernelName(newParams); - auto cldnnJit = GetJitConstants(newParams, runInfo); + auto cldnnJit = GetJitConstants(newParams, dispatchData); auto entryPoint = GetEntryPoint(finalKernelName, newParams.layerID, options); auto jit = CreateJit(finalKernelName, cldnnJit, entryPoint); auto& kernel = kd.kernels[0]; FillCLKernelData(kernel, - runInfo, + dispatchData, params.engineInfo, finalKernelName, jit, @@ -276,7 +268,7 @@ KernelsData ConvolutionKernelBase::GetCommonKernelsData(const Params& params, } kernel.arguments.push_back({ArgumentDescriptor::Types::SPLIT, 0}); - kd.estimatedTime = runInfo.efficiency; + kd.estimatedTime = dispatchData.efficiency; kd.autoTuneIndex 
= autoTuneIndex; return {kd}; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_base.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_base.h index d64f681d0b705d..24bbbba9d02678 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_base.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_base.h @@ -66,8 +66,8 @@ class ConvolutionKernelBase : public WeightBiasKernelBase { virtual std::string GetKernelName(const convolution_params&) const { return kernelName; } virtual bool NeedPaddedInput() const { return false; } bool Validate(const Params& p, const optional_params& o) const override; - virtual JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const; - virtual JitConstants GetFusedPrimitivesJitConstants(const convolution_params& params, const DispatchData& kd) const; + virtual JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const; + virtual JitConstants GetFusedPrimitivesJitConstants(const convolution_params& params, const DispatchData& dispatchData) const; virtual DispatchData SetDefault(const convolution_params& params, int autoTuneIndex = -1) const; static bool CheckWorkGroups(const DispatchData&); static bool CheckPitchForSplitOnly(const convolution_params& params); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1.cpp index b016fe7bce0dcc..c7a0b9f26dda87 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1.cpp +++ 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1.cpp @@ -41,7 +41,7 @@ ParamsKey ConvolutionKernel_bfyx_1x1::GetSupportedKey() const { } ConvolutionKernelBase::DispatchData ConvolutionKernel_bfyx_1x1::SetDefault(const convolution_params& params, int) const { - DispatchData kd = ConvolutionKernelBase::SetDefault(params); + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(params); const auto& out = params.output; @@ -50,17 +50,17 @@ ConvolutionKernelBase::DispatchData ConvolutionKernel_bfyx_1x1::SetDefault(const auto f = out.Feature().v; auto b = out.Batch().v; - kd.gws0 = Align(x * y, 16) / 16; - kd.gws1 = Align(f, 16); - kd.gws2 = b; + dispatchData.gws[0] = Align(x * y, 16) / 16; + dispatchData.gws[1] = Align(f, 16); + dispatchData.gws[2] = b; - kd.lws0 = 1; - kd.lws1 = 16; - kd.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 16; + dispatchData.lws[2] = 1; - kd.efficiency = FORCE_PRIORITY_2; + dispatchData.efficiency = FORCE_PRIORITY_2; - return kd; + return dispatchData; } bool ConvolutionKernel_bfyx_1x1::Validate(const Params& p, const optional_params& o) const { @@ -86,8 +86,8 @@ bool ConvolutionKernel_bfyx_1x1::Validate(const Params& p, const optional_params return true; } -JitConstants ConvolutionKernel_bfyx_1x1::GetJitConstants(const convolution_params& params, const DispatchData& runInfo) const { - auto jit = Parent::GetJitConstants(params, runInfo); +JitConstants ConvolutionKernel_bfyx_1x1::GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); if (params.output.Feature().v % 16) jit.AddConstant(MakeJitConstant("LEFTOVERS", 1)); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1.h index 
62d5cb23cbdc40..fb4d6267956e1c 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1.h @@ -36,6 +36,6 @@ class ConvolutionKernel_bfyx_1x1 : public ConvolutionKernelBase { } bool Validate(const Params& p, const optional_params& o) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1_gemm_buf.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1_gemm_buf.cpp index ac2ac409c3abd4..c15ffcbe406402 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1_gemm_buf.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1_gemm_buf.cpp @@ -32,7 +32,7 @@ ParamsKey ConvolutionKernel_bfyx_1x1_gemm_buf::GetSupportedKey() const { } ConvolutionKernelBase::DispatchData ConvolutionKernel_bfyx_1x1_gemm_buf::SetDefault(const convolution_params& params, int) const { - DispatchData kd = ConvolutionKernelBase::SetDefault(params); + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(params); const auto& out = params.output; @@ -41,17 +41,17 @@ ConvolutionKernelBase::DispatchData ConvolutionKernel_bfyx_1x1_gemm_buf::SetDefa auto f = out.Feature().v; auto b = out.Batch().v; - kd.gws0 = Align(f, 16); - kd.gws1 = CeilDiv(x * 
y, 16); - kd.gws2 = b; + dispatchData.gws[0] = Align(f, 16); + dispatchData.gws[1] = CeilDiv(x * y, 16); + dispatchData.gws[2] = b; - kd.lws0 = 16; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = 16; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - kd.efficiency = FORCE_PRIORITY_1; + dispatchData.efficiency = FORCE_PRIORITY_1; - return kd; + return dispatchData; } bool ConvolutionKernel_bfyx_1x1_gemm_buf::Validate(const Params& p, const optional_params& o) const { @@ -75,8 +75,8 @@ bool ConvolutionKernel_bfyx_1x1_gemm_buf::Validate(const Params& p, const option return true; } -JitConstants ConvolutionKernel_bfyx_1x1_gemm_buf::GetJitConstants(const convolution_params& params, const DispatchData& runInfo) const { - auto jit = Parent::GetJitConstants(params, runInfo); +JitConstants ConvolutionKernel_bfyx_1x1_gemm_buf::GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); const auto& out = params.output; const auto& input = params.inputs[0]; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1_gemm_buf.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1_gemm_buf.h index 1b7b7bc8cbc448..de75aca2f1d137 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1_gemm_buf.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1_gemm_buf.h @@ -36,6 +36,6 @@ class ConvolutionKernel_bfyx_1x1_gemm_buf : public ConvolutionKernelBase { } bool Validate(const Params& p, const optional_params& o) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + 
JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1_opt.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1_opt.cpp index 2537828cfe10de..d8850b9eaf9188 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1_opt.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1_opt.cpp @@ -76,24 +76,24 @@ static block_params get_out_block_size(const convolution_params& p) { ConvolutionKernelBase::DispatchData convolution_kernel_bfyx_1x1_opt::SetDefault(const convolution_params& cp, int) const { - DispatchData runInfo = ConvolutionKernelBase::SetDefault(cp); + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(cp); constexpr size_t sub_group_size = 8; - runInfo.efficiency = FORCE_PRIORITY_3; + dispatchData.efficiency = FORCE_PRIORITY_3; auto block = get_out_block_size(cp); - runInfo.gws0 = cp.output.X().v / block.out_width; - runInfo.gws1 = cp.output.Y().v / block.out_height; - runInfo.gws2 = - 2 * (cp.output.Feature().v * cp.output.Batch().v) / block.out_depth; // process 8 output channels per Workitem + dispatchData.gws[0] = cp.output.X().v / block.out_width; + dispatchData.gws[1] = cp.output.Y().v / block.out_height; + // process 8 output channels per Workitem + dispatchData.gws[2] = 2 * (cp.output.Feature().v * cp.output.Batch().v) / block.out_depth; - runInfo.lws0 = 1; - runInfo.lws1 = 1; - runInfo.lws2 = 2 * sub_group_size; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 2 * sub_group_size; - return runInfo; + return dispatchData; } bool 
convolution_kernel_bfyx_1x1_opt::Validate(const Params& p, const optional_params& o) const { @@ -128,8 +128,8 @@ bool convolution_kernel_bfyx_1x1_opt::Validate(const Params& p, const optional_p } JitConstants convolution_kernel_bfyx_1x1_opt::GetJitConstants(const convolution_params& params, - const DispatchData& runInfo) const { - auto jit = Parent::GetJitConstants(params, runInfo); + const DispatchData& dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); auto block = get_out_block_size(params); jit.AddConstant(MakeJitConstant("OUT_BLOCK_WIDTH", block.out_width)); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1_opt.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1_opt.h index ce8f0a09429baa..9ce8b9e286cf9d 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1_opt.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_1x1_opt.h @@ -31,9 +31,9 @@ class convolution_kernel_bfyx_1x1_opt : public ConvolutionKernelBase { protected: WeightsLayout GetPreferredWeightsLayout(const convolution_params &) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; bool Validate(const Params& p, const optional_params& o) const override; bool NeedPaddedInput() const override { return true; } DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_3x3_dw_opt.cpp 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_3x3_dw_opt.cpp index 48d0cf23b8e92b..45c57a8fd195b8 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_3x3_dw_opt.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_3x3_dw_opt.cpp @@ -71,9 +71,8 @@ bool ConvolutionKernel_bfyx_3x3_dw_opt::Validate(const Params& p, const optional return true; } -ConvolutionKernel_bfyx_3x3_dw_opt::AutoTuneOption ConvolutionKernel_bfyx_3x3_dw_opt::GetAutoTuneOptions( - const Params&, - int autoTuneIndex) const { +ConvolutionKernel_bfyx_3x3_dw_opt::AutoTuneOption ConvolutionKernel_bfyx_3x3_dw_opt::GetAutoTuneOptions(const Params&, + int autoTuneIndex) const { if ((autoTuneIndex >= 0) && (autoTuneIndex < static_cast(autoTuneOptions.size()))) { return autoTuneOptions[autoTuneIndex]; } @@ -87,7 +86,7 @@ ConvolutionKernelBase::DispatchData ConvolutionKernel_bfyx_3x3_dw_opt::SetDefaul int autoTuneIndex) const { constexpr int simdSize = 16; - DispatchData runInfo = Parent::SetDefault(params); + DispatchData dispatchData = Parent::SetDefault(params); auto options = GetAutoTuneOptions(params, autoTuneIndex); @@ -96,28 +95,28 @@ ConvolutionKernelBase::DispatchData ConvolutionKernel_bfyx_3x3_dw_opt::SetDefaul const int numTilesY = static_cast( std::ceil(static_cast(params.inputs[0].Y().v) / static_cast(options.tileDims.y))); - runInfo.cldnnStyle.blockWidth = options.tileDims.x; - runInfo.cldnnStyle.blockHeight = options.tileDims.y; - runInfo.gws0 = numTilesX * simdSize; - runInfo.gws1 = numTilesY; - runInfo.gws2 = params.inputs[0].Feature().v * params.inputs[0].Batch().v; - runInfo.lws0 = simdSize; - runInfo.lws1 = 1; - runInfo.lws2 = 1; + dispatchData.cldnnStyle.blockWidth = options.tileDims.x; + dispatchData.cldnnStyle.blockHeight = options.tileDims.y; + dispatchData.gws[0] = numTilesX * simdSize; + 
dispatchData.gws[1] = numTilesY; + dispatchData.gws[2] = params.inputs[0].Feature().v * params.inputs[0].Batch().v; + dispatchData.lws[0] = simdSize; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - runInfo.efficiency = FORCE_PRIORITY_5; + dispatchData.efficiency = FORCE_PRIORITY_5; - return runInfo; + return dispatchData; } JitConstants ConvolutionKernel_bfyx_3x3_dw_opt::GetJitConstants(const convolution_params& params, - const DispatchData& kd) const { - stSize tileDims = {kd.cldnnStyle.blockWidth, kd.cldnnStyle.blockHeight}; - auto mem_consts = ConvolutionKernelBase::GetJitConstants(params, kd); + const DispatchData& dispatchData) const { + stSize tileDims = {dispatchData.cldnnStyle.blockWidth, dispatchData.cldnnStyle.blockHeight}; + auto mem_consts = ConvolutionKernelBase::GetJitConstants(params, dispatchData); if (tileDims.y != 0 && tileDims.x != 0) { - mem_consts.AddConstant(MakeJitConstant("UNIT_BYTE_SIZE", kd.fp16UnitUsed ? sizeof(short) : sizeof(float))); - mem_consts.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", kd.lws0)); + mem_consts.AddConstant(MakeJitConstant("UNIT_BYTE_SIZE", BytesPerElement(params.output.GetDType()))); + mem_consts.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", dispatchData.lws[0])); mem_consts.AddConstant(MakeJitConstant("TILE_HEIGHT", tileDims.y)); mem_consts.AddConstant(MakeJitConstant("TILE_WIDTH", tileDims.x)); } @@ -132,9 +131,9 @@ KernelsData ConvolutionKernel_bfyx_3x3_dw_opt::GetTunedKernelsDataByIndex(const KernelData kd = KernelData::Default(params); convolution_params& convParams = *static_cast(kd.params.get()); - DispatchData runInfo = SetDefault(convParams, autoTuneIndex); + DispatchData dispatchData = SetDefault(convParams, autoTuneIndex); - if (static_cast(static_cast(runInfo.gws0 - 1) / simdSize) * runInfo.cldnnStyle.blockWidth + simdSize > + if (static_cast(static_cast(dispatchData.gws[0] - 1) / simdSize) * dispatchData.cldnnStyle.blockWidth + simdSize > convParams.inputs[0].Y().pitch) { // Internal Error - 
requested tile size is not supported for y pitch return {}; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_3x3_dw_opt.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_3x3_dw_opt.h index 16c273564cbea5..c8e52854e70805 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_3x3_dw_opt.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_3x3_dw_opt.h @@ -38,7 +38,7 @@ class ConvolutionKernel_bfyx_3x3_dw_opt : public ConvolutionKernelBase { WeightsLayout GetPreferredWeightsLayout(const convolution_params &) const override { return WeightsLayout::oiyx; } - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& params, int autoTuneIndex = -1) const override; struct AutoTuneOption { @@ -49,4 +49,4 @@ class ConvolutionKernel_bfyx_3x3_dw_opt : public ConvolutionKernelBase { AutoTuneOption GetAutoTuneOptions(const Params& arg, int autoTuneIndex) const; std::vector autoTuneOptions = {}; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_depthwise_weights_lwg.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_depthwise_weights_lwg.cpp index 8046070f3ee450..4b3709fb661386 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_depthwise_weights_lwg.cpp +++ 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_depthwise_weights_lwg.cpp @@ -56,29 +56,22 @@ bool ConvolutionKernel_bfyx_depthwise_weights_lwg::Validate(const Params& p, con return true; } -ConvolutionKernelBase::DispatchData ConvolutionKernel_bfyx_depthwise_weights_lwg::SetDefault( - const convolution_params& params, - int) const { - DispatchData runInfo = Parent::SetDefault(params); +ConvolutionKernelBase::DispatchData ConvolutionKernel_bfyx_depthwise_weights_lwg::SetDefault(const convolution_params& params, + int) const { + DispatchData dispatchData = Parent::SetDefault(params); const auto& out = params.output; - std::vector global = {out.X().v * out.Y().v, out.Feature().v, out.Batch().v}; + dispatchData.gws = { Align(out.X().v * out.Y().v, 16), out.Feature().v, out.Batch().v }; + dispatchData.lws = { 16, 1, 1 }; - runInfo.gws0 = Align(global[0], 16); - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; - runInfo.lws0 = 16; - runInfo.lws1 = 1; - runInfo.lws2 = 1; + dispatchData.efficiency = FORCE_PRIORITY_2; - runInfo.efficiency = FORCE_PRIORITY_2; - - return runInfo; + return dispatchData; } JitConstants ConvolutionKernel_bfyx_depthwise_weights_lwg::GetJitConstants(const convolution_params& params, - const DispatchData& kd) const { - auto mem_consts = ConvolutionKernelBase::GetJitConstants(params, kd); + const DispatchData& dispatchData) const { + auto mem_consts = ConvolutionKernelBase::GetJitConstants(params, dispatchData); if (params.padding.x != 0 || params.padding.y != 0) mem_consts.AddConstant(MakeJitConstant("BOUNDARY_CHECK", 1)); @@ -90,4 +83,4 @@ KernelsData ConvolutionKernel_bfyx_depthwise_weights_lwg::GetKernelsData(const P const optional_params& options) const { return GetTunedKernelsDataByIndex(params, options); } -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_depthwise_weights_lwg.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_depthwise_weights_lwg.h index 0aa4b4cb823f64..796d45f00e8cd8 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_depthwise_weights_lwg.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_depthwise_weights_lwg.h @@ -34,7 +34,7 @@ class ConvolutionKernel_bfyx_depthwise_weights_lwg : public ConvolutionKernelBas WeightsLayout GetPreferredWeightsLayout(const convolution_params &) const override { return WeightsLayout::goiyx; } - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& params, int autoTuneIndex = -1) const override; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_direct_10_12_16.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_direct_10_12_16.cpp index df57f4f8226137..2c5849f2296b55 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_direct_10_12_16.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_direct_10_12_16.cpp @@ -38,16 +38,16 @@ ParamsKey ConvolutionKernel_bfyx_Direct_10_10_12::GetSupportedKey() const { } JitConstants ConvolutionKernel_bfyx_Direct_10_10_12::GetJitConstants(const convolution_params& cp, - const 
DispatchData& runInfo) const { - JitConstants jit = Parent::GetJitConstants(cp, runInfo); + const DispatchData& dispatchData) const { + JitConstants jit = Parent::GetJitConstants(cp, dispatchData); jit.AddConstants({ - MakeJitConstant("ALIGNED_OFM", RoundUp(cp.output.Feature().v / cp.groups, runInfo.gemmStyle.subBlockDimN) * cp.groups), - MakeJitConstant("ALIGNED_OFM_PER_GROUP", RoundUp(cp.output.Feature().v / cp.groups, runInfo.gemmStyle.subBlockDimN)), - MakeJitConstant("DX", runInfo.gemmStyle.globalWorkSizeDX), - MakeJitConstant("DY", runInfo.gemmStyle.globalWorkSizeDY), + MakeJitConstant("ALIGNED_OFM", RoundUp(cp.output.Feature().v / cp.groups, dispatchData.gemmStyle.subBlockDimN) * cp.groups), + MakeJitConstant("ALIGNED_OFM_PER_GROUP", RoundUp(cp.output.Feature().v / cp.groups, dispatchData.gemmStyle.subBlockDimN)), + MakeJitConstant("DX", dispatchData.gemmStyle.globalWorkSizeDX), + MakeJitConstant("DY", dispatchData.gemmStyle.globalWorkSizeDY), MakeJitConstant("KERNEL_SLICE_DIV2", (cp.filterSize.x * cp.filterSize.y) / 2), - MakeJitConstant("RIGHT_PARTIAL_TILE_K", cp.output.X().v % runInfo.gemmStyle.globalWorkSizeDX), + MakeJitConstant("RIGHT_PARTIAL_TILE_K", cp.output.X().v % dispatchData.gemmStyle.globalWorkSizeDX), MakeJitConstant("INPUT_BUFFER_WIDTH_PADDED", ""), // TODO: enable non padding path again MakeJitConstant("INPUT_BUFFER_HEIGHT_PADDED", ""), }); @@ -55,30 +55,29 @@ JitConstants ConvolutionKernel_bfyx_Direct_10_10_12::GetJitConstants(const convo return jit; } -ConvolutionKernel_bfyx_Direct_10_10_12::Parent::DispatchData ConvolutionKernel_bfyx_Direct_10_10_12::SetDefault( - const convolution_params& arg, - int) const { - Parent::DispatchData runInfo = Parent::SetDefault(arg); +ConvolutionKernel_bfyx_Direct_10_10_12::DispatchData ConvolutionKernel_bfyx_Direct_10_10_12::SetDefault(const convolution_params& arg, + int) const { + DispatchData dispatchData = Parent::SetDefault(arg); constexpr uint32_t TILE_N = 16; if (arg.filterSize.x == 5) { - 
runInfo.gemmStyle = {1, 1, TILE_N, /*GWS DX*/ 4, /*GWS DY*/ 4, 1}; + dispatchData.gemmStyle = {1, 1, TILE_N, /*GWS DX*/ 4, /*GWS DY*/ 4, 1}; } else { - runInfo.gemmStyle = {1, 1, TILE_N, /*GWS DX*/ 4, /*GWS DY*/ 3, 1}; + dispatchData.gemmStyle = {1, 1, TILE_N, /*GWS DX*/ 4, /*GWS DY*/ 3, 1}; } - runInfo.gws0 = RoundUp(arg.output.X().v, runInfo.gemmStyle.globalWorkSizeDX) / runInfo.gemmStyle.globalWorkSizeDX; - runInfo.gws1 = RoundUp(arg.output.Y().v, runInfo.gemmStyle.globalWorkSizeDY) / runInfo.gemmStyle.globalWorkSizeDY; - runInfo.gws2 = RoundUp(arg.output.Feature().v / arg.groups, TILE_N) * arg.output.Batch().v * arg.groups; + dispatchData.gws[0] = RoundUp(arg.output.X().v, dispatchData.gemmStyle.globalWorkSizeDX) / dispatchData.gemmStyle.globalWorkSizeDX; + dispatchData.gws[1] = RoundUp(arg.output.Y().v, dispatchData.gemmStyle.globalWorkSizeDY) / dispatchData.gemmStyle.globalWorkSizeDY; + dispatchData.gws[2] = RoundUp(arg.output.Feature().v / arg.groups, TILE_N) * arg.output.Batch().v * arg.groups; - runInfo.lws0 = 1; - runInfo.lws1 = 1; - runInfo.lws2 = TILE_N; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = TILE_N; - runInfo.efficiency = FORCE_PRIORITY_4; + dispatchData.efficiency = FORCE_PRIORITY_4; - return runInfo; + return dispatchData; } bool ConvolutionKernel_bfyx_Direct_10_10_12::Validate(const Params& p, const optional_params& o) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_direct_10_12_16.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_direct_10_12_16.h index 1f5a0f427626fe..f40dd23e5b9c4c 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_direct_10_12_16.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_direct_10_12_16.h @@ -34,9 +34,9 @@ class 
ConvolutionKernel_bfyx_Direct_10_10_12 : public ConvolutionKernelBase { return (p.groups > 1) ? WeightsLayout::gi_yxs_os_yxsv2_osv16 : WeightsLayout::i_yxs_os_yxsv2_osv16; } - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; bool Validate(const Params& p, const optional_params& o) const override; bool NeedPaddedInput() const override { return true; } DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_gemm_like.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_gemm_like.cpp index f4fd240e34cbc9..97d7a3725ce7d7 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_gemm_like.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_gemm_like.cpp @@ -49,21 +49,21 @@ std::string ConvolutionKernel_bfyx_GEMMLike::GetKernelName(const convolution_par } JitConstants ConvolutionKernel_bfyx_GEMMLike::GetJitConstants(const convolution_params& params, - const DispatchData& runInfo) const { - JitConstants jit = Parent::GetJitConstants(params, runInfo); + const DispatchData& dispatchData) const { + JitConstants jit = Parent::GetJitConstants(params, dispatchData); jit.AddConstants({ - MakeJitConstant("ALIGNED_OFM_PER_GROUP", RoundUp(params.output.Feature().v / params.groups, runInfo.gemmStyle.subBlockDimN)), - MakeJitConstant("DX", runInfo.gemmStyle.globalWorkSizeDX), - MakeJitConstant("DY", runInfo.gemmStyle.globalWorkSizeDY), + MakeJitConstant("ALIGNED_OFM_PER_GROUP", 
RoundUp(params.output.Feature().v / params.groups, dispatchData.gemmStyle.subBlockDimN)), + MakeJitConstant("DX", dispatchData.gemmStyle.globalWorkSizeDX), + MakeJitConstant("DY", dispatchData.gemmStyle.globalWorkSizeDY), MakeJitConstant("FILTER_SIZE_X_DIV2", params.filterSize.x / 2), MakeJitConstant("INPUT_BUFFER_WIDTH_PADDED", ""), // TODO: enable non padding path again MakeJitConstant("INPUT_BUFFER_HEIGHT_PADDED", ""), }); - if (CeilDiv(RoundUp(params.output.X().v * params.output.Y().v, runInfo.gemmStyle.subBlockDimM), - runInfo.gemmStyle.globalWorkSizeDY) % - runInfo.lws1 != + if (CeilDiv(RoundUp(params.output.X().v * params.output.Y().v, dispatchData.gemmStyle.subBlockDimM), + dispatchData.gemmStyle.globalWorkSizeDY) % + dispatchData.lws[1] != 0) jit.AddConstant(MakeJitConstant("LEFTOVERS", 1)); @@ -73,29 +73,29 @@ JitConstants ConvolutionKernel_bfyx_GEMMLike::GetJitConstants(const convolution_ ConvolutionKernel_bfyx_GEMMLike::Parent::DispatchData ConvolutionKernel_bfyx_GEMMLike::SetDefault( const convolution_params& arg, int autoTuneIndex) const { - DispatchData runInfo = Parent::SetDefault(arg, autoTuneIndex); + DispatchData dispatchData = Parent::SetDefault(arg, autoTuneIndex); - runInfo.lws0 = 1; - runInfo.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[2] = 1; if (arg.inputs[0].GetDType() == Datatype::F16) { - runInfo.gemmStyle = {1, arg.filterSize.x, 32, 32, 1, 1}; - runInfo.lws1 = 16; - runInfo.efficiency = FORCE_PRIORITY_6; + dispatchData.gemmStyle = {1, arg.filterSize.x, 32, 32, 1, 1}; + dispatchData.lws[1] = 16; + dispatchData.efficiency = FORCE_PRIORITY_6; } else { - runInfo.gemmStyle = {2, arg.filterSize.x, 32, 32, 2, 1}; - runInfo.lws1 = 8; - runInfo.efficiency = FORCE_PRIORITY_8; + dispatchData.gemmStyle = {2, arg.filterSize.x, 32, 32, 2, 1}; + dispatchData.lws[1] = 8; + dispatchData.efficiency = FORCE_PRIORITY_8; } - size_t sgemm_m = RoundUp(arg.output.X().v * arg.output.Y().v, runInfo.gemmStyle.subBlockDimM); - size_t sgemm_n = 
RoundUp(arg.output.Feature().v / arg.groups, runInfo.gemmStyle.subBlockDimN); + size_t sgemm_m = RoundUp(arg.output.X().v * arg.output.Y().v, dispatchData.gemmStyle.subBlockDimM); + size_t sgemm_n = RoundUp(arg.output.Feature().v / arg.groups, dispatchData.gemmStyle.subBlockDimN); - runInfo.gws0 = RoundUp(CeilDiv(sgemm_n, runInfo.gemmStyle.globalWorkSizeDX), runInfo.lws0); - runInfo.gws1 = RoundUp(CeilDiv(sgemm_m, runInfo.gemmStyle.globalWorkSizeDY), runInfo.lws1); - runInfo.gws2 = arg.output.Batch().v * arg.groups; + dispatchData.gws[0] = RoundUp(CeilDiv(sgemm_n, dispatchData.gemmStyle.globalWorkSizeDX), dispatchData.lws[0]); + dispatchData.gws[1] = RoundUp(CeilDiv(sgemm_m, dispatchData.gemmStyle.globalWorkSizeDY), dispatchData.lws[1]); + dispatchData.gws[2] = arg.output.Batch().v * arg.groups; - return runInfo; + return dispatchData; } bool ConvolutionKernel_bfyx_GEMMLike::Validate(const Params& p, const optional_params& o) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_gemm_like.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_gemm_like.h index fdf6527ce1d60b..e11336ca9424e5 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_gemm_like.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_gemm_like.h @@ -34,8 +34,8 @@ class ConvolutionKernel_bfyx_GEMMLike : public ConvolutionKernelBase { WeightsLayout GetPreferredWeightsLayout(const convolution_params &) const override; std::string GetKernelName(const convolution_params& params) const override; bool NeedPaddedInput() const override { return true; } - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) 
const override; bool Validate(const Params& p, const optional_params& o) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_iyxo.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_iyxo.cpp index 94cb32e4e76485..415fe3f05b9167 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_iyxo.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_iyxo.cpp @@ -39,19 +39,19 @@ ParamsKey ConvolutionKernel_bfyx_iyxo::GetSupportedKey() const { } ConvolutionKernelBase::DispatchData ConvolutionKernel_bfyx_iyxo::SetDefault(const convolution_params& cp, int) const { - DispatchData runInfo = ConvolutionKernelBase::SetDefault(cp); + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(cp); - runInfo.efficiency = FORCE_PRIORITY_9; + dispatchData.efficiency = FORCE_PRIORITY_9; - runInfo.gws0 = CeilDiv(cp.output.X().v, sub_group_size) / 4; - runInfo.gws1 = cp.output.Y().v; - runInfo.gws2 = sub_group_size; + dispatchData.gws[0] = CeilDiv(cp.output.X().v, sub_group_size) / 4; + dispatchData.gws[1] = cp.output.Y().v; + dispatchData.gws[2] = sub_group_size; - runInfo.lws0 = 1; - runInfo.lws1 = 1; - runInfo.lws2 = sub_group_size; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = sub_group_size; - return runInfo; + return dispatchData; } bool ConvolutionKernel_bfyx_iyxo::Validate(const Params& p, const optional_params& o) const { @@ -76,10 +76,10 @@ bool ConvolutionKernel_bfyx_iyxo::Validate(const Params& p, const optional_param return true; } -JitConstants 
ConvolutionKernel_bfyx_iyxo::GetJitConstants(const convolution_params& params, const DispatchData& runInfo) const { - auto jit = Parent::GetJitConstants(params, runInfo); +JitConstants ConvolutionKernel_bfyx_iyxo::GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); - jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", runInfo.lws2)); + jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", dispatchData.lws[2])); return jit; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_iyxo.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_iyxo.h index ec82082da7d3b2..2c574edd072033 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_iyxo.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_iyxo.h @@ -34,7 +34,7 @@ class ConvolutionKernel_bfyx_iyxo : public ConvolutionKernelBase { return WeightsLayout::iyxo; } - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; bool Validate(const Params& p, const optional_params& o) const override; bool NeedPaddedInput() const override { return true; } DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16.cpp index eb641bc3c27ef0..fce3e6ac9d4d91 100644 --- 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16.cpp @@ -147,7 +147,6 @@ ConvolutionKernel_bfyx_os_iyx_osv16::AutoTuneOption ConvolutionKernel_bfyx_os_iy option.blockWidth = 4; option.blockHeight = 3; option.prefetch = 5; - // run_info.efficiency = FORCE_PRIORITY_7; // GEMM is better } // if this is not 1x1 batch1 case then shrink filters, other way we're memory bound and it's best to use 16x1 block @@ -161,39 +160,39 @@ ConvolutionKernel_bfyx_os_iyx_osv16::AutoTuneOption ConvolutionKernel_bfyx_os_iy ConvolutionKernelBase::DispatchData ConvolutionKernel_bfyx_os_iyx_osv16::SetDefault(const convolution_params& cp, int autoTuneIndex) const { - DispatchData runInfo = ConvolutionKernelBase::SetDefault(cp); + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(cp); const auto of_maps = cp.output.Feature().v; const auto of_maps_per_group = of_maps / cp.groups; const size_t of_threads_per_batch = RoundUp(of_maps_per_group, sub_group_size) * cp.groups; - runInfo.efficiency = FORCE_PRIORITY_3; + dispatchData.efficiency = FORCE_PRIORITY_3; auto tuneOptions = GetAutoTuneOptions(cp, autoTuneIndex); - runInfo.cldnnStyle.blockWidth = tuneOptions.blockWidth; - runInfo.cldnnStyle.blockHeight = tuneOptions.blockHeight; - runInfo.cldnnStyle.prefetch = tuneOptions.prefetch; + dispatchData.cldnnStyle.blockWidth = tuneOptions.blockWidth; + dispatchData.cldnnStyle.blockHeight = tuneOptions.blockHeight; + dispatchData.cldnnStyle.prefetch = tuneOptions.prefetch; - auto input_block_dims = get_bfyx_req_input_block_dims(runInfo.cldnnStyle.blockWidth, - runInfo.cldnnStyle.blockHeight, + auto input_block_dims = get_bfyx_req_input_block_dims(dispatchData.cldnnStyle.blockWidth, + dispatchData.cldnnStyle.blockHeight, cp.filterSize, cp.stride, cp.dilation, sub_group_size, - 
runInfo.fp16UnitUsed ? sub_group_size : sub_group_size / 2, + cp.output.GetDType() == Datatype::F16 ? sub_group_size : sub_group_size / 2, sub_group_size); - runInfo.cldnnStyle.inputBlockArraySize = input_block_dims.first; - runInfo.cldnnStyle.inputBlockWidth = input_block_dims.second; + dispatchData.cldnnStyle.inputBlockArraySize = input_block_dims.first; + dispatchData.cldnnStyle.inputBlockWidth = input_block_dims.second; - runInfo.gws0 = CeilDiv(cp.output.X().v, runInfo.cldnnStyle.blockWidth); - runInfo.gws1 = CeilDiv(cp.output.Y().v, runInfo.cldnnStyle.blockHeight); - runInfo.gws2 = of_threads_per_batch * cp.output.Batch().v; + dispatchData.gws[0] = CeilDiv(cp.output.X().v, dispatchData.cldnnStyle.blockWidth); + dispatchData.gws[1] = CeilDiv(cp.output.Y().v, dispatchData.cldnnStyle.blockHeight); + dispatchData.gws[2] = of_threads_per_batch * cp.output.Batch().v; - runInfo.lws0 = 1; - runInfo.lws1 = 1; - runInfo.lws2 = sub_group_size; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = sub_group_size; - return runInfo; + return dispatchData; } bool ConvolutionKernel_bfyx_os_iyx_osv16::Validate(const Params& p, const optional_params& o) const { @@ -205,13 +204,13 @@ bool ConvolutionKernel_bfyx_os_iyx_osv16::Validate(const Params& p, const option } JitConstants ConvolutionKernel_bfyx_os_iyx_osv16::GetJitConstants(const convolution_params& params, - const DispatchData& runInfo) const { + const DispatchData& dispatchData) const { const auto of_maps = params.output.Feature().v; const auto of_maps_per_group = of_maps / params.groups; const size_t of_threads_per_batch = RoundUp(of_maps_per_group, sub_group_size); size_t leftovers = of_threads_per_batch - of_maps_per_group; - auto jit = Parent::GetJitConstants(params, runInfo); + auto jit = Parent::GetJitConstants(params, dispatchData); if (!params.fused_ops.empty()) { auto input_dt = GetUnitType(params); @@ -220,12 +219,12 @@ JitConstants 
ConvolutionKernel_bfyx_os_iyx_osv16::GetJitConstants(const convolut } - jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", runInfo.lws2)); - jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_WIDTH", runInfo.cldnnStyle.blockWidth)); - jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_HEIGHT", runInfo.cldnnStyle.blockHeight)); - jit.AddConstant(MakeJitConstant("IN_BLOCK_ARRAY_SIZE", runInfo.cldnnStyle.inputBlockArraySize)); - jit.AddConstant(MakeJitConstant("IN_BLOCK_WIDTH", runInfo.cldnnStyle.inputBlockWidth)); - jit.AddConstant(MakeJitConstant("PREFETCH", runInfo.cldnnStyle.prefetch)); + jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", dispatchData.lws[2])); + jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_WIDTH", dispatchData.cldnnStyle.blockWidth)); + jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_HEIGHT", dispatchData.cldnnStyle.blockHeight)); + jit.AddConstant(MakeJitConstant("IN_BLOCK_ARRAY_SIZE", dispatchData.cldnnStyle.inputBlockArraySize)); + jit.AddConstant(MakeJitConstant("IN_BLOCK_WIDTH", dispatchData.cldnnStyle.inputBlockWidth)); + jit.AddConstant(MakeJitConstant("PREFETCH", dispatchData.cldnnStyle.prefetch)); if (leftovers) { jit.AddConstant(MakeJitConstant("LEFTOVERS", leftovers)); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16.h index 536970b7885aa4..72706b0508de4a 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16.h @@ -40,7 +40,7 @@ class ConvolutionKernel_bfyx_os_iyx_osv16 : public ConvolutionKernelBase { FusedOpType::ACTIVATION }; } - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + 
JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; bool Validate(const Params& p, const optional_params& o) const override; bool NeedPaddedInput() const override { return true; } DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16_2_sg.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16_2_sg.cpp index 152fb25241ee38..f515fa30f2038a 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16_2_sg.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16_2_sg.cpp @@ -146,7 +146,6 @@ ConvolutionKernel_bfyx_os_iyx_osv16_2_sg::AutoTuneOption ConvolutionKernel_bfyx_ option.blockWidth = 4; option.blockHeight = 3; option.prefetch = 5; - // run_info.efficiency = FORCE_PRIORITY_7; // GEMM is better } // if this is not 1x1 batch1 case then shrink filters, other way we're memory bound and it's best to use 16x1 block @@ -160,38 +159,38 @@ ConvolutionKernel_bfyx_os_iyx_osv16_2_sg::AutoTuneOption ConvolutionKernel_bfyx_ ConvolutionKernelBase::DispatchData ConvolutionKernel_bfyx_os_iyx_osv16_2_sg::SetDefault(const convolution_params& cp, int autoTuneIndex) const { - DispatchData runInfo = ConvolutionKernelBase::SetDefault(cp); + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(cp); const auto of_maps = cp.output.Feature().v; const size_t of_threads_per_batch = RoundUp(of_maps, sub_group_size); - runInfo.efficiency = FORCE_PRIORITY_3; + dispatchData.efficiency = FORCE_PRIORITY_3; auto tuneOptions = GetAutoTuneOptions(cp, autoTuneIndex); - runInfo.cldnnStyle.blockWidth = tuneOptions.blockWidth; - 
runInfo.cldnnStyle.blockHeight = tuneOptions.blockHeight; - runInfo.cldnnStyle.prefetch = tuneOptions.prefetch; + dispatchData.cldnnStyle.blockWidth = tuneOptions.blockWidth; + dispatchData.cldnnStyle.blockHeight = tuneOptions.blockHeight; + dispatchData.cldnnStyle.prefetch = tuneOptions.prefetch; - auto input_block_dims = get_bfyx_req_input_block_dims(runInfo.cldnnStyle.blockWidth, - runInfo.cldnnStyle.blockHeight, + auto input_block_dims = get_bfyx_req_input_block_dims(dispatchData.cldnnStyle.blockWidth, + dispatchData.cldnnStyle.blockHeight, cp.filterSize, cp.stride, cp.dilation, sub_group_size, - runInfo.fp16UnitUsed ? sub_group_size : sub_group_size / 2, + cp.output.GetDType() == Datatype::F16 ? sub_group_size : sub_group_size / 2, sub_group_size); - runInfo.cldnnStyle.inputBlockArraySize = input_block_dims.first; - runInfo.cldnnStyle.inputBlockWidth = input_block_dims.second; + dispatchData.cldnnStyle.inputBlockArraySize = input_block_dims.first; + dispatchData.cldnnStyle.inputBlockWidth = input_block_dims.second; - runInfo.gws0 = CeilDiv(cp.output.X().v, runInfo.cldnnStyle.blockWidth); - runInfo.gws1 = CeilDiv(cp.output.Y().v, runInfo.cldnnStyle.blockHeight); - runInfo.gws2 = 2 * of_threads_per_batch * cp.output.Batch().v; + dispatchData.gws[0] = CeilDiv(cp.output.X().v, dispatchData.cldnnStyle.blockWidth); + dispatchData.gws[1] = CeilDiv(cp.output.Y().v, dispatchData.cldnnStyle.blockHeight); + dispatchData.gws[2] = 2 * of_threads_per_batch * cp.output.Batch().v; - runInfo.lws0 = 1; - runInfo.lws1 = 1; - runInfo.lws2 = 2 * sub_group_size; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 2 * sub_group_size; - return runInfo; + return dispatchData; } bool ConvolutionKernel_bfyx_os_iyx_osv16_2_sg::Validate(const Params& p, const optional_params& o) const { @@ -211,19 +210,19 @@ bool ConvolutionKernel_bfyx_os_iyx_osv16_2_sg::Validate(const Params& p, const o } JitConstants 
ConvolutionKernel_bfyx_os_iyx_osv16_2_sg::GetJitConstants(const convolution_params& params, - const DispatchData& runInfo) const { + const DispatchData& dispatchData) const { const auto of_maps = params.output.Feature().v; const size_t of_threads_per_batch = RoundUp(of_maps, sub_group_size); size_t leftovers = of_threads_per_batch - of_maps; - auto jit = Parent::GetJitConstants(params, runInfo); + auto jit = Parent::GetJitConstants(params, dispatchData); jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", 16)); - jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_WIDTH", runInfo.cldnnStyle.blockWidth)); - jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_HEIGHT", runInfo.cldnnStyle.blockHeight)); - jit.AddConstant(MakeJitConstant("IN_BLOCK_ARRAY_SIZE", runInfo.cldnnStyle.inputBlockArraySize)); - jit.AddConstant(MakeJitConstant("IN_BLOCK_WIDTH", runInfo.cldnnStyle.inputBlockWidth)); - jit.AddConstant(MakeJitConstant("PREFETCH", runInfo.cldnnStyle.prefetch)); + jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_WIDTH", dispatchData.cldnnStyle.blockWidth)); + jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_HEIGHT", dispatchData.cldnnStyle.blockHeight)); + jit.AddConstant(MakeJitConstant("IN_BLOCK_ARRAY_SIZE", dispatchData.cldnnStyle.inputBlockArraySize)); + jit.AddConstant(MakeJitConstant("IN_BLOCK_WIDTH", dispatchData.cldnnStyle.inputBlockWidth)); + jit.AddConstant(MakeJitConstant("PREFETCH", dispatchData.cldnnStyle.prefetch)); if (leftovers) { jit.AddConstant(MakeJitConstant("LEFTOVERS", leftovers)); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16_2_sg.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16_2_sg.h index b3bcf6df8e87a2..75e8c3bdec6871 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16_2_sg.h +++ 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_os_iyx_osv16_2_sg.h @@ -33,7 +33,7 @@ class ConvolutionKernel_bfyx_os_iyx_osv16_2_sg : public ConvolutionKernelBase { protected: WeightsLayout GetPreferredWeightsLayout(const convolution_params &) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; bool Validate(const Params& p, const optional_params& o) const override; bool NeedPaddedInput() const override { return true; } DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; @@ -50,4 +50,4 @@ class ConvolutionKernel_bfyx_os_iyx_osv16_2_sg : public ConvolutionKernelBase { std::vector autoTuneOptions = {}; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_b_fs_yx_fsv16.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_b_fs_yx_fsv16.cpp index 5df33ded795fde..a553b67f2eb63d 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_b_fs_yx_fsv16.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_b_fs_yx_fsv16.cpp @@ -72,32 +72,32 @@ ParamsKey ConvolutionKernel_bfyx_to_bfyx_f16::GetSupportedKey() const { ConvolutionKernelBase::DispatchData ConvolutionKernel_bfyx_to_bfyx_f16::SetDefault(const convolution_params& params, int autoTuneIndex) const { - DispatchData kd = ConvolutionKernelBase::SetDefault(params); + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(params); const auto& out = params.output; auto autoTune = 
GetAutoTuneOptions(params, autoTuneIndex); - kd.cldnnStyle.blockWidth = autoTune.blockWidth; + dispatchData.cldnnStyle.blockWidth = autoTune.blockWidth; auto x = out.X().v; auto y = out.Y().v; auto f = out.Feature().v; auto b = out.Batch().v; - kd.gws0 = CeilDiv(x, autoTune.blockWidth) * y; - kd.gws1 = Align(f, sub_group_size); - kd.gws2 = b; + dispatchData.gws[0] = CeilDiv(x, autoTune.blockWidth) * y; + dispatchData.gws[1] = Align(f, sub_group_size); + dispatchData.gws[2] = b; - kd.lws0 = 1; - kd.lws1 = sub_group_size; - kd.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = sub_group_size; + dispatchData.lws[2] = 1; if (b == 1) - kd.efficiency = FORCE_PRIORITY_2; + dispatchData.efficiency = FORCE_PRIORITY_2; else - kd.efficiency = FORCE_PRIORITY_7; + dispatchData.efficiency = FORCE_PRIORITY_7; - return kd; + return dispatchData; } bool ConvolutionKernel_bfyx_to_bfyx_f16::Validate(const Params& p, const optional_params& o) const { @@ -124,12 +124,12 @@ bool ConvolutionKernel_bfyx_to_bfyx_f16::Validate(const Params& p, const optiona } JitConstants ConvolutionKernel_bfyx_to_bfyx_f16::GetJitConstants(const convolution_params& params, - const DispatchData& runInfo) const { + const DispatchData& dispatchData) const { auto input = params.inputs[0]; auto output = params.output; - auto jit = Parent::GetJitConstants(params, runInfo); + auto jit = Parent::GetJitConstants(params, dispatchData); - auto blockWidth = runInfo.cldnnStyle.blockWidth; + auto blockWidth = dispatchData.cldnnStyle.blockWidth; if (!params.fused_ops.empty()) { auto input_dt = GetUnitType(params); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_b_fs_yx_fsv16.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_b_fs_yx_fsv16.h index adf4d7513a207b..a1edefc84de7dc 100644 --- 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_b_fs_yx_fsv16.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_b_fs_yx_fsv16.h @@ -48,7 +48,7 @@ class ConvolutionKernel_bfyx_to_bfyx_f16 : public ConvolutionKernelBase { bool NeedPaddedInput() const override { return false; } bool Validate(const Params& p, const optional_params& o) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; struct AutoTuneOption { size_t blockWidth; std::string exeMode; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_bs_fs_yx_bsv16_fsv16.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_bs_fs_yx_bsv16_fsv16.cpp index 2ec407a00875b9..e2f73f03d28a84 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_bs_fs_yx_bsv16_fsv16.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_bs_fs_yx_bsv16_fsv16.cpp @@ -51,12 +51,12 @@ ParamsKey ConvolutionKernel_bfyx_to_bfyx_bsv16_fsv16::GetSupportedKey() const { } ConvolutionKernelBase::DispatchData ConvolutionKernel_bfyx_to_bfyx_bsv16_fsv16::SetDefault(const convolution_params& params, - int autoTuneIndex) const { - DispatchData kd = ConvolutionKernel_bfyx_to_bfyx_f16::SetDefault(params, autoTuneIndex); + int autoTuneIndex) const { + DispatchData dispatchData = ConvolutionKernel_bfyx_to_bfyx_f16::SetDefault(params, autoTuneIndex); - kd.efficiency = FORCE_PRIORITY_2; + 
dispatchData.efficiency = FORCE_PRIORITY_2; - return kd; + return dispatchData; } bool ConvolutionKernel_bfyx_to_bfyx_bsv16_fsv16::Validate(const Params& p, const optional_params& o) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_fs_byx_fsv32.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_fs_byx_fsv32.cpp index 69d9c5c8c14c83..b4db07753a0a97 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_fs_byx_fsv32.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_fs_byx_fsv32.cpp @@ -73,24 +73,24 @@ ConvolutionKernel_bfyx_to_fs_byx_fsv32::AutoTuneOption ConvolutionKernel_bfyx_to ConvolutionKernelBase::DispatchData ConvolutionKernel_bfyx_to_fs_byx_fsv32::SetDefault(const convolution_params& arg, int autoTuneIndex) const { - DispatchData runInfo = ConvolutionKernelBase::SetDefault(arg); + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(arg); AutoTuneOption option = GetAutoTuneOptions(arg, autoTuneIndex); - runInfo.efficiency = FORCE_PRIORITY_3; + dispatchData.efficiency = FORCE_PRIORITY_3; - runInfo.cldnnStyle.blockHeight = option.blockHeight; - runInfo.cldnnStyle.blockWidth = option.blockWidth; + dispatchData.cldnnStyle.blockHeight = option.blockHeight; + dispatchData.cldnnStyle.blockWidth = option.blockWidth; - runInfo.lws0 = 1; - runInfo.lws1 = 1; - runInfo.lws2 = 16; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 16; - runInfo.gws0 = CeilDiv(arg.output.X().v, option.blockWidth); - runInfo.gws1 = CeilDiv(arg.output.Y().v, option.blockHeight); - runInfo.gws2 = CeilDiv(arg.output.Feature().v, 32) * 16 * arg.output.Batch().v; + dispatchData.gws[0] = CeilDiv(arg.output.X().v, option.blockWidth); + dispatchData.gws[1] = CeilDiv(arg.output.Y().v, 
option.blockHeight); + dispatchData.gws[2] = CeilDiv(arg.output.Feature().v, 32) * 16 * arg.output.Batch().v; - return runInfo; + return dispatchData; } bool ConvolutionKernel_bfyx_to_fs_byx_fsv32::Validate(const Params& p, const optional_params& o) const { @@ -107,16 +107,16 @@ bool ConvolutionKernel_bfyx_to_fs_byx_fsv32::Validate(const Params& p, const opt } JitConstants ConvolutionKernel_bfyx_to_fs_byx_fsv32::GetJitConstants(const convolution_params& params, - const DispatchData& kd) const { - auto jit = ConvolutionKernelBase::GetJitConstants(params, kd); + const DispatchData& dispatchData) const { + auto jit = ConvolutionKernelBase::GetJitConstants(params, dispatchData); - jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_WIDTH", kd.cldnnStyle.blockWidth)); - jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_HEIGHT", kd.cldnnStyle.blockHeight)); + jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_WIDTH", dispatchData.cldnnStyle.blockWidth)); + jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_HEIGHT", dispatchData.cldnnStyle.blockHeight)); auto inputBlockWidth = - getInputSize(params.stride.x, params.filterSize.x, params.dilation.x, kd.cldnnStyle.blockWidth); + getInputSize(params.stride.x, params.filterSize.x, params.dilation.x, dispatchData.cldnnStyle.blockWidth); auto inputBlockHeight = - getInputSize(params.stride.y, params.filterSize.y, params.dilation.y, kd.cldnnStyle.blockHeight); + getInputSize(params.stride.y, params.filterSize.y, params.dilation.y, dispatchData.cldnnStyle.blockHeight); auto inputBlockWidthRound = RoundUp(inputBlockWidth, subGroupSize); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_fs_byx_fsv32.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_fs_byx_fsv32.h index 431034704ae8f0..4298f8882f6399 100644 --- 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_fs_byx_fsv32.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_fs_byx_fsv32.h @@ -44,7 +44,7 @@ class ConvolutionKernel_bfyx_to_fs_byx_fsv32 : public ConvolutionKernelBase { } bool Validate(const Params& p, const optional_params& o) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; bool NeedPaddedInput() const override { return true; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32.cpp index 9ae158ccfac612..87932436e4bfca 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32.cpp @@ -111,25 +111,25 @@ ConvolutionKernel_fs_byx_fsv32::AutoTuneOption ConvolutionKernel_fs_byx_fsv32::G ConvolutionKernelBase::DispatchData ConvolutionKernel_fs_byx_fsv32::SetDefault(const convolution_params& arg, int autoTuneIndex) const { - DispatchData runInfo = ConvolutionKernelBase::SetDefault(arg); + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(arg); AutoTuneOption option = GetAutoTuneOptions(arg, autoTuneIndex); - runInfo.efficiency = FORCE_PRIORITY_3; + dispatchData.efficiency = FORCE_PRIORITY_3; - runInfo.cldnnStyle.blockHeight = 1; - runInfo.cldnnStyle.blockWidth = option.blockWidth; - runInfo.cldnnStyle.inputBlockWidth = 
getInputWidth(arg, option.blockWidth); + dispatchData.cldnnStyle.blockHeight = 1; + dispatchData.cldnnStyle.blockWidth = option.blockWidth; + dispatchData.cldnnStyle.inputBlockWidth = getInputWidth(arg, option.blockWidth); - runInfo.lws0 = 1; - runInfo.lws1 = 1; - runInfo.lws2 = 16; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 16; - runInfo.gws0 = CeilDiv(arg.output.X().v, option.blockWidth); - runInfo.gws1 = arg.output.Y().v; - runInfo.gws2 = CeilDiv(arg.output.Feature().v, 32) * 16 * arg.output.Batch().v; + dispatchData.gws[0] = CeilDiv(arg.output.X().v, option.blockWidth); + dispatchData.gws[1] = arg.output.Y().v; + dispatchData.gws[2] = CeilDiv(arg.output.Feature().v, 32) * 16 * arg.output.Batch().v; - return runInfo; + return dispatchData; } bool ConvolutionKernel_fs_byx_fsv32::Validate(const Params& p, const optional_params& o) const { @@ -150,15 +150,15 @@ bool ConvolutionKernel_fs_byx_fsv32::Validate(const Params& p, const optional_pa } JitConstants ConvolutionKernel_fs_byx_fsv32::GetJitConstants(const convolution_params& params, - const DispatchData& kd) const { - auto jit = ConvolutionKernelBase::GetJitConstants(params, kd); + const DispatchData& dispatchData) const { + auto jit = ConvolutionKernelBase::GetJitConstants(params, dispatchData); auto accumulator_type = GetAccumulatorType(params); auto activation_type = GetAccumulatorType(params); jit.Merge(MakeTypeJitConstants(accumulator_type, "ACCUMULATOR")); jit.Merge(MakeTypeJitConstants(activation_type, "ACTIVATION")); - jit.AddConstant(MakeJitConstant("INPUT_BLOCK_WIDTH", kd.cldnnStyle.inputBlockWidth)); - jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_WIDTH", kd.cldnnStyle.blockWidth)); + jit.AddConstant(MakeJitConstant("INPUT_BLOCK_WIDTH", dispatchData.cldnnStyle.inputBlockWidth)); + jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_WIDTH", dispatchData.cldnnStyle.blockWidth)); jit.AddConstant(MakeJitConstant("FSV", fsv)); jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", 
subGroupSize)); jit.AddConstant(MakeJitConstant("FSV_PER_THREAD", fsvPerThread)); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32.h index fd3f668a3ebfad..a873772204bddc 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32.h @@ -44,7 +44,7 @@ class ConvolutionKernel_fs_byx_fsv32 : public ConvolutionKernelBase { } bool Validate(const Params& p, const optional_params& o) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; bool NeedPaddedInput() const override { return true; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32_1x1.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32_1x1.cpp index 5533baa796d1a4..49e3c708d4638c 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32_1x1.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32_1x1.cpp @@ -109,24 +109,24 @@ ConvolutionKernel_fs_byx_fsv32_1x1::AutoTuneOption ConvolutionKernel_fs_byx_fsv3 ConvolutionKernelBase::DispatchData ConvolutionKernel_fs_byx_fsv32_1x1::SetDefault(const convolution_params& arg, int autoTuneIndex) const { - DispatchData runInfo = 
ConvolutionKernelBase::SetDefault(arg); + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(arg); AutoTuneOption option = GetAutoTuneOptions(arg, autoTuneIndex); - runInfo.efficiency = FORCE_PRIORITY_4; + dispatchData.efficiency = FORCE_PRIORITY_4; - runInfo.cldnnStyle.blockHeight = option.blockHeight; - runInfo.cldnnStyle.blockWidth = option.blockWidth; + dispatchData.cldnnStyle.blockHeight = option.blockHeight; + dispatchData.cldnnStyle.blockWidth = option.blockWidth; - runInfo.lws0 = 1; - runInfo.lws1 = 1; - runInfo.lws2 = 16; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 16; - runInfo.gws0 = CeilDiv(arg.output.X().v, option.blockWidth); - runInfo.gws1 = CeilDiv(arg.output.Y().v, option.blockHeight); - runInfo.gws2 = CeilDiv(arg.output.Feature().v, 32) * 16 * arg.output.Batch().v; + dispatchData.gws[0] = CeilDiv(arg.output.X().v, option.blockWidth); + dispatchData.gws[1] = CeilDiv(arg.output.Y().v, option.blockHeight); + dispatchData.gws[2] = CeilDiv(arg.output.Feature().v, 32) * 16 * arg.output.Batch().v; - return runInfo; + return dispatchData; } bool ConvolutionKernel_fs_byx_fsv32_1x1::Validate(const Params& p, const optional_params& o) const { @@ -150,11 +150,11 @@ bool ConvolutionKernel_fs_byx_fsv32_1x1::Validate(const Params& p, const optiona } JitConstants ConvolutionKernel_fs_byx_fsv32_1x1::GetJitConstants(const convolution_params& params, - const DispatchData& kd) const { - auto jit = ConvolutionKernelBase::GetJitConstants(params, kd); + const DispatchData& dispatchData) const { + auto jit = ConvolutionKernelBase::GetJitConstants(params, dispatchData); - jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_WIDTH", kd.cldnnStyle.blockWidth)); - jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_HEIGHT", kd.cldnnStyle.blockHeight)); + jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_WIDTH", dispatchData.cldnnStyle.blockWidth)); + jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_HEIGHT", dispatchData.cldnnStyle.blockHeight)); 
jit.AddConstant(MakeJitConstant("FSV", fsv)); jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", subGroupSize)); jit.AddConstant(MakeJitConstant("FSV_PER_THREAD", fsvPerThread)); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32_1x1.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32_1x1.h index 5e822e26fc9ce8..cda0f51789f584 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32_1x1.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32_1x1.h @@ -45,7 +45,7 @@ class ConvolutionKernel_fs_byx_fsv32_1x1 : public ConvolutionKernelBase { bool Validate(const Params& p, const optional_params& o) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; bool NeedPaddedInput() const override { return true; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32_depthwise.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32_depthwise.cpp index cbb399917355f0..37fc5c48424800 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32_depthwise.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32_depthwise.cpp @@ -109,26 +109,26 @@ ConvolutionKernel_fs_byx_fsv32_depthwise::AutoTuneOption ConvolutionKernel_fs_by } ConvolutionKernelBase::DispatchData 
ConvolutionKernel_fs_byx_fsv32_depthwise::SetDefault(const convolution_params& arg, - int autoTuneIndex) const { - DispatchData runInfo = ConvolutionKernelBase::SetDefault(arg); + int autoTuneIndex) const { + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(arg); AutoTuneOption option = GetAutoTuneOptions(arg, autoTuneIndex); - runInfo.efficiency = FORCE_PRIORITY_3; + dispatchData.efficiency = FORCE_PRIORITY_3; - runInfo.cldnnStyle.blockHeight = 1; - runInfo.cldnnStyle.blockWidth = option.blockWidth; - runInfo.cldnnStyle.inputBlockWidth = getInputWidth(arg, option.blockWidth); + dispatchData.cldnnStyle.blockHeight = 1; + dispatchData.cldnnStyle.blockWidth = option.blockWidth; + dispatchData.cldnnStyle.inputBlockWidth = getInputWidth(arg, option.blockWidth); - runInfo.lws0 = 1; - runInfo.lws1 = 1; - runInfo.lws2 = 16; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 16; - runInfo.gws0 = CeilDiv(arg.output.X().v, option.blockWidth); - runInfo.gws1 = arg.output.Y().v; - runInfo.gws2 = CeilDiv(arg.output.Feature().v, 32) * 16 * arg.output.Batch().v; + dispatchData.gws[0] = CeilDiv(arg.output.X().v, option.blockWidth); + dispatchData.gws[1] = arg.output.Y().v; + dispatchData.gws[2] = CeilDiv(arg.output.Feature().v, 32) * 16 * arg.output.Batch().v; - return runInfo; + return dispatchData; } bool ConvolutionKernel_fs_byx_fsv32_depthwise::Validate(const Params& p, const optional_params& o) const { @@ -154,11 +154,11 @@ bool ConvolutionKernel_fs_byx_fsv32_depthwise::Validate(const Params& p, const o } JitConstants ConvolutionKernel_fs_byx_fsv32_depthwise::GetJitConstants(const convolution_params& params, - const DispatchData& kd) const { - auto jit = ConvolutionKernelBase::GetJitConstants(params, kd); + const DispatchData& dispatchData) const { + auto jit = ConvolutionKernelBase::GetJitConstants(params, dispatchData); - jit.AddConstant(MakeJitConstant("INPUT_BLOCK_WIDTH", kd.cldnnStyle.inputBlockWidth)); - 
jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_WIDTH", kd.cldnnStyle.blockWidth)); + jit.AddConstant(MakeJitConstant("INPUT_BLOCK_WIDTH", dispatchData.cldnnStyle.inputBlockWidth)); + jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_WIDTH", dispatchData.cldnnStyle.blockWidth)); jit.AddConstant(MakeJitConstant("FSV", fsv)); jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", subGroupSize)); jit.AddConstant(MakeJitConstant("FSV_PER_THREAD", fsvPerThread)); @@ -178,8 +178,8 @@ JitConstants ConvolutionKernel_fs_byx_fsv32_depthwise::GetJitConstants(const con } KernelsData ConvolutionKernel_fs_byx_fsv32_depthwise::GetTunedKernelsDataByIndex(const Params& params, - const optional_params& options, - const int autoTuneIndex) const { + const optional_params& options, + const int autoTuneIndex) const { auto tuneOptions = GetAutoTuneOptions(params, autoTuneIndex); return GetCommonKernelsData(params, options, tuneOptions.exeMode, autoTuneIndex); } @@ -189,7 +189,7 @@ KernelsData ConvolutionKernel_fs_byx_fsv32_depthwise::GetKernelsData(const Param } KernelsData ConvolutionKernel_fs_byx_fsv32_depthwise::GetKernelsDataForAutoTune(const Params& params, - const optional_params& options) const { + const optional_params& options) const { if (!Validate(params, options)) { return {}; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32_depthwise.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32_depthwise.h index 2d563f2f0a30b8..08914ec26b3d0a 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32_depthwise.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_fs_byx_fsv32_depthwise.h @@ -44,7 +44,7 @@ class ConvolutionKernel_fs_byx_fsv32_depthwise : public ConvolutionKernelBase { } bool Validate(const Params& p, const 
optional_params& o) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; bool NeedPaddedInput() const override { return true; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad.cpp index bb4158f8fb27b4..8283fc5b8b1f2b 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad.cpp @@ -103,8 +103,8 @@ KernelsData ConvolutionKernel_imad::GetKernelsData(const Params& params, const o return GetCommonKernelsData(params, options); } -JitConstants ConvolutionKernel_imad::GetJitConstants(const convolution_params& params, const DispatchData& kd) const { - auto mem_consts = Parent::GetJitConstants(params, kd); +JitConstants ConvolutionKernel_imad::GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const { + auto mem_consts = Parent::GetJitConstants(params, dispatchData); const auto& input = params.inputs[0]; const auto& output = params.output; @@ -150,7 +150,7 @@ JitConstants ConvolutionKernel_imad::GetJitConstants(const convolution_params& p ConvolutionKernelBase::DispatchData ConvolutionKernel_imad::SetDefault(const convolution_params& params, int) const { - DispatchData kd; + DispatchData dispatchData; const auto& output = params.output; const auto& weights = params.weights; @@ -158,34 +158,26 @@ ConvolutionKernelBase::DispatchData ConvolutionKernel_imad::SetDefault(const con size_t otw, oth; 
getOutBlock_WH(output.X().v, params.stride.x, weights.X().v, params.dilation.x, otw, oth); - std::vector global = {// number of tiles needed to cover output width - CeilDiv(output.X().v, otw), + dispatchData.gws = { // number of tiles needed to cover output width + CeilDiv(output.X().v, otw), - // number of tiles needed to cover output height - CeilDiv(output.Y().v, oth), + // number of tiles needed to cover output height + CeilDiv(output.Y().v, oth), - // round depth range up - Align(weights.OFM().v, SIMD_SIZE) * params.groups * output.Batch().v}; + // round depth range up + Align(weights.OFM().v, SIMD_SIZE) * params.groups * output.Batch().v }; - std::vector local = {1, 1, SIMD_SIZE}; + dispatchData.lws = {1, 1, SIMD_SIZE}; - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - kd.cldnnStyle = {0, 0, 0, 0, 0}; - kd.gemmStyle = {0, 0, 0, 0, 0, 0}; + dispatchData.cldnnStyle = {0, 0, 0, 0, 0}; + dispatchData.gemmStyle = {0, 0, 0, 0, 0, 0}; // This kernel is quite slow for 1x1 and KHx1 kernels // TODO: check if we need any optimized kernels in this layout // If yes, we need to implement some customization for these cases. 
- kd.efficiency = FORCE_PRIORITY_3; + dispatchData.efficiency = FORCE_PRIORITY_3; - return kd; + return dispatchData; } // SetDefault bool ConvolutionKernel_imad::Validate(const Params& params, const optional_params& options) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad.h index 37378c7f15f214..c20058dafefe1a 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad.h @@ -32,7 +32,7 @@ class ConvolutionKernel_imad : public ConvolutionKernelBase { protected: bool Validate(const Params& params, const optional_params& options) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& params, int autoTuneIndex = -1) const override; bool NeedPaddedInput() const override { return true; } WeightsLayout GetPreferredWeightsLayout(const convolution_params &p) const override { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_b_fs_yx_fsv4_1x1.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_b_fs_yx_fsv4_1x1.cpp index adc69bf6462500..2b9c8fb6f1cba7 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_b_fs_yx_fsv4_1x1.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_b_fs_yx_fsv4_1x1.cpp @@ -120,7 +120,7 @@ bool 
ConvolutionKernel_imad_b_fs_yx_fsv4_1x1::ValidateAutoTuneParams(const convo } ConvolutionKernel_imad_b_fs_yx_fsv4_1x1::AutoTuneParams ConvolutionKernel_imad_b_fs_yx_fsv4_1x1::GetAutoTuneParams(const convolution_params& params, - int index) const { + int index) const { AutoTuneParams tune_params; bool selected = false; if (index >= 0 && index < static_cast(all_tune_params.size())) { @@ -143,13 +143,13 @@ ConvolutionKernel_imad_b_fs_yx_fsv4_1x1::AutoTuneParams ConvolutionKernel_imad_b } JitConstants ConvolutionKernel_imad_b_fs_yx_fsv4_1x1::GetJitConstants(const convolution_params& params, - const DispatchData& kd) const { - auto mem_consts = Parent::GetJitConstants(params, kd); + const DispatchData& dispatchData) const { + auto mem_consts = Parent::GetJitConstants(params, dispatchData); - auto simd = kd.lws0; - auto features_per_wi = kd.cldnnStyle.blockHeight; - auto lwg_depth = kd.lws2; - auto force_prefetch = kd.cldnnStyle.prefetch == 1; + auto simd = dispatchData.lws[0]; + auto features_per_wi = dispatchData.cldnnStyle.blockHeight; + auto lwg_depth = dispatchData.lws[2]; + auto force_prefetch = dispatchData.cldnnStyle.prefetch == 1; mem_consts.AddConstant(MakeJitConstant("SIMD", simd)); mem_consts.AddConstant(MakeJitConstant("FEATURES_PER_WI", features_per_wi)); @@ -175,8 +175,8 @@ JitConstants ConvolutionKernel_imad_b_fs_yx_fsv4_1x1::GetJitConstants(const conv } // GetJitConstants ConvolutionKernelBase::DispatchData ConvolutionKernel_imad_b_fs_yx_fsv4_1x1::SetDefault(const convolution_params& params, - int autoTuneIndex) const { - DispatchData kd; + int autoTuneIndex) const { + DispatchData dispatchData; auto& out = params.output; auto autoTuneParam = GetAutoTuneParams(params, autoTuneIndex); @@ -184,31 +184,23 @@ ConvolutionKernelBase::DispatchData ConvolutionKernel_imad_b_fs_yx_fsv4_1x1::Set auto simd = autoTuneParam.simd; auto features_per_wi = autoTuneParam.features_per_wi; - std::vector global = { RoundUp(out.X().v * out.Y().v, simd), 
CeilDiv(out.Feature().v, features_per_wi), out.Batch().v * lwg_depth }; - std::vector local = { simd, 1, lwg_depth}; + dispatchData.gws = { RoundUp(out.X().v * out.Y().v, simd), CeilDiv(out.Feature().v, features_per_wi), out.Batch().v * lwg_depth }; + dispatchData.lws = { simd, 1, lwg_depth}; - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; + dispatchData.gemmStyle = { 0, 0, 0, 0, 0, 0 }; - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; + dispatchData.cldnnStyle.blockHeight = features_per_wi; + dispatchData.cldnnStyle.blockWidth = simd; + dispatchData.cldnnStyle.prefetch = autoTuneParam.force_prefetch ? 1 : 0; - kd.gemmStyle = { 0, 0, 0, 0, 0, 0 }; + dispatchData.efficiency = FORCE_PRIORITY_1; - kd.cldnnStyle.blockHeight = features_per_wi; - kd.cldnnStyle.blockWidth = simd; - kd.cldnnStyle.prefetch = autoTuneParam.force_prefetch ? 1 : 0; - - kd.efficiency = FORCE_PRIORITY_1; - - return kd; + return dispatchData; } // SetDefault KernelsData ConvolutionKernel_imad_b_fs_yx_fsv4_1x1::GetTunedKernelsDataByIndex(const Params& params, - const optional_params& options, - int autoTuneIndex) const { + const optional_params& options, + int autoTuneIndex) const { auto convParams = static_cast(params); auto tuneParams = GetAutoTuneParams(convParams, autoTuneIndex); return GetCommonKernelsData(params, options, tuneParams.exeMode, autoTuneIndex); @@ -219,7 +211,7 @@ KernelsData ConvolutionKernel_imad_b_fs_yx_fsv4_1x1::GetKernelsData(const Params } KernelsData ConvolutionKernel_imad_b_fs_yx_fsv4_1x1::GetKernelsDataForAutoTune(const Params& params, - const optional_params& options) const { + const optional_params& options) const { if (!Validate(params, options)) { return {}; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_b_fs_yx_fsv4_1x1.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_b_fs_yx_fsv4_1x1.h index 
4d66e4679e4607..6cf7e8d7a39c64 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_b_fs_yx_fsv4_1x1.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_b_fs_yx_fsv4_1x1.h @@ -32,7 +32,7 @@ class ConvolutionKernel_imad_b_fs_yx_fsv4_1x1 : public ConvolutionKernelBase { protected: bool Validate(const Params& params, const optional_params& options) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& params, int autoTuneIndex = -1) const override; WeightsLayout GetPreferredWeightsLayout(const convolution_params &) const override { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_b_fs_yx_fsv4_dw.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_b_fs_yx_fsv4_dw.cpp index 43b83e2681a9eb..1e1a229df17a13 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_b_fs_yx_fsv4_dw.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_b_fs_yx_fsv4_dw.cpp @@ -273,8 +273,8 @@ ConvolutionKernel_imad_b_fs_yx_fsv4_dw::AutoTuneParams ConvolutionKernel_imad_b_ } JitConstants ConvolutionKernel_imad_b_fs_yx_fsv4_dw::GetJitConstants(const convolution_params& params, - const DispatchData& kd) const { - auto mem_consts = Parent::GetJitConstants(params, kd); + const DispatchData& dispatchData) const { + auto mem_consts = Parent::GetJitConstants(params, dispatchData); size_t filter_block_size = 4; size_t min_blocked_leftovers = 4; @@ -288,7 +288,7 @@ JitConstants 
ConvolutionKernel_imad_b_fs_yx_fsv4_dw::GetJitConstants(const convo } mem_consts.AddConstant(MakeJitConstant("FILTER_BLOCKED", filter_blocked)); - auto& work_mode = kd.cldnnStyle.prefetch; + auto& work_mode = dispatchData.cldnnStyle.prefetch; bool tiled = (work_mode & mode::tiled) != 0; bool preload_input = (work_mode & mode::preload_input) != 0; bool preload_weights = (work_mode & mode::preload_weights) != 0; @@ -300,21 +300,21 @@ JitConstants ConvolutionKernel_imad_b_fs_yx_fsv4_dw::GetJitConstants(const convo if (tiled) { preload_weights = true; - simd = kd.lws0; - tile_x = kd.cldnnStyle.blockWidth; - tile_y = kd.cldnnStyle.blockHeight; + simd = dispatchData.lws[0]; + tile_x = dispatchData.cldnnStyle.blockWidth; + tile_y = dispatchData.cldnnStyle.blockHeight; input_line_size = 1; output_block_x = 1; } else if (preload_input) { tile_x = 1; - tile_y = kd.cldnnStyle.blockHeight; - output_block_x = kd.cldnnStyle.blockWidth; + tile_y = dispatchData.cldnnStyle.blockHeight; + output_block_x = dispatchData.cldnnStyle.blockWidth; input_line_size = (output_block_x - 1) * params.stride.x + (params.weights.X().v - 1) * params.dilation.x + 1; } else { tile_x = 1; tile_y = 1; input_line_size = 1; - output_block_x = kd.cldnnStyle.blockWidth; + output_block_x = dispatchData.cldnnStyle.blockWidth; } mem_consts.AddConstant(MakeJitConstant("TILED", tiled)); @@ -345,7 +345,7 @@ JitConstants ConvolutionKernel_imad_b_fs_yx_fsv4_dw::GetJitConstants(const convo ConvolutionKernelBase::DispatchData ConvolutionKernel_imad_b_fs_yx_fsv4_dw::SetDefault(const convolution_params& params, int autoTuneIndex) const { - DispatchData kd; + DispatchData dispatchData; auto& out = params.output; auto autoTuneParam = GetAutoTuneParams(params, autoTuneIndex); @@ -357,34 +357,26 @@ ConvolutionKernelBase::DispatchData ConvolutionKernel_imad_b_fs_yx_fsv4_dw::SetD global_x = global_x * autoTuneParam.tiled_simd; } - std::vector global = { global_x, global_y, CeilDiv(out.Feature().v, fsv) * out.Batch().v }; - 
std::vector local = { 1, 1, 1 }; + dispatchData.gws = { global_x, global_y, CeilDiv(out.Feature().v, fsv) * out.Batch().v }; + dispatchData.lws = { 1, 1, 1 }; if (autoTuneParam.tiled) { - local[0] = autoTuneParam.tiled_simd; + dispatchData.lws[0] = autoTuneParam.tiled_simd; } else { - local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); } - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; + dispatchData.gemmStyle = { 0, 0, 0, 0, 0, 0 }; - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; + dispatchData.cldnnStyle.blockWidth = autoTuneParam.block_x; + dispatchData.cldnnStyle.blockHeight = autoTuneParam.block_y; + dispatchData.cldnnStyle.prefetch = (static_cast(autoTuneParam.tiled) * mode::tiled) + | (static_cast(autoTuneParam.preload_input) * mode::preload_input) + | (static_cast(autoTuneParam.preload_weights) * mode::preload_weights); - kd.gemmStyle = { 0, 0, 0, 0, 0, 0 }; + dispatchData.efficiency = FORCE_PRIORITY_1; - kd.cldnnStyle.blockWidth = autoTuneParam.block_x; - kd.cldnnStyle.blockHeight = autoTuneParam.block_y; - kd.cldnnStyle.prefetch = (static_cast(autoTuneParam.tiled) * mode::tiled) - | (static_cast(autoTuneParam.preload_input) * mode::preload_input) - | (static_cast(autoTuneParam.preload_weights) * mode::preload_weights); - - kd.efficiency = FORCE_PRIORITY_1; - - return kd; + return dispatchData; } // SetDefault KernelsData ConvolutionKernel_imad_b_fs_yx_fsv4_dw::GetTunedKernelsDataByIndex(const Params& params, @@ -400,7 +392,7 @@ KernelsData ConvolutionKernel_imad_b_fs_yx_fsv4_dw::GetKernelsData(const Params& } KernelsData ConvolutionKernel_imad_b_fs_yx_fsv4_dw::GetKernelsDataForAutoTune(const Params& params, - const optional_params& options) const { + const optional_params& options) const { if (!Validate(params, options)) { return {}; } diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_b_fs_yx_fsv4_dw.hpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_b_fs_yx_fsv4_dw.hpp index 8021e7d3b65b90..06a21a559cbc63 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_b_fs_yx_fsv4_dw.hpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_b_fs_yx_fsv4_dw.hpp @@ -32,7 +32,7 @@ class ConvolutionKernel_imad_b_fs_yx_fsv4_dw : public ConvolutionKernelBase { protected: bool Validate(const Params& params, const optional_params& options) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& params, int autoTuneIndex = -1) const override; bool NeedPaddedInput() const override { return false; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_1x1.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_1x1.cpp index 4205064bb57e85..4a80120bc6abe3 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_1x1.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_1x1.cpp @@ -57,8 +57,8 @@ KernelsData Convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_1x1::GetKernelsData(con return GetCommonKernelsData(params, options); } -JitConstants Convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_1x1::GetJitConstants(const convolution_params& 
params, const DispatchData& kd) const { - auto mem_consts = Parent::GetJitConstants(params, kd); +JitConstants Convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_1x1::GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const { + auto mem_consts = Parent::GetJitConstants(params, dispatchData); if (!params.fused_ops.empty()) { auto input_dt = GetActivationType(params); FusedOpsConfiguration conf_scalar = {"", @@ -77,26 +77,18 @@ JitConstants Convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_1x1::GetJitConstants(c } // GetJitConstants ConvolutionKernelBase::DispatchData Convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_1x1::SetDefault(const convolution_params& params, int) const { - DispatchData kd; + DispatchData dispatchData; const auto& output = params.output; - std::vector global = {output.X().v, output.Y().v, output.Feature().v / 32 * output.Batch().v}; - std::vector local = {1, 1, SIMD_SIZE}; + dispatchData.gws = { output.X().v, output.Y().v, output.Feature().v / 32 * output.Batch().v }; + dispatchData.lws = { 1, 1, SIMD_SIZE}; - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; + dispatchData.cldnnStyle = {0, 0, 0, 0, 0}; + dispatchData.gemmStyle = {0, 0, 0, 0, 0, 0}; - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; + dispatchData.efficiency = FORCE_PRIORITY_2; - kd.cldnnStyle = {0, 0, 0, 0, 0}; - kd.gemmStyle = {0, 0, 0, 0, 0, 0}; - - kd.efficiency = FORCE_PRIORITY_2; - - return kd; + return dispatchData; } // SetDefault bool Convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_1x1::Validate(const Params& params, const optional_params& options) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_1x1.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_1x1.h index 20914735e9cacd..8f262803b32b9c 100644 --- 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_1x1.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_1x1.h @@ -32,7 +32,7 @@ class Convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_1x1 : public ConvolutionKerne protected: bool Validate(const Params& params, const optional_params& options) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& params, int autoTuneIndex = -1) const override; bool NeedPaddedInput() const override { return true; } WeightsLayout GetPreferredWeightsLayout(const convolution_params&) const override { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_3x3.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_3x3.cpp index f87066a0408f25..fda27f2d80c7d3 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_3x3.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_3x3.cpp @@ -57,8 +57,8 @@ KernelsData Convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_3x3::GetKernelsData(con return GetCommonKernelsData(params, options); } -JitConstants Convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_3x3::GetJitConstants(const convolution_params& params, const DispatchData& kd) const { - auto mem_consts = Parent::GetJitConstants(params, kd); +JitConstants Convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_3x3::GetJitConstants(const convolution_params& params, 
const DispatchData& dispatchData) const { + auto mem_consts = Parent::GetJitConstants(params, dispatchData); if (!params.fused_ops.empty()) { auto input_dt = GetActivationType(params); @@ -77,26 +77,18 @@ JitConstants Convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_3x3::GetJitConstants(c } // GetJitConstants ConvolutionKernelBase::DispatchData Convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_3x3::SetDefault(const convolution_params& params, int) const { - DispatchData kd; + DispatchData dispatchData; const auto& output = params.output; - std::vector global = {output.X().v, output.Y().v, output.Feature().v / 16 * output.Batch().v}; - std::vector local = {1, 1, SIMD_SIZE}; + dispatchData.gws = { output.X().v, output.Y().v, output.Feature().v / 16 * output.Batch().v }; + dispatchData.lws = { 1, 1, SIMD_SIZE }; - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; + dispatchData.cldnnStyle = {0, 0, 0, 0, 0}; + dispatchData.gemmStyle = {0, 0, 0, 0, 0, 0}; - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; + dispatchData.efficiency = FORCE_PRIORITY_2; - kd.cldnnStyle = {0, 0, 0, 0, 0}; - kd.gemmStyle = {0, 0, 0, 0, 0, 0}; - - kd.efficiency = FORCE_PRIORITY_2; - - return kd; + return dispatchData; } // SetDefault bool Convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_3x3::Validate(const Params& params, const optional_params& options) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_3x3.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_3x3.h index eaf7d0748e2e51..677ccfe8829e83 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_3x3.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_3x3.h @@ -32,7 +32,7 @@ 
class Convolution_kernel_imad_bs_fs_yx_bsv16_fsv16_3x3 : public ConvolutionKerne protected: bool Validate(const Params& params, const optional_params& options) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& params, int autoTuneIndex = -1) const override; bool NeedPaddedInput() const override { return true; } WeightsLayout GetPreferredWeightsLayout(const convolution_params&) const override { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_b_fs_yx_fsv32.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_b_fs_yx_fsv32.cpp index 01eefa02cbb9b1..cb109867c93b8c 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_b_fs_yx_fsv32.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_b_fs_yx_fsv32.cpp @@ -96,46 +96,46 @@ ConvolutionKernel_mmad_b_fs_yx_fsv32::AutoTuneOption ConvolutionKernel_mmad_b_fs ConvolutionKernelBase::DispatchData ConvolutionKernel_mmad_b_fs_yx_fsv32::SetDefault(const convolution_params& cp, int autoTuneIndex) const { - DispatchData runInfo = ConvolutionKernelBase::SetDefault(cp); + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(cp); auto tuneOptions = GetAutoTuneOptions(cp, autoTuneIndex); - runInfo.cldnnStyle.blockWidth = tuneOptions.blockWidth; - runInfo.cldnnStyle.blockHeight = tuneOptions.blockHeight; - runInfo.cldnnStyle.prefetch = tuneOptions.prefetch; + dispatchData.cldnnStyle.blockWidth = tuneOptions.blockWidth; + dispatchData.cldnnStyle.blockHeight = tuneOptions.blockHeight; + dispatchData.cldnnStyle.prefetch = tuneOptions.prefetch; - 
runInfo.efficiency = FORCE_PRIORITY_3; + dispatchData.efficiency = FORCE_PRIORITY_3; size_t ow_group = 8; while (ow_group > 1) { - if (CeilDiv(cp.output.X().v, runInfo.cldnnStyle.blockWidth) % ow_group == 0) + if (CeilDiv(cp.output.X().v, dispatchData.cldnnStyle.blockWidth) % ow_group == 0) break; ow_group--; } - runInfo.gws0 = Align(cp.output.Feature().v, 32) / 4; - runInfo.gws1 = Align(CeilDiv(cp.output.X().v, runInfo.cldnnStyle.blockWidth), ow_group) * cp.output.Y().v * cp.output.Z().v; - runInfo.gws2 = cp.output.Batch().v; + dispatchData.gws[0] = Align(cp.output.Feature().v, 32) / 4; + dispatchData.gws[1] = Align(CeilDiv(cp.output.X().v, dispatchData.cldnnStyle.blockWidth), ow_group) * cp.output.Y().v * cp.output.Z().v; + dispatchData.gws[2] = cp.output.Batch().v; - runInfo.lws0 = 8; - runInfo.lws1 = ow_group; - runInfo.lws2 = 1; + dispatchData.lws[0] = 8; + dispatchData.lws[1] = ow_group; + dispatchData.lws[2] = 1; - return runInfo; + return dispatchData; } JitConstants ConvolutionKernel_mmad_b_fs_yx_fsv32::GetJitConstants(const convolution_params& params, - const DispatchData& runInfo) const { - auto jit = Parent::GetJitConstants(params, runInfo); + const DispatchData& dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); - jit.AddConstant(MakeJitConstant("OW_GROUP", runInfo.lws1)); - jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", runInfo.lws0)); + jit.AddConstant(MakeJitConstant("OW_GROUP", dispatchData.lws[1])); + jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", dispatchData.lws[0])); jit.AddConstant(MakeJitConstant("OSV_SIZE", 32)); jit.AddConstant(MakeJitConstant("ISV_SIZE", 32)); - jit.AddConstant(MakeJitConstant("X_BLOCK_SIZE", runInfo.cldnnStyle.blockWidth)); + jit.AddConstant(MakeJitConstant("X_BLOCK_SIZE", dispatchData.cldnnStyle.blockWidth)); jit.AddConstant(MakeJitConstant("IFM_BLOCKS", CeilDiv(params.inputs[0].Feature().v, 32))); auto input = params.inputs[0]; auto output = params.output; - auto blockWidth = 
runInfo.cldnnStyle.blockWidth; + auto blockWidth = dispatchData.cldnnStyle.blockWidth; size_t input_line_size = params.stride.x * (blockWidth - 1) + (params.weights.X().v - 1)*params.dilation.x + 1; jit.AddConstant(MakeJitConstant("OUTPUT_X_BLOCK_SIZE", blockWidth)); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_b_fs_yx_fsv32.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_b_fs_yx_fsv32.h index 933f6c73bfd3af..d88972bc5fe7e5 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_b_fs_yx_fsv32.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_b_fs_yx_fsv32.h @@ -33,7 +33,7 @@ class ConvolutionKernel_mmad_b_fs_yx_fsv32 : public ConvolutionKernelBase { protected: bool Validate(const Params& p, const optional_params& o) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; bool NeedPaddedInput() const override { return false; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_b_fs_yx_fsv32_dw.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_b_fs_yx_fsv32_dw.cpp index 35926d89412c24..862800a7713061 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_b_fs_yx_fsv32_dw.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_b_fs_yx_fsv32_dw.cpp @@ -75,28 +75,20 @@ bool 
ConvolutionKernel_mmad_b_fs_yx_fsv32_dw::Validate(const Params& p, const op ConvolutionKernelBase::DispatchData ConvolutionKernel_mmad_b_fs_yx_fsv32_dw::SetDefault(const convolution_params& cp, int /*autoTuneIndex*/) const { - DispatchData runInfo = ConvolutionKernelBase::SetDefault(cp); + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(cp); - runInfo.efficiency = FORCE_PRIORITY_3; + dispatchData.efficiency = FORCE_PRIORITY_3; - std::vector global = {cp.output.Feature().v, cp.output.X().v * cp.output.Y().v, cp.output.Batch().v}; + dispatchData.gws = { cp.output.Feature().v, cp.output.X().v * cp.output.Y().v, cp.output.Batch().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, cp.engineInfo); - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; - - auto local = GetOptimalLocalWorkGroupSizes(global, cp.engineInfo); - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; - - return runInfo; + return dispatchData; } // TODO: optimize this kernel JitConstants ConvolutionKernel_mmad_b_fs_yx_fsv32_dw::GetJitConstants(const convolution_params& params, - const DispatchData& runInfo) const { - auto jit = Parent::GetJitConstants(params, runInfo); + const DispatchData& dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); if (!params.fused_ops.empty()) { auto input_dt = GetActivationType(params); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_b_fs_yx_fsv32_dw.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_b_fs_yx_fsv32_dw.h index 1e1efb3c861825..26b3e45d1f41ee 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_b_fs_yx_fsv32_dw.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_b_fs_yx_fsv32_dw.h 
@@ -33,7 +33,7 @@ class ConvolutionKernel_mmad_b_fs_yx_fsv32_dw : public ConvolutionKernelBase { protected: bool Validate(const Params& p, const optional_params& o) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; WeightsLayout GetPreferredWeightsLayout(const convolution_params &) const override { return WeightsLayout::goiyx; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv32.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv32.cpp index cf352d5a408971..551c2aa28c5f8d 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv32.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv32.cpp @@ -84,7 +84,7 @@ bool ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv32::Validate(const Params &p, con } ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv32::AutoTuneOption ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv32::GetAutoTuneOptions(const Params &p, - int autoTuneIndex) const { + int autoTuneIndex) const { if ((autoTuneIndex >= 0) && (autoTuneIndex < static_cast(autoTuneOptions.size()))) { return autoTuneOptions[autoTuneIndex]; } @@ -150,50 +150,50 @@ static size_t get_lws(const convolution_params &cp, size_t blocks_count, size_t ConvolutionKernelBase::DispatchData ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv32::SetDefault(const convolution_params &cp, int autoTuneIndex) const { - DispatchData runInfo = ConvolutionKernelBase::SetDefault(cp); + DispatchData dispatchData = 
ConvolutionKernelBase::SetDefault(cp); auto tuneOptions = GetAutoTuneOptions(cp, autoTuneIndex); - runInfo.cldnnStyle.blockWidth = tuneOptions.blockWidth; - runInfo.cldnnStyle.blockHeight = tuneOptions.blockHeight; - runInfo.cldnnStyle.prefetch = tuneOptions.prefetch; + dispatchData.cldnnStyle.blockWidth = tuneOptions.blockWidth; + dispatchData.cldnnStyle.blockHeight = tuneOptions.blockHeight; + dispatchData.cldnnStyle.prefetch = tuneOptions.prefetch; - runInfo.efficiency = FORCE_PRIORITY_3; + dispatchData.efficiency = FORCE_PRIORITY_3; const size_t max_lws = std::max((size_t)1, cp.engineInfo.maxWorkGroupSize / sub_group_size); - runInfo.gws0 = Align(cp.output.Feature().v, 32) / 2; - runInfo.gws1 = CeilDiv(cp.output.X().v, runInfo.cldnnStyle.blockWidth); - runInfo.gws2 = cp.output.Batch().v * cp.output.Y().v * cp.output.Z().v; + dispatchData.gws[0] = Align(cp.output.Feature().v, 32) / 2; + dispatchData.gws[1] = CeilDiv(cp.output.X().v, dispatchData.cldnnStyle.blockWidth); + dispatchData.gws[2] = cp.output.Batch().v * cp.output.Y().v * cp.output.Z().v; - runInfo.lws0 = sub_group_size; - runInfo.lws1 = get_lws(cp, runInfo.gws1, tuneOptions.blockWidth, max_lws); - runInfo.lws2 = 1; + dispatchData.lws[0] = sub_group_size; + dispatchData.lws[1] = get_lws(cp, dispatchData.gws[1], tuneOptions.blockWidth, max_lws); + dispatchData.lws[2] = 1; - return runInfo; + return dispatchData; } JitConstants ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv32::GetJitConstants(const convolution_params ¶ms, - const DispatchData &runInfo) const { - auto jit = Parent::GetJitConstants(params, runInfo); + const DispatchData &dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); - jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", runInfo.lws0)); - jit.AddConstant(MakeJitConstant("LWS0", runInfo.lws0)); - jit.AddConstant(MakeJitConstant("LWS1", runInfo.lws1)); - jit.AddConstant(MakeJitConstant("LWS2", runInfo.lws2)); + jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", 
dispatchData.lws[0])); + jit.AddConstant(MakeJitConstant("LWS0", dispatchData.lws[0])); + jit.AddConstant(MakeJitConstant("LWS1", dispatchData.lws[1])); + jit.AddConstant(MakeJitConstant("LWS2", dispatchData.lws[2])); jit.AddConstant(MakeJitConstant("OSV", 32)); - jit.AddConstant(MakeJitConstant("X_BLOCK_SIZE", runInfo.cldnnStyle.blockWidth)); + jit.AddConstant(MakeJitConstant("X_BLOCK_SIZE", dispatchData.cldnnStyle.blockWidth)); auto input = params.inputs[0]; auto output = params.output; - auto blockWidth = runInfo.cldnnStyle.blockWidth; - size_t slm_line_size = params.stride.x * (runInfo.lws1 * blockWidth - 1) + (params.weights.X().v - 1) * params.dilation.x + 1; - size_t slm_chunk_size = slm_line_size / runInfo.lws1; - size_t slm_tail = slm_line_size % runInfo.lws1; - size_t slm_line_aligned = slm_chunk_size*runInfo.lws1 + Align(slm_tail, sub_group_size); + auto blockWidth = dispatchData.cldnnStyle.blockWidth; + size_t slm_line_size = params.stride.x * (dispatchData.lws[1] * blockWidth - 1) + (params.weights.X().v - 1) * params.dilation.x + 1; + size_t slm_chunk_size = slm_line_size / dispatchData.lws[1]; + size_t slm_tail = slm_line_size % dispatchData.lws[1]; + size_t slm_line_aligned = slm_chunk_size*dispatchData.lws[1] + Align(slm_tail, sub_group_size); size_t input_line_size = params.stride.x * (blockWidth - 1) + (params.weights.X().v - 1) * params.dilation.x + 1; jit.AddConstant(MakeJitConstant("INPUT_LINE_SIZE", input_line_size)); jit.AddConstant(MakeJitConstant("OUTPUT_X_BLOCK_SIZE", blockWidth)); - jit.AddConstant(MakeJitConstant("GROUP_SIZE", blockWidth * runInfo.lws1)); + jit.AddConstant(MakeJitConstant("GROUP_SIZE", blockWidth * dispatchData.lws[1])); jit.AddConstant(MakeJitConstant("SLM_LINE_SIZE", slm_line_aligned)); jit.AddConstant(MakeJitConstant("SLM_CHUNK_SIZE", slm_chunk_size)); jit.AddConstant(MakeJitConstant("SLM_TAIL", slm_tail)); diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv32.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv32.h index 7ed3da4e5b0fc3..8bf8428c0bfa35 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv32.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv32.h @@ -33,7 +33,7 @@ class ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv32 : public ConvolutionKernelBas protected: bool Validate(const Params& p, const optional_params& o) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; WeightsLayout GetPreferredWeightsLayout(const convolution_params &p) const override { if (p.output.GetDType() == Datatype::F16 || p.output.GetDType() == Datatype::F32 || diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv4.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv4.cpp index f9110ac9f7484f..456d9b17c1c2e5 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv4.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv4.cpp @@ -87,39 +87,39 @@ ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv4::AutoTuneOption ConvolutionKernel_mm } ConvolutionKernelBase::DispatchData 
ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv4::SetDefault(const convolution_params &cp, - int autoTuneIndex) const { - DispatchData runInfo = ConvolutionKernelBase::SetDefault(cp); + int autoTuneIndex) const { + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(cp); auto tuneOptions = GetAutoTuneOptions(cp, autoTuneIndex); - runInfo.cldnnStyle.blockWidth = tuneOptions.blockWidth; - runInfo.cldnnStyle.blockHeight = tuneOptions.blockHeight; - runInfo.cldnnStyle.prefetch = tuneOptions.prefetch; + dispatchData.cldnnStyle.blockWidth = tuneOptions.blockWidth; + dispatchData.cldnnStyle.blockHeight = tuneOptions.blockHeight; + dispatchData.cldnnStyle.prefetch = tuneOptions.prefetch; - runInfo.efficiency = FORCE_PRIORITY_3; + dispatchData.efficiency = FORCE_PRIORITY_3; - runInfo.gws0 = Align(cp.output.Feature().v, 32) / 2; - runInfo.gws1 = CeilDiv(cp.output.X().v, runInfo.cldnnStyle.blockWidth) * cp.output.Y().v; - runInfo.gws2 = cp.output.Batch().v; + dispatchData.gws[0] = Align(cp.output.Feature().v, 32) / 2; + dispatchData.gws[1] = CeilDiv(cp.output.X().v, dispatchData.cldnnStyle.blockWidth) * cp.output.Y().v; + dispatchData.gws[2] = cp.output.Batch().v; - runInfo.lws0 = 16; - runInfo.lws1 = 1; - runInfo.lws2 = 1; + dispatchData.lws[0] = 16; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - return runInfo; + return dispatchData; } JitConstants ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv4::GetJitConstants(const convolution_params ¶ms, - const DispatchData &runInfo) const { - auto jit = Parent::GetJitConstants(params, runInfo); + const DispatchData &dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); - jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", runInfo.lws0)); + jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", dispatchData.lws[0])); jit.AddConstant(MakeJitConstant("OSV", 32)); jit.AddConstant(MakeJitConstant("ISV", 32)); - jit.AddConstant(MakeJitConstant("X_BLOCK_SIZE", runInfo.cldnnStyle.blockWidth)); + 
jit.AddConstant(MakeJitConstant("X_BLOCK_SIZE", dispatchData.cldnnStyle.blockWidth)); jit.AddConstant(MakeJitConstant("IFM_BLOCKS", CeilDiv(params.inputs[0].Feature().v, 32))); auto input = params.inputs[0]; auto output = params.output; - auto blockWidth = runInfo.cldnnStyle.blockWidth; + auto blockWidth = dispatchData.cldnnStyle.blockWidth; size_t input_line_size = std::min(params.stride.x * (blockWidth - 1) + (params.weights.X().v - 1) * params.dilation.x + 1, input.X().v + input.X().pad.Total()); @@ -149,7 +149,7 @@ KernelsData ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv4::GetKernelsData(const Pa } KernelsData ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv4::GetKernelsDataForAutoTune(const Params ¶ms, - const optional_params &options) const { + const optional_params &options) const { if (!Validate(params, options)) { return {}; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv4.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv4.h index 93c7a181b9ed01..aa8e59352a7f01 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv4.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv4.h @@ -33,7 +33,7 @@ class ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv4 : public ConvolutionKernelBase protected: bool Validate(const Params& p, const optional_params& o) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; WeightsLayout GetPreferredWeightsLayout(const convolution_params &) const 
override { return WeightsLayout::os_is_yx_osv32_isv4_swizzled_by_2; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_ref.cpp index 764f753cfd2722..da4b5cd1e032a4 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_ref.cpp @@ -1,5 +1,5 @@ /* -// Copyright (c) 2016-2019 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -67,8 +67,8 @@ KernelsData ConvolutionKernel_Ref::GetKernelsData(const Params& params, const op return GetTunedKernelsDataByIndex(params, options); } -JitConstants ConvolutionKernel_Ref::GetJitConstants(const convolution_params& params, const DispatchData& kd) const { - JitConstants jit = ConvolutionKernelBase::GetJitConstants(params, kd); +JitConstants ConvolutionKernel_Ref::GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const { + JitConstants jit = ConvolutionKernelBase::GetJitConstants(params, dispatchData); Datatype accumulator_dt; Datatype activation_dt; @@ -100,7 +100,7 @@ JitConstants ConvolutionKernel_Ref::GetJitConstants(const convolution_params& pa ConvolutionKernelBase::DispatchData ConvolutionKernel_Ref::SetDefault(const convolution_params& params, int autoTuneIndex) const { - DispatchData kd = ConvolutionKernelBase::SetDefault(params, autoTuneIndex); + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(params, autoTuneIndex); // FIXME: ConvolutionKernelBase::SetDefault should probably be pure and // not setting these at all as it's something specific to a concrete @@ -111,18 +111,9 @@ 
ConvolutionKernelBase::DispatchData ConvolutionKernel_Ref::SetDefault(const conv // Just set the correct value for a particular implementation here, // until the whole hierarchy is re-written. const auto& out = params.output; - std::vector global = {out.X().v, out.Y().v * out.Z().v, out.Feature().v * out.Batch().v}; - - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - return kd; + dispatchData.gws = {out.X().v, out.Y().v * out.Z().v, out.Feature().v * out.Batch().v}; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); + return dispatchData; } bool ConvolutionKernel_Ref::Validate(const Params& params, const optional_params& options) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_ref.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_ref.h index 5e27f6853db99a..eeb3a9c69359b9 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_ref.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_ref.h @@ -43,7 +43,7 @@ class ConvolutionKernel_Ref : public ConvolutionKernelBase { FusedOpType::ACTIVATION }; } - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& params, int autoTuneIndex = -1) const override; bool Validate(const Params& params, const optional_params& options) const override; }; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_2x3_s1.cpp 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_2x3_s1.cpp index ac920dea3f79be..f7f5637b3bac78 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_2x3_s1.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_2x3_s1.cpp @@ -44,8 +44,8 @@ ParamsKey ConvolutionKernel_Winograd_2x3_s1::GetSupportedKey() const { } JitConstants ConvolutionKernel_Winograd_2x3_s1::GetJitConstants(const convolution_params& params, - const DispatchData& runInfo) const { - JitConstants jit = Parent::GetJitConstants(params, runInfo); + const DispatchData& dispatchData) const { + JitConstants jit = Parent::GetJitConstants(params, dispatchData); const size_t input_tile_width = winograd_input_tile_width; const size_t input_tile_height = winograd_input_tile_height; @@ -70,10 +70,9 @@ JitConstants ConvolutionKernel_Winograd_2x3_s1::GetJitConstants(const convolutio return jit; } -ConvolutionKernel_Winograd_2x3_s1::Parent::DispatchData ConvolutionKernel_Winograd_2x3_s1::SetDefault( - const convolution_params& arg, - int) const { - Parent::DispatchData runInfo = Parent::SetDefault(arg); +ConvolutionKernel_Winograd_2x3_s1::Parent::DispatchData ConvolutionKernel_Winograd_2x3_s1::SetDefault(const convolution_params& arg, + int) const { + Parent::DispatchData dispatchData = Parent::SetDefault(arg); const size_t tile_n = winograd_tile_n; // goes in-depth const size_t tile_m = winograd_tile_m; // goes over flattened x and y @@ -86,17 +85,17 @@ ConvolutionKernel_Winograd_2x3_s1::Parent::DispatchData ConvolutionKernel_Winogr // width by tile's width to get tiles count const size_t nr_tiles_y = Align(arg.output.Y().v, 8) / input_tile_height; - runInfo.gws0 = arg.output.Feature().v / tile_n; - runInfo.gws1 = nr_tiles_x * nr_tiles_y / tile_m; - runInfo.gws2 = input_tile_width * input_tile_height * 
arg.inputs[0].Batch().v; + dispatchData.gws[0] = arg.output.Feature().v / tile_n; + dispatchData.gws[1] = nr_tiles_x * nr_tiles_y / tile_m; + dispatchData.gws[2] = input_tile_width * input_tile_height * arg.inputs[0].Batch().v; - runInfo.lws0 = 8; - runInfo.lws1 = 1; - runInfo.lws2 = 1; + dispatchData.lws[0] = 8; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - runInfo.efficiency = FORCE_PRIORITY_4; + dispatchData.efficiency = FORCE_PRIORITY_4; - return runInfo; + return dispatchData; } bool ConvolutionKernel_Winograd_2x3_s1::Validate(const Params& p, const optional_params& o) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_2x3_s1.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_2x3_s1.h index e9f032a6a2100c..66d7b5066029b6 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_2x3_s1.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_2x3_s1.h @@ -34,8 +34,8 @@ class ConvolutionKernel_Winograd_2x3_s1 : public ConvolutionKernelBase { return WeightsLayout::winograd_2x3_s1_weights; } - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; bool Validate(const Params& p, const optional_params& o) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_2x3_s1_fused.cpp 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_2x3_s1_fused.cpp index 43db00b35ea121..b24d05bfa72ef7 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_2x3_s1_fused.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_2x3_s1_fused.cpp @@ -38,8 +38,8 @@ ParamsKey ConvolutionKernel_Winograd_2x3_s1_fused::GetSupportedKey() const { } JitConstants ConvolutionKernel_Winograd_2x3_s1_fused::GetJitConstants(const convolution_params& params, - const DispatchData& runInfo) const { - JitConstants jit = Parent::GetJitConstants(params, runInfo); + const DispatchData& dispatchData) const { + JitConstants jit = Parent::GetJitConstants(params, dispatchData); const auto idepth = params.inputs[0].Feature().v; const auto input_pad_y = params.inputs[0].Y().pad.before + params.inputs[0].Y().pad.after; @@ -83,7 +83,7 @@ JitConstants ConvolutionKernel_Winograd_2x3_s1_fused::GetJitConstants(const conv ConvolutionKernel_Winograd_2x3_s1_fused::Parent::DispatchData ConvolutionKernel_Winograd_2x3_s1_fused::SetDefault( const convolution_params& arg, int) const { - Parent::DispatchData runInfo = Parent::SetDefault(arg); + Parent::DispatchData dispatchData = Parent::SetDefault(arg); const auto odepth = arg.output.Feature().v; const auto input_pad_y = arg.inputs[0].Y().pad.before + arg.inputs[0].Y().pad.after; @@ -100,21 +100,21 @@ ConvolutionKernel_Winograd_2x3_s1_fused::Parent::DispatchData ConvolutionKernel_ auto K = odepth; auto N = 1; - uint32_t global_step[3] = {14, 4, 16 * 8}; - uint32_t local_size[3] = {8, 2, 8}; + size_t global_step[3] = {14, 4, 16 * 8}; + size_t local_size[3] = {8, 2, 8}; - uint32_t zStep = local_size[2]; - runInfo.gws0 = ((uint32_t)((Q + global_step[0] - 1)) / global_step[0]) * local_size[0]; - runInfo.gws1 = ((uint32_t)((P + global_step[1] - 1)) / global_step[1]) * 
local_size[1]; - runInfo.gws2 = ((uint32_t)((N * K * 8 + global_step[2] - 1)) / global_step[2]) * zStep; + size_t zStep = local_size[2]; + dispatchData.gws[0] = ((size_t)((Q + global_step[0] - 1)) / global_step[0]) * local_size[0]; + dispatchData.gws[1] = ((size_t)((P + global_step[1] - 1)) / global_step[1]) * local_size[1]; + dispatchData.gws[2] = ((size_t)((N * K * 8 + global_step[2] - 1)) / global_step[2]) * zStep; - runInfo.lws0 = local_size[0]; - runInfo.lws1 = local_size[1]; - runInfo.lws2 = local_size[2]; + dispatchData.lws[0] = local_size[0]; + dispatchData.lws[1] = local_size[1]; + dispatchData.lws[2] = local_size[2]; - runInfo.efficiency = FORCE_PRIORITY_2; + dispatchData.efficiency = FORCE_PRIORITY_2; - return runInfo; + return dispatchData; } bool ConvolutionKernel_Winograd_2x3_s1_fused::Validate(const Params& p, const optional_params& o) const { @@ -141,4 +141,4 @@ KernelsData ConvolutionKernel_Winograd_2x3_s1_fused::GetKernelsData(const Params const optional_params& options) const { return GetTunedKernelsDataByIndex(params, options); } -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_2x3_s1_fused.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_2x3_s1_fused.h index 269bd19f4dbaa0..6cfdbde878e802 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_2x3_s1_fused.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_2x3_s1_fused.h @@ -34,8 +34,8 @@ class ConvolutionKernel_Winograd_2x3_s1_fused : public ConvolutionKernelBase { return WeightsLayout::winograd_2x3_s1_fused_weights; } - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + 
JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; bool Validate(const Params& p, const optional_params& o) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_6x3_s1_fused.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_6x3_s1_fused.cpp index 61be9ab926274e..42fc634527b74f 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_6x3_s1_fused.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_6x3_s1_fused.cpp @@ -39,8 +39,8 @@ ParamsKey ConvolutionKernel_Winograd_6x3_s1_fused::GetSupportedKey() const { } JitConstants ConvolutionKernel_Winograd_6x3_s1_fused::GetJitConstants(const convolution_params& params, - const DispatchData& runInfo) const { - JitConstants jit = Parent::GetJitConstants(params, runInfo); + const DispatchData& dispatchData) const { + JitConstants jit = Parent::GetJitConstants(params, dispatchData); const auto idepth = params.inputs[0].Feature().v; const auto input_pad_y = params.inputs[0].Y().pad.before + params.inputs[0].Y().pad.after; @@ -95,7 +95,7 @@ WeightsLayout ConvolutionKernel_Winograd_6x3_s1_fused::GetPreferredWeightsLayout ConvolutionKernel_Winograd_6x3_s1_fused::Parent::DispatchData ConvolutionKernel_Winograd_6x3_s1_fused::SetDefault( const convolution_params& arg, int) const { - Parent::DispatchData runInfo = Parent::SetDefault(arg); + Parent::DispatchData dispatchData = Parent::SetDefault(arg); const auto odepth = arg.output.Feature().v; const auto input_pad_y = arg.inputs[0].Y().pad.before + 
arg.inputs[0].Y().pad.after; @@ -115,17 +115,17 @@ ConvolutionKernel_Winograd_6x3_s1_fused::Parent::DispatchData ConvolutionKernel_ uint32_t global_step[3] = {14, 6, 16 * 8}; uint32_t local_size[3] = {16, 1, 8}; - runInfo.gws0 = ((uint32_t)((Q + global_step[0] - 1)) / global_step[0]) * local_size[0]; - runInfo.gws1 = ((uint32_t)((P + global_step[1] - 1)) / global_step[1]) * local_size[1]; - runInfo.gws2 = ((uint32_t)((N * K * 8 + global_step[2] - 1)) / global_step[2]) * local_size[2]; + dispatchData.gws[0] = ((uint32_t)((Q + global_step[0] - 1)) / global_step[0]) * local_size[0]; + dispatchData.gws[1] = ((uint32_t)((P + global_step[1] - 1)) / global_step[1]) * local_size[1]; + dispatchData.gws[2] = ((uint32_t)((N * K * 8 + global_step[2] - 1)) / global_step[2]) * local_size[2]; - runInfo.lws0 = local_size[0]; - runInfo.lws1 = local_size[1]; - runInfo.lws2 = local_size[2]; + dispatchData.lws[0] = local_size[0]; + dispatchData.lws[1] = local_size[1]; + dispatchData.lws[2] = local_size[2]; - runInfo.efficiency = FORCE_PRIORITY_1; + dispatchData.efficiency = FORCE_PRIORITY_1; - return runInfo; + return dispatchData; } bool ConvolutionKernel_Winograd_6x3_s1_fused::Validate(const Params& p, const optional_params& o) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_6x3_s1_fused.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_6x3_s1_fused.h index 6c4a522137178f..c5c657cfa0f7f8 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_6x3_s1_fused.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_winograd_6x3_s1_fused.h @@ -30,9 +30,9 @@ class ConvolutionKernel_Winograd_6x3_s1_fused : public ConvolutionKernelBase { ParamsKey GetSupportedKey() const override; protected: - JitConstants GetJitConstants(const 
convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; bool Validate(const Params& p, const optional_params& o) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; WeightsLayout GetPreferredWeightsLayout(const convolution_params &) const override; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b16.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b16.cpp index d67b61ffab0acc..541ca75cfcb880 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b16.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b16.cpp @@ -77,7 +77,7 @@ size_t GetOfmPerWorkitem(Datatype dataType) { ConvolutionKernelBase::DispatchData ConvolutionKernel_yxfb_yxio_b16::SetDefault(const convolution_params& arg, int) const { - DispatchData runInfo = ConvolutionKernelBase::SetDefault(arg); + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(arg); const auto filter_ofm_num = arg.weights.OFM().v * arg.weights.G().v; const auto batch_size = arg.output.Batch().v; @@ -87,15 +87,15 @@ ConvolutionKernelBase::DispatchData ConvolutionKernel_yxfb_yxio_b16::SetDefault( const size_t ofmPerWorkItem = GetOfmPerWorkitem(arg.inputs[0].GetDType()); if (arg.inputs[0].GetDType() == Datatype::F16) { - runInfo.efficiency = FORCE_PRIORITY_7; + dispatchData.efficiency = FORCE_PRIORITY_7; } else { - runInfo.efficiency = FORCE_PRIORITY_9; + dispatchData.efficiency = FORCE_PRIORITY_9; } - runInfo.lws0 = min_lws; - runInfo.gws0 = filter_ofm_num * batch_size / 
(ofmPerWorkItem * batchesPerWorkItem); + dispatchData.lws[0] = min_lws; + dispatchData.gws[0] = filter_ofm_num * batch_size / (ofmPerWorkItem * batchesPerWorkItem); - return runInfo; + return dispatchData; } bool ConvolutionKernel_yxfb_yxio_b16::Validate(const Params& p, const optional_params& o) const { @@ -140,10 +140,10 @@ bool ConvolutionKernel_yxfb_yxio_b16::Validate(const Params& p, const optional_p } JitConstants ConvolutionKernel_yxfb_yxio_b16::GetJitConstants(const convolution_params& params, - const DispatchData& kd) const { - auto jit = Parent::GetJitConstants(params, kd); + const DispatchData& dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); - const auto local_work_group_size = kd.lws0; + const auto local_work_group_size = dispatchData.lws[0]; const auto batch_size = params.output.Batch().v; if (params.inputs[0].GetDType() == Datatype::F32) { @@ -168,7 +168,7 @@ JitConstants ConvolutionKernel_yxfb_yxio_b16::GetJitConstants(const convolution_ const size_t ofmPerWorkItem = GetOfmPerWorkitem(params.inputs[0].GetDType()); jit.AddConstants({ - MakeJitConstant("LOCAL_WORK_GROUP_SIZE", kd.lws0), + MakeJitConstant("LOCAL_WORK_GROUP_SIZE", dispatchData.lws[0]), MakeJitConstant("OFM_PER_WORK_ITEM", ofmPerWorkItem), MakeJitConstant("BATCHES_PER_WORK_ITEM", batchesPerWorkItem), // how many batches will a single work item compute diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b16.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b16.h index c57db65a39fb32..e1c3aa122b7382 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b16.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b16.h @@ -36,7 +36,7 @@ class ConvolutionKernel_yxfb_yxio_b16 : public 
ConvolutionKernelBase { } std::string GetKernelName(const convolution_params&) const override; bool Validate(const Params& p, const optional_params& o) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b1_block.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b1_block.cpp index 5d4b1e1c80c09f..39f42b033a2b90 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b1_block.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b1_block.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -38,16 +38,16 @@ ParamsKey ConvolutionKernel_yxfb_yxio_b1_block::GetSupportedKey() const { ConvolutionKernelBase::DispatchData ConvolutionKernel_yxfb_yxio_b1_block::SetDefault(const convolution_params& arg, int) const { - DispatchData runInfo = ConvolutionKernelBase::SetDefault(arg); + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(arg); // TODO: fill the proper data here (I don't know where can I locate it). 
- return runInfo; + return dispatchData; } JitConstants ConvolutionKernel_yxfb_yxio_b1_block::GetJitConstants(const convolution_params& params, - const DispatchData& kd) const { - auto cldnn_jit = ConvolutionKernelBase::GetJitConstants(params, kd); + const DispatchData& dispatchData) const { + auto cldnn_jit = ConvolutionKernelBase::GetJitConstants(params, dispatchData); - cldnn_jit.AddConstant(MakeJitConstant("LOCAL_WORK_GROUP_SIZE", kd.lws0)); + cldnn_jit.AddConstant(MakeJitConstant("LOCAL_WORK_GROUP_SIZE", dispatchData.lws[0])); return cldnn_jit; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b1_block.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b1_block.h index e2740923125e62..e7b1aa9be35be6 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b1_block.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b1_block.h @@ -29,10 +29,10 @@ class ConvolutionKernel_yxfb_yxio_b1_block : public ConvolutionKernelBase { ParamsKey GetSupportedKey() const override; protected: - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; WeightsLayout GetPreferredWeightsLayout(const convolution_params &) const override { return WeightsLayout::yxio; } DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b1_block_multiple_x.cpp 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b1_block_multiple_x.cpp index c2e7b827faed83..c690b49eb471bc 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b1_block_multiple_x.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b1_block_multiple_x.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -51,12 +51,12 @@ size_t GetOfmPerWorkitem(size_t filter_ofm_num, size_t localWorkSize) { ConvolutionKernelBase::DispatchData ConvolutionKernel_yxfb_yxio_b1_block_mulitple_x::SetDefault( const convolution_params& arg, int autoTuneIndex) const { - DispatchData runInfo = ConvolutionKernelBase::SetDefault(arg, autoTuneIndex); + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(arg, autoTuneIndex); const auto filter_ofm_num = arg.weights.OFM().v; const auto batch_size = arg.output.Batch().v; - runInfo.lws0 = local_work_size; + dispatchData.lws[0] = local_work_size; // We cannot return 8 because we are processing 4 spatial coordinates for batch1, // and if we use more than 4 ofm_per_work_item we downgrade simd16 to simd8 which would break this algorithm. 
@@ -65,28 +65,28 @@ ConvolutionKernelBase::DispatchData ConvolutionKernel_yxfb_yxio_b1_block_mulitpl // TODO: experiment with SIMD8 version of algorithm and check if it could be faster /*if (output_feature_count % (lws * 8) == 0) { - run_info.ofm_per_work_item = 8; - run_info.gws1 = static_cast(std::ceil(static_cast(run_info.gws1) / 2.0f)); + dispatchData.ofm_per_work_item = 8; + dispatchData.gws[1] = static_cast(std::ceil(static_cast(dispatchData.gws[1]) / 2.0f)); } else*/ const size_t ofmPerWorkItem = GetOfmPerWorkitem(filter_ofm_num, local_work_size); if (ofmPerWorkItem == 4) { // We compute multiple spatial coordinates "x" in a single workitem that's why we must divide - runInfo.gws1 = static_cast(std::ceil(static_cast(runInfo.gws1) / 4.0f)); + dispatchData.gws[1] = static_cast(std::ceil(static_cast(dispatchData.gws[1]) / 4.0f)); } else if (ofmPerWorkItem == 2) { - runInfo.gws1 = static_cast(std::ceil(static_cast(runInfo.gws1) / 8.0f)); + dispatchData.gws[1] = static_cast(std::ceil(static_cast(dispatchData.gws[1]) / 8.0f)); } else { - runInfo.gws1 = static_cast(std::ceil(static_cast(runInfo.gws1) / 8.0f)); + dispatchData.gws[1] = static_cast(std::ceil(static_cast(dispatchData.gws[1]) / 8.0f)); } - runInfo.gws0 = filter_ofm_num * batch_size / ofmPerWorkItem; + dispatchData.gws[0] = filter_ofm_num * batch_size / ofmPerWorkItem; - return runInfo; + return dispatchData; } JitConstants ConvolutionKernel_yxfb_yxio_b1_block_mulitple_x::GetJitConstants(const convolution_params& params, - const DispatchData& kd) const { - auto cldnn_jit = ConvolutionKernelBase::GetJitConstants(params, kd); + const DispatchData& dispatchData) const { + auto cldnn_jit = ConvolutionKernelBase::GetJitConstants(params, dispatchData); size_t ofmPerWorkItem = GetOfmPerWorkitem(params.weights.OFM().v, local_work_size); cldnn_jit.AddConstant(MakeJitConstant("USE_VECTOR", ofmPerWorkItem)); @@ -101,7 +101,7 @@ JitConstants ConvolutionKernel_yxfb_yxio_b1_block_mulitple_x::GetJitConstants(co 
cldnn_jit.AddConstant(MakeJitConstant( "OFM_PER_WORK_ITEM", ofmPerWorkItem)); // how many output feature maps for a single batch will a single work item produce - cldnn_jit.AddConstant(MakeJitConstant("LOCAL_WORK_GROUP_SIZE", kd.lws0)); + cldnn_jit.AddConstant(MakeJitConstant("LOCAL_WORK_GROUP_SIZE", dispatchData.lws[0])); return cldnn_jit; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b1_block_multiple_x.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b1_block_multiple_x.h index 6d91c0c6d5c8f2..e1ff764bf19a46 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b1_block_multiple_x.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b1_block_multiple_x.h @@ -34,7 +34,7 @@ class ConvolutionKernel_yxfb_yxio_b1_block_mulitple_x : public ConvolutionKernel return WeightsLayout::yxio; } bool Validate(const Params& p, const optional_params& o) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b8.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b8.cpp index f8733798db5b2a..1d573c4f61da6d 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b8.cpp +++ 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b8.cpp @@ -48,22 +48,22 @@ size_t GetOfmPerWorkitem(size_t filterOfmNum, size_t batchSize, size_t local_wor ConvolutionKernelBase::DispatchData ConvolutionKernel_yxfb_yxio_b8::SetDefault(const convolution_params& arg, int autoTuneIndex) const { - DispatchData runInfo = ConvolutionKernelBase::SetDefault(arg, autoTuneIndex); + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(arg, autoTuneIndex); const auto filterOfmNum = arg.weights.OFM().v; const auto batchSize = arg.output.Batch().v; - runInfo.lws0 = batchSize == 8 ? 8 : 16; - runInfo.lws1 = 1; - runInfo.lws2 = 1; + dispatchData.lws[0] = batchSize == 8 ? 8 : 16; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - size_t ofmPerWorkItem = GetOfmPerWorkitem(filterOfmNum, batchSize, runInfo.lws0); + size_t ofmPerWorkItem = GetOfmPerWorkitem(filterOfmNum, batchSize, dispatchData.lws[0]); - runInfo.gws0 = filterOfmNum * batchSize / ofmPerWorkItem; + dispatchData.gws[0] = filterOfmNum * batchSize / ofmPerWorkItem; - runInfo.efficiency = FORCE_PRIORITY_9; + dispatchData.efficiency = FORCE_PRIORITY_9; - return runInfo; + return dispatchData; } bool ConvolutionKernel_yxfb_yxio_b8::Validate(const Params& p, const optional_params& o) const { @@ -99,13 +99,13 @@ bool ConvolutionKernel_yxfb_yxio_b8::Validate(const Params& p, const optional_pa } JitConstants ConvolutionKernel_yxfb_yxio_b8::GetJitConstants(const convolution_params& params, - const DispatchData& kd) const { - JitConstants jits = ConvolutionKernelBase::GetJitConstants(params, kd); + const DispatchData& dispatchData) const { + JitConstants jits = ConvolutionKernelBase::GetJitConstants(params, dispatchData); - size_t ofmPerWorkItem = GetOfmPerWorkitem(params.weights.OFM().v, params.output.Batch().v, kd.lws0); + size_t ofmPerWorkItem = GetOfmPerWorkitem(params.weights.OFM().v, params.output.Batch().v, dispatchData.lws[0]); 
jits.AddConstant(MakeJitConstant("OFM_PER_WORK_ITEM", ofmPerWorkItem)); - jits.AddConstant(MakeJitConstant("LOCAL_WORK_GROUP_SIZE", kd.lws0)); + jits.AddConstant(MakeJitConstant("LOCAL_WORK_GROUP_SIZE", dispatchData.lws[0])); return jits; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b8.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b8.h index c4f8f3b8da0f7f..669ec8f6b391fd 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b8.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_yxfb_yxio_b8.h @@ -29,11 +29,11 @@ class ConvolutionKernel_yxfb_yxio_b8 : public ConvolutionKernelBase { ParamsKey GetSupportedKey() const override; protected: - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; WeightsLayout GetPreferredWeightsLayout(const convolution_params &) const override { return WeightsLayout::yxio; } bool Validate(const Params& p, const optional_params& o) const override; DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/deformable_convolution_kernel_bfyx_conv.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/deformable_convolution_kernel_bfyx_conv.cpp index a5687d6c867ea0..106bac57de2c0e 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/deformable_convolution_kernel_bfyx_conv.cpp +++ 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/deformable_convolution_kernel_bfyx_conv.cpp @@ -48,7 +48,7 @@ ParamsKey DeformableConvolutionKernel_bfyx_conv::GetSupportedKey() const { DeformableConvolutionKernel_bfyx_conv::DispatchData DeformableConvolutionKernel_bfyx_conv::SetDefault(const convolution_params& params, int autoTuneIndex) const { - DispatchData kd = ConvolutionKernelBase::SetDefault(params, autoTuneIndex); + DispatchData dispatchData = ConvolutionKernelBase::SetDefault(params, autoTuneIndex); const auto& out = params.output; @@ -57,21 +57,21 @@ DeformableConvolutionKernel_bfyx_conv::DispatchData DeformableConvolutionKernel_ auto f = out.Feature().v; auto b = out.Batch().v; - kd.gws0 = CeilDiv(x * y, 16); - kd.gws1 = Align(f, 16); - kd.gws2 = b; + dispatchData.gws[0] = CeilDiv(x * y, 16); + dispatchData.gws[1] = Align(f, 16); + dispatchData.gws[2] = b; - kd.lws0 = 1; - kd.lws1 = 16; - kd.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 16; + dispatchData.lws[2] = 1; - kd.efficiency = FORCE_PRIORITY_2; + dispatchData.efficiency = FORCE_PRIORITY_2; - return kd; + return dispatchData; } JitConstants DeformableConvolutionKernel_bfyx_conv::GetJitConstants(const convolution_params& params, - const DispatchData& /*kd*/) const { + const DispatchData& /*dispatchData*/) const { JitConstants jit = WeightBiasKernelBase::GetJitConstants(params); jit.AddConstant(MakeJitConstant("X_BLOCK_SIZE", 16)); jit.AddConstant(MakeJitConstant("INPUT_CHANNELS", params.inputs[0].Feature().v / params.weights.X().v / params.weights.Y().v)); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/deformable_convolution_kernel_bfyx_conv.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/deformable_convolution_kernel_bfyx_conv.h index eb0eb06531718e..330874ff065ac3 100644 --- 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/deformable_convolution_kernel_bfyx_conv.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/deformable_convolution_kernel_bfyx_conv.h @@ -30,7 +30,7 @@ class DeformableConvolutionKernel_bfyx_conv : public ConvolutionKernelBase { protected: DispatchData SetDefault(const convolution_params& params, int autoTuneIndex = -1) const override; - JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; ParamsKey GetSupportedKey() const override; WeightsLayout GetPreferredWeightsLayout(const convolution_params&) const override { return WeightsLayout::os_is_yx_isv16_osv16; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/deformable_convolution_kernel_bfyx_interp.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/deformable_convolution_kernel_bfyx_interp.cpp index 26fc7791d7c9c9..ff2c3f41dbc221 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/deformable_convolution_kernel_bfyx_interp.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/deformable_convolution_kernel_bfyx_interp.cpp @@ -45,7 +45,7 @@ ParamsKey DeformableConvolutionKernel_bfyx_interp::GetSupportedKey() const { } CommonDispatchData DeformableConvolutionKernel_bfyx_interp::SetDefault(const convolution_params& params) const { - CommonDispatchData kd; + CommonDispatchData dispatchData; const auto& out = params.output; @@ -54,17 +54,17 @@ CommonDispatchData DeformableConvolutionKernel_bfyx_interp::SetDefault(const con auto b = out.Batch().v; auto kernel_size = params.kernelSize.x * params.kernelSize.y; - kd.gws0 = Align(x * y, 16); - kd.gws1 = params.deformable_groups * b; - 
kd.gws2 = kernel_size; + dispatchData.gws[0] = Align(x * y, 16); + dispatchData.gws[1] = params.deformable_groups * b; + dispatchData.gws[2] = kernel_size; - kd.lws0 = 16; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = 16; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - kd.efficiency = FORCE_PRIORITY_2; + dispatchData.efficiency = FORCE_PRIORITY_2; - return kd; + return dispatchData; } @@ -91,14 +91,14 @@ KernelsData DeformableConvolutionKernel_bfyx_interp::GetKernelsData(const Params KernelData kd = KernelData::Default(params); convolution_params& newParams = *static_cast(kd.params.get()); - CommonDispatchData runInfo = SetDefault(newParams); + CommonDispatchData dispatchData = SetDefault(newParams); auto entry_point = GetEntryPoint(kernelName, newParams.layerID, options); auto cldnn_jit = GetJitConstants(newParams); std::string jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point, DEFAULT, + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point, DEFAULT, false, false, static_cast(newParams.inputs.size())); return {kd}; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/ctc_greedy_decoder/ctc_greedy_decoder_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/ctc_greedy_decoder/ctc_greedy_decoder_kernel_base.cpp index 8f5384acf3bdab..2688edb6d6757c 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/ctc_greedy_decoder/ctc_greedy_decoder_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/ctc_greedy_decoder/ctc_greedy_decoder_kernel_base.cpp @@ -33,26 +33,17 @@ JitConstants CTCGreedyDecoderKernelBase::GetJitConstants(const ctc_greedy_decode } CTCGreedyDecoderKernelBase::DispatchData CTCGreedyDecoderKernelBase::SetDefault(const ctc_greedy_decoder_params& params) const { - 
DispatchData kd; - kd.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; + DispatchData dispatchData; - std::vector global = { 1, 1, 1 }; - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + dispatchData.gws = { 1, 1, 1 }; + dispatchData.lws= GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - return kd; + return dispatchData; } KernelsData CTCGreedyDecoderKernelBase::GetCommonKernelsData(const Params& params, - const optional_params& options, - float estimated_time) const { + const optional_params& options, + float estimated_time) const { assert(params.GetType() == KernelType::CTC_GREEDY_DECODER); if (!Validate(params, options)) @@ -60,19 +51,17 @@ KernelsData CTCGreedyDecoderKernelBase::GetCommonKernelsData(const Params& param const ctc_greedy_decoder_params& orgParams = static_cast(params); - DispatchData runInfo; - - runInfo = SetDefault(orgParams); + DispatchData dispatchData = SetDefault(orgParams); KernelData kd = KernelData::Default(params); - auto cldnn_jit = GetJitConstants(orgParams, runInfo); + auto cldnn_jit = GetJitConstants(orgParams, dispatchData); auto entry_point = GetEntryPoint(kernelName, orgParams.layerID, options); auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; FillCLKernelData(kernel, - runInfo, + dispatchData, params.engineInfo, kernelName, jit, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/ctc_greedy_decoder/ctc_greedy_decoder_kernel_base.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/ctc_greedy_decoder/ctc_greedy_decoder_kernel_base.h index 9c14de1f2323f8..0181bce32f2c95 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/ctc_greedy_decoder/ctc_greedy_decoder_kernel_base.h +++ 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/ctc_greedy_decoder/ctc_greedy_decoder_kernel_base.h @@ -44,7 +44,7 @@ class CTCGreedyDecoderKernelBase : public common_kernel_base { using DispatchData = CommonDispatchData; protected: - virtual JitConstants GetJitConstants(const ctc_greedy_decoder_params& params, DispatchData kd) const; + virtual JitConstants GetJitConstants(const ctc_greedy_decoder_params& params, DispatchData dispatchData) const; virtual DispatchData SetDefault(const ctc_greedy_decoder_params& params) const; KernelsData GetCommonKernelsData(const Params& params, const optional_params&, float estimated_time) const; }; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_base.cpp index 43a3cecccefa35..3941259bb03d3d 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_base.cpp @@ -83,22 +83,13 @@ JitConstants CumSumKernelBase::GetJitConstants(const cum_sum_params& params, Dis } CumSumKernelBase::DispatchData CumSumKernelBase::SetDefault(const cum_sum_params& params) const { - DispatchData runInfo; - std::vector global = {params.output.Batch().v, - params.output.Feature().v * params.output.W().v, - params.output.Z().v * params.output.Y().v * params.output.X().v}; + DispatchData dispatchData; + dispatchData.gws = { params.output.Batch().v, + params.output.Feature().v * params.output.W().v, + params.output.Z().v * params.output.Y().v * params.output.X().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; - - runInfo.lws0 
= local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; - - return runInfo; + return dispatchData; } KernelsData CumSumKernelBase::GetCommonKernelsData(const Params& params, @@ -111,14 +102,14 @@ KernelsData CumSumKernelBase::GetCommonKernelsData(const Params& params, return {}; } - auto runInfo = SetDefault(newParams); + auto dispatchData = SetDefault(newParams); auto entry_point = GetEntryPoint(kernelName, newParams.layerID, options); - auto cldnn_jit = GetJitConstants(newParams, runInfo); + auto cldnn_jit = GetJitConstants(newParams, dispatchData); std::string jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point); kd.estimatedTime = estimatedTime; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_base.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_base.h index 920551f30bc1cd..4ec34bbe66cb2c 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_base.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_base.h @@ -56,7 +56,7 @@ class CumSumKernelBase : public common_kernel_base { int32_t GetCumSumAxisIndex(const cum_sum_params& params) const; size_t GetRealAxisIndex(const cum_sum_params& params) const; ParamsKey GetSupportedKey() const override; - virtual JitConstants GetJitConstants(const cum_sum_params& params, DispatchData kd) const; + virtual JitConstants GetJitConstants(const cum_sum_params& params, DispatchData dispatchData) const; virtual DispatchData SetDefault(const cum_sum_params& params) const; KernelsData GetCommonKernelsData(const Params& params, const optional_params&, float estimatedTime) const; bool Validate(const Params&, const 
optional_params&) const override; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_partial_sum.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_partial_sum.cpp index 89d91b47fb6535..2a2a2a27fe69d1 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_partial_sum.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_partial_sum.cpp @@ -25,15 +25,15 @@ namespace kernel_selector { static constexpr size_t simd = 16; static constexpr size_t BLOCK_SIZE = 16; -JitConstants CumSumKernelPartialSum::GetJitConstants(const cum_sum_params& params, DispatchData kd) const { - auto jits = CumSumKernelBase::GetJitConstants(params, kd); +JitConstants CumSumKernelPartialSum::GetJitConstants(const cum_sum_params& params, DispatchData dispatchData) const { + auto jits = CumSumKernelBase::GetJitConstants(params, dispatchData); auto activation_dt = GetActivationType(params); jits.Merge(MakeTypeJitConstants(activation_dt, "PARTIAL")); jits.AddConstant(MakeJitConstant("SIMD", simd)); - jits.AddConstant(MakeJitConstant("LWS", kd.lws0)); + jits.AddConstant(MakeJitConstant("LWS", dispatchData.lws[0])); jits.AddConstant(MakeJitConstant("BLOCK_SIZE", BLOCK_SIZE)); - jits.AddConstant(MakeJitConstant("SUM_ITEMS_NUM", kd.sum_items_num)); + jits.AddConstant(MakeJitConstant("SUM_ITEMS_NUM", dispatchData.sum_items_num)); return jits; } @@ -48,15 +48,15 @@ KernelsData CumSumKernelPartialSum::GetMultiStageKernelsData(const Params& param KernelData kd = KernelData::Default(params, kernels_num); const cum_sum_params& newParams = *static_cast(kd.params.get()); - auto runInfo = SetDefaultForMulti(newParams); + auto dispatchData = SetDefaultForMulti(newParams); { // partial sum - auto cldnn_jit = GetJitConstants(newParams, runInfo.stage_1); + auto cldnn_jit = GetJitConstants(newParams, 
dispatchData.stage_1); cldnn_jit.AddConstant(MakeJitConstant("CUM_SUM_PARTIAL_SUM", 1)); auto entry_point = GetEntryPoint(kernelName, newParams.layerID, options); auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo.stage_1, params.engineInfo, kernelName, jit, entry_point); + FillCLKernelData(kernel, dispatchData.stage_1, params.engineInfo, kernelName, jit, entry_point); kernel.arguments.clear(); // Clear original output argument kernel.arguments.push_back({ArgumentDescriptor::Types::INPUT, 0}); kernel.arguments.push_back({ArgumentDescriptor::Types::INTERNAL_BUFFER, 0}); @@ -65,12 +65,12 @@ KernelsData CumSumKernelPartialSum::GetMultiStageKernelsData(const Params& param { // Final auto entry_point = GetEntryPoint(kernelName, newParams.layerID, options); - auto cldnn_jit = GetJitConstants(newParams, runInfo.stage_final); + auto cldnn_jit = GetJitConstants(newParams, dispatchData.stage_final); std::string jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[1]; - FillCLKernelData(kernel, runInfo.stage_final, params.engineInfo, kernelName, jit, entry_point); + FillCLKernelData(kernel, dispatchData.stage_final, params.engineInfo, kernelName, jit, entry_point); kernel.arguments.clear(); // Clear original output argument kernel.arguments.push_back({ArgumentDescriptor::Types::INTERNAL_BUFFER, 0}); @@ -83,7 +83,7 @@ KernelsData CumSumKernelPartialSum::GetMultiStageKernelsData(const Params& param } CumSumKernelPartialSum::MultiDispatchData CumSumKernelPartialSum::SetDefaultForMulti(const cum_sum_params& params) const { - MultiDispatchData md; + MultiDispatchData dispatchData; std::vector dims = {params.output.Batch().v, params.output.Feature().v, params.output.W().v, @@ -108,23 +108,19 @@ CumSumKernelPartialSum::MultiDispatchData CumSumKernelPartialSum::SetDefaultForM } } - md.stage_1.gws0 = Align(gws[0], BLOCK_SIZE); - md.stage_1.gws1 = gws[1]; - md.stage_1.gws2 = gws[2]; - 
md.stage_1.lws0 = BLOCK_SIZE; - md.stage_1.lws1 = 1; - md.stage_1.lws2 = 1; - md.stage_1.sum_items_num = items_num; - - md.stage_final.gws0 = gws[0]; - md.stage_final.gws1 = gws[1]; - md.stage_final.gws2 = gws[2]; - md.stage_final.lws0 = 1; - md.stage_final.lws1 = 1; - md.stage_final.lws2 = 1; - md.stage_final.sum_items_num = Align(items_num, BLOCK_SIZE); - - return md; + dispatchData.stage_1.gws[0] = Align(gws[0], BLOCK_SIZE); + dispatchData.stage_1.gws[1] = gws[1]; + dispatchData.stage_1.gws[2] = gws[2]; + dispatchData.stage_1.lws[0] = BLOCK_SIZE; + dispatchData.stage_1.lws[1] = 1; + dispatchData.stage_1.lws[2] = 1; + dispatchData.stage_1.sum_items_num = items_num; + + dispatchData.stage_final.gws = gws; + dispatchData.stage_final.lws = { 1, 1, 1 }; + dispatchData.stage_final.sum_items_num = Align(items_num, BLOCK_SIZE); + + return dispatchData; } KernelsData CumSumKernelPartialSum::GetKernelsData(const Params& params, const optional_params& options) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_partial_sum.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_partial_sum.h index d092e8236cbbb2..40330cd945e088 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_partial_sum.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_partial_sum.h @@ -29,7 +29,7 @@ class CumSumKernelPartialSum : public CumSumKernelBase { DispatchData stage_final; }; - JitConstants GetJitConstants(const cum_sum_params& params, DispatchData kd) const override; + JitConstants GetJitConstants(const cum_sum_params& params, DispatchData dispatchData) const override; KernelsData GetMultiStageKernelsData(const Params& params, const optional_params&, float estimated_time) const; MultiDispatchData SetDefaultForMulti(const cum_sum_params& params) const; KernelsData GetKernelsData(const 
Params& params, const optional_params& options) const override; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_ref.cpp index fe05d263ad6101..ee719854598030 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_ref.cpp @@ -20,8 +20,8 @@ #include namespace kernel_selector { -JitConstants CumSumKernelRef::GetJitConstants(const cum_sum_params& params, DispatchData kd) const { - auto jits = CumSumKernelBase::GetJitConstants(params, kd); +JitConstants CumSumKernelRef::GetJitConstants(const cum_sum_params& params, DispatchData dispatchData) const { + auto jits = CumSumKernelBase::GetJitConstants(params, dispatchData); jits.AddConstant(MakeJitConstant("AXIS_LAYOUT_INDEX", GetCumSumAxisIndex(params))); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_ref.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_ref.h index 58099d7470ec05..4273653050e0c2 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_ref.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/cum_sum/cum_sum_kernel_ref.h @@ -24,7 +24,7 @@ class CumSumKernelRef : public CumSumKernelBase { CumSumKernelRef() : CumSumKernelBase("cum_sum_ref") {} virtual ~CumSumKernelRef() = default; protected: - JitConstants GetJitConstants(const cum_sum_params& params, DispatchData kd) const override; + JitConstants GetJitConstants(const cum_sum_params& params, DispatchData dispatchData) const override; KernelsData GetKernelsData(const Params& params, const optional_params& options) const override; }; } // namespace kernel_selector diff 
--git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_b_fs_zyx_fsv16.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_b_fs_zyx_fsv16.cpp index dbbc4a9538c480..16f83ac34da4e2 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_b_fs_zyx_fsv16.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_b_fs_zyx_fsv16.cpp @@ -48,7 +48,7 @@ ParamsKey DeconvolutionKernel_b_fs_zyx_fsv16::GetSupportedKey() const { } DeconvolutionKernelBase::DispatchData DeconvolutionKernel_b_fs_zyx_fsv16::SetDefault(const deconvolution_params& params) const { - DispatchData kd = DeconvolutionKernelBase::SetDefault(params); + DispatchData dispatchData = DeconvolutionKernelBase::SetDefault(params); const auto& out = params.output; @@ -63,25 +63,26 @@ DeconvolutionKernelBase::DispatchData DeconvolutionKernel_b_fs_zyx_fsv16::SetDef if (ver_bsv16_fsv16) { if (params.depthwise_separable_opt) { - kd.gws0 = x * y * z; - kd.gws1 = f; - kd.gws2 = b / 16; + dispatchData.gws[0] = x * y * z; + dispatchData.gws[1] = f; + dispatchData.gws[2] = b / 16; - kd.lws0 = 1; - kd.lws1 = sub_group_size; - kd.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = sub_group_size; + dispatchData.lws[2] = 1; } else { - kd.gws0 = 64; - while (kd.gws0 > 16) { - if (f % kd.gws0 == 0) break; - kd.gws0 /= 2; + dispatchData.gws[0] = 64; + while (dispatchData.gws[0] > 16) { + if (f % dispatchData.gws[0] == 0) + break; + dispatchData.gws[0] /= 2; } - kd.gws1 = x * y * z; - kd.gws2 = CeilDiv(b, 16) * (f / kd.gws0) * params.groups; + dispatchData.gws[1] = x * y * z; + dispatchData.gws[2] = CeilDiv(b, 16) * (f / dispatchData.gws[0]) * params.groups; - kd.lws0 = sub_group_size; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = sub_group_size; + dispatchData.lws[1] = 1; + 
dispatchData.lws[2] = 1; } } else { size_t x_block_size = 16; @@ -92,31 +93,32 @@ DeconvolutionKernelBase::DispatchData DeconvolutionKernel_b_fs_zyx_fsv16::SetDef } x_block_size = std::max(x_block_size, (size_t)8); if (params.depthwise_separable_opt) { - kd.gws0 = CeilDiv(x, x_block_size) * y * z; - kd.gws1 = f; - kd.gws2 = b; + dispatchData.gws[0] = CeilDiv(x, x_block_size) * y * z; + dispatchData.gws[1] = f; + dispatchData.gws[2] = b; - kd.lws0 = 1; - kd.lws1 = sub_group_size; - kd.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = sub_group_size; + dispatchData.lws[2] = 1; } else { - kd.gws0 = 64; - while (kd.gws0 > 16) { - if (f % kd.gws0 == 0) break; - kd.gws0 /= 2; + dispatchData.gws[0] = 64; + while (dispatchData.gws[0] > 16) { + if (f % dispatchData.gws[0] == 0) + break; + dispatchData.gws[0] /= 2; } - kd.gws1 = CeilDiv(x, x_block_size) * y * z; - kd.gws2 = b * (f / kd.gws0); + dispatchData.gws[1] = CeilDiv(x, x_block_size) * y * z; + dispatchData.gws[2] = b * (f / dispatchData.gws[0]); - kd.lws0 = sub_group_size; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = sub_group_size; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; } } - kd.efficiency = FORCE_PRIORITY_2; + dispatchData.efficiency = FORCE_PRIORITY_2; - return kd; + return dispatchData; } bool DeconvolutionKernel_b_fs_zyx_fsv16::Validate(const Params& p, const optional_params& o) const { @@ -230,10 +232,10 @@ JitConstants DeconvolutionKernel_b_fs_zyx_fsv16::GetJitConstants(const deconvolu jit.AddConstant(MakeJitConstant("IW_FULL", params.output.X().LogicalDimPadded())); - DispatchData runInfo = SetDefault(params); - jit.AddConstant(MakeJitConstant("LWS_0", runInfo.lws0)); - jit.AddConstant(MakeJitConstant("LWS_1", runInfo.lws1)); - jit.AddConstant(MakeJitConstant("LWS_2", runInfo.lws2)); + DispatchData dispatchData = SetDefault(params); + jit.AddConstant(MakeJitConstant("LWS_0", dispatchData.lws[0])); + jit.AddConstant(MakeJitConstant("LWS_1", dispatchData.lws[1])); + 
jit.AddConstant(MakeJitConstant("LWS_2", dispatchData.lws[2])); if (!params.fused_ops.empty()) { auto fused_dt = GetActivationType(params); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_b_fs_zyx_fsv16_dw.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_b_fs_zyx_fsv16_dw.cpp index cafa959944bdaa..02a329c9ea27c3 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_b_fs_zyx_fsv16_dw.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_b_fs_zyx_fsv16_dw.cpp @@ -138,7 +138,7 @@ ParamsKey DeconvolutionKernel_b_fs_zyx_fsv16_dw::GetSupportedKey() const { } DeconvolutionKernelBase::DispatchData DeconvolutionKernel_b_fs_zyx_fsv16_dw::SetDefault(const deconvolution_params& params) const { - DispatchData kd = DeconvolutionKernelBase::SetDefault(params); + DispatchData dispatchData = DeconvolutionKernelBase::SetDefault(params); const auto& out = params.output; @@ -148,17 +148,17 @@ DeconvolutionKernelBase::DispatchData DeconvolutionKernel_b_fs_zyx_fsv16_dw::Set auto f = out.Feature().v; auto b = out.Batch().v; - kd.gws0 = CeilDiv(x, GetDispatchParams(params).block_size_x) * y * z; - kd.gws1 = Align(f, feature_block_size); - kd.gws2 = b; + dispatchData.gws[0] = CeilDiv(x, GetDispatchParams(params).block_size_x) * y * z; + dispatchData.gws[1] = Align(f, feature_block_size); + dispatchData.gws[2] = b; - kd.lws0 = 1; - kd.lws1 = sub_group_size; - kd.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = sub_group_size; + dispatchData.lws[2] = 1; - kd.efficiency = FORCE_PRIORITY_2; + dispatchData.efficiency = FORCE_PRIORITY_2; - return kd; + return dispatchData; } bool DeconvolutionKernel_b_fs_zyx_fsv16_dw::Validate(const Params& p, const optional_params& o) const { diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_base.cpp index 4a7d89ed325fdf..1dc654ca2f85d8 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_base.cpp @@ -90,22 +90,24 @@ DeconvolutionKernelBase::DispatchData DeconvolutionKernelBase::SetDefault(const auto batch_size = params.output.Batch().v; auto output_features = params.output.Feature().v; - DispatchData kd; + DispatchData dispatchData; - kd.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; size_t gws0 = output_features * batch_size; size_t lws0 = std::min(gws0, static_cast(32)); while (gws0 % lws0) { lws0--; } - kd.gws0 = gws0; - kd.gws1 = params.output.X().v; - kd.gws2 = params.output.Y().v * params.output.Z().v; - kd.lws0 = lws0; - kd.lws1 = 1; - kd.lws2 = 1; - kd.efficiency = DONT_USE_IF_HAVE_SOMETHING_ELSE; - return kd; + + dispatchData.gws[0] = gws0; + dispatchData.gws[1] = params.output.X().v; + dispatchData.gws[2] = params.output.Y().v * params.output.Z().v; + + dispatchData.lws[0] = lws0; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; + + dispatchData.efficiency = DONT_USE_IF_HAVE_SOMETHING_ELSE; + return dispatchData; } KernelsData DeconvolutionKernelBase::GetKernelsData(const Params& params, const optional_params& options) const { @@ -116,7 +118,7 @@ KernelsData DeconvolutionKernelBase::GetKernelsData(const Params& params, const } const deconvolution_params& orgParams = static_cast(params); - DispatchData runInfo = SetDefault(orgParams); + DispatchData dispatchData = SetDefault(orgParams); KernelData kd = KernelData::Default(params); deconvolution_params& newParams = *static_cast(kd.params.get()); @@ -137,7 +139,7 @@ 
KernelsData DeconvolutionKernelBase::GetKernelsData(const Params& params, const auto& kernel = kd.kernels[0]; FillCLKernelData(kernel, - runInfo, + dispatchData, params.engineInfo, kernelName, jit, @@ -149,7 +151,7 @@ KernelsData DeconvolutionKernelBase::GetKernelsData(const Params& params, const GetFusedPrimitiveInputsCount(params)); kernel.arguments.push_back({ArgumentDescriptor::Types::SPLIT, 0}); - kd.estimatedTime = runInfo.efficiency; + kd.estimatedTime = dispatchData.efficiency; return {kd}; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_bfyx_opt.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_bfyx_opt.cpp index 4084bdb8ae1c4c..ac89b0b5167460 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_bfyx_opt.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_bfyx_opt.cpp @@ -40,19 +40,20 @@ ParamsKey DeconvolutionKernel_bfyx_opt::GetSupportedKey() const { } CommonDispatchData DeconvolutionKernel_bfyx_opt::SetDefault(const deconvolution_params& params) const { - DispatchData kd; + DispatchData dispatchData; - kd.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; auto wg_size = 16; - kd.gws0 = Align(params.output.X().v, wg_size * params.stride.x); - kd.gws1 = params.output.Y().v; - kd.gws2 = params.output.Batch().v * params.output.Feature().v; - kd.lws0 = wg_size; - kd.lws1 = 1; - kd.lws2 = 1; - kd.efficiency = FORCE_PRIORITY_6; - return kd; + dispatchData.gws[0] = Align(params.output.X().v, wg_size * params.stride.x); + dispatchData.gws[1] = params.output.Y().v; + dispatchData.gws[2] = params.output.Batch().v * params.output.Feature().v; + + dispatchData.lws[0] = wg_size; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; + + dispatchData.efficiency = FORCE_PRIORITY_6; + return dispatchData; 
} JitConstants DeconvolutionKernel_bfyx_opt::GetJitConstants(const deconvolution_params& params) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_imad_along_f_tile_bfx.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_imad_along_f_tile_bfx.cpp index e7e49ab276d4a5..7a97f476ad9c0a 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_imad_along_f_tile_bfx.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_imad_along_f_tile_bfx.cpp @@ -108,36 +108,28 @@ WeightsLayout DeconvolutionKernel_imad_along_f_tile_bfx::GetPreferredWeightsLayo } DeconvolutionKernelBase::DispatchData DeconvolutionKernel_imad_along_f_tile_bfx::SetDefault(const deconvolution_params& params) const { - auto dispatch = Parent::SetDefault(params); + DispatchData dispatchData = Parent::SetDefault(params); auto tile_x = GetTileX(params); auto tile_ofm = GetTileOFM(params); auto tile_b = GetTileB(params); - std::vector global = { + dispatchData.gws = { CeilDiv(params.output.X().v, tile_x) * params.output.Y().v * params.output.Z().v, Align(CeilDiv(params.output.Feature().v, tile_ofm), simd), CeilDiv(params.output.Batch().v, tile_b) }; - std::vector local = { 1, simd, 1 }; - - dispatch.gws0 = global[0]; - dispatch.gws1 = global[1]; - dispatch.gws2 = global[2]; - - dispatch.lws0 = local[0]; - dispatch.lws1 = local[1]; - dispatch.lws2 = local[2]; + dispatchData.lws = { 1, simd, 1 }; // Currently most optimized for fsv16 formats if (params.inputs[0].GetLayout() == DataLayout::b_fs_yx_fsv16 || params.inputs[0].GetLayout() == DataLayout::b_fs_zyx_fsv16) { - dispatch.efficiency = FORCE_PRIORITY_7; + dispatchData.efficiency = FORCE_PRIORITY_7; } else { - dispatch.efficiency = FORCE_PRIORITY_8; + dispatchData.efficiency = FORCE_PRIORITY_8; } - return 
dispatch; + return dispatchData; } JitConstants DeconvolutionKernel_imad_along_f_tile_bfx::GetJitConstants(const deconvolution_params& params) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_imad_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_imad_ref.cpp index da9b46fdd5c3f0..1eb8d7b174bf85 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_imad_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_imad_ref.cpp @@ -60,27 +60,19 @@ WeightsLayout DeconvolutionKernel_imad_ref::GetPreferredWeightsLayout(const deco } DeconvolutionKernelBase::DispatchData DeconvolutionKernel_imad_ref::SetDefault(const deconvolution_params& params) const { - auto dispatch = Parent::SetDefault(params); + DispatchData dispatchData = Parent::SetDefault(params); - std::vector global = { + dispatchData.gws = { params.output.Feature().v, params.output.X().v * params.output.Y().v * params.output.Z().v, params.output.Batch().v }; - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - dispatch.gws0 = global[0]; - dispatch.gws1 = global[1]; - dispatch.gws2 = global[2]; + dispatchData.efficiency = FORCE_PRIORITY_9; - dispatch.lws0 = local[0]; - dispatch.lws1 = local[1]; - dispatch.lws2 = local[2]; - - dispatch.efficiency = FORCE_PRIORITY_9; - - return dispatch; + return dispatchData; } JitConstants DeconvolutionKernel_imad_ref::GetJitConstants(const deconvolution_params& params) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_ref.cpp 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_ref.cpp index b3d4268b4d926f..b8cb81ec7d2408 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_ref.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016-2019 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -62,20 +62,20 @@ ParamsKey DeconvolutionKernelRef::GetSupportedKey() const { } CommonDispatchData DeconvolutionKernelRef::SetDefault(const deconvolution_params& params) const { - CommonDispatchData runInfo = DeconvolutionKernelBase::SetDefault(params); + CommonDispatchData dispatchData = DeconvolutionKernelBase::SetDefault(params); if (params.output.Feature().v * params.output.Batch().v <= 16) { const auto& out = params.output; - runInfo.gws0 = Align(out.X().v, 32); - runInfo.gws1 = out.Y().v * out.Z().v; - runInfo.gws2 = out.Feature().v * out.Batch().v; + dispatchData.gws[0] = Align(out.X().v, 32); + dispatchData.gws[1] = out.Y().v * out.Z().v; + dispatchData.gws[2] = out.Feature().v * out.Batch().v; - runInfo.lws0 = 32; - runInfo.lws1 = 1; - runInfo.lws2 = 1; + dispatchData.lws[0] = 32; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; } - return runInfo; + return dispatchData; } JitConstants DeconvolutionKernelRef::GetJitConstants(const deconvolution_params& params) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/depth_to_space/depth_to_space_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/depth_to_space/depth_to_space_kernel_base.cpp index f120dcbb5ec92d..f13a64f710f8f4 100644 --- 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/depth_to_space/depth_to_space_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/depth_to_space/depth_to_space_kernel_base.cpp @@ -40,23 +40,15 @@ bool DepthToSpaceKernelBase::Validate(const Params& p, const optional_params& o) } CommonDispatchData DepthToSpaceKernelBase::SetDefault(const depth_to_space_params& params) const { - CommonDispatchData runInfo; + CommonDispatchData dispatchData; - std::vector global = { params.output.Batch().v, - params.output.Feature().v, - params.output.Z().v * params.output.Y().v * params.output.X().v }; + dispatchData.gws = { params.output.Batch().v, + params.output.Feature().v, + params.output.Z().v * params.output.Y().v * params.output.X().v }; - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; - - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; - - return runInfo; + return dispatchData; } JitConstants DepthToSpaceKernelBase::GetJitConstants(const depth_to_space_params& params) const { @@ -80,14 +72,14 @@ KernelsData DepthToSpaceKernelBase::GetCommonKernelsData(const Params& params, c return {}; } - auto runInfo = SetDefault(newParams); + auto dispatchData = SetDefault(newParams); auto entry_point = GetEntryPoint(kernelName, newParams.layerID, options); auto cldnn_jit = GetJitConstants(newParams); std::string jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point, + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point, DEFAULT, false, false, 1, GetFusedPrimitiveInputsCount(params)); kd.estimatedTime = estimatedTime; diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/depth_to_space/depth_to_space_kernel_block2_opt.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/depth_to_space/depth_to_space_kernel_block2_opt.cpp index 665e5351b99550..4a87031998921f 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/depth_to_space/depth_to_space_kernel_block2_opt.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/depth_to_space/depth_to_space_kernel_block2_opt.cpp @@ -45,23 +45,14 @@ bool DepthToSpaceKernelBlock2Opt::Validate(const Params& p, const optional_param } CommonDispatchData DepthToSpaceKernelBlock2Opt::SetDefault(const depth_to_space_params& params) const { - CommonDispatchData runInfo; + CommonDispatchData dispatchData; - std::vector global = { Align(params.inputs[0].X().v / 2, 16), - params.inputs[0].Y().v, - 1}; + dispatchData.gws = { Align(params.inputs[0].X().v / 2, 16), + params.inputs[0].Y().v, + 1 }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; - - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; - - return runInfo; + return dispatchData; } JitConstants DepthToSpaceKernelBlock2Opt::GetJitConstants(const depth_to_space_params& params) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/detection_output/detection_output_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/detection_output/detection_output_kernel_base.cpp index a941bdda77b6ab..418b124a0f7ed9 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/detection_output/detection_output_kernel_base.cpp +++ 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/detection_output/detection_output_kernel_base.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -48,17 +48,17 @@ JitConstants DetectionOutputKernelBase::GetJitConstants(const detection_output_p return jit; } -DetectionOutputKernelBase::DispatchData DetectionOutputKernelBase::SetDefault( - const detection_output_params& params) const { - DispatchData kd; +DetectionOutputKernelBase::DispatchData DetectionOutputKernelBase::SetDefault(const detection_output_params& /*params*/) const { + DispatchData dispatchData; - kd.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; - kd.gws0 = 0; - kd.gws1 = 0; - kd.gws2 = 0; - kd.lws0 = 0; - kd.lws1 = 0; - kd.lws2 = 0; - return kd; + dispatchData.gws[0] = 0; + dispatchData.gws[1] = 0; + dispatchData.gws[2] = 0; + + dispatchData.lws[0] = 0; + dispatchData.lws[1] = 0; + dispatchData.lws[2] = 0; + + return dispatchData; } -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/detection_output/detection_output_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/detection_output/detection_output_kernel_ref.cpp index a9b66028ef25ea..a68d4580c12be6 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/detection_output/detection_output_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/detection_output/detection_output_kernel_ref.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in 
compliance with the License. @@ -35,7 +35,7 @@ ParamsKey DetectionOutputKernel::GetSupportedKey() const { } CommonDispatchData DetectionOutputKernel::SetDefault(const detection_output_params& params) const { - CommonDispatchData runInfo = DetectionOutputKernelBase::SetDefault(params); + CommonDispatchData dispatchData = DetectionOutputKernelBase::SetDefault(params); // Number of all work items is set to total number of bounding boxes - // one bounding box is procerssed by one work item @@ -54,15 +54,15 @@ CommonDispatchData DetectionOutputKernel::SetDefault(const detection_output_para bboxesNum = work_group_size * params.inputs[0].Batch().v; - runInfo.gws0 = Align(bboxesNum, work_group_size); - runInfo.gws1 = 1; - runInfo.gws2 = 1; + dispatchData.gws[0] = Align(bboxesNum, work_group_size); + dispatchData.gws[1] = 1; + dispatchData.gws[2] = 1; - runInfo.lws0 = work_group_size; - runInfo.lws1 = 1; - runInfo.lws2 = 1; + dispatchData.lws[0] = work_group_size; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - return runInfo; + return dispatchData; } KernelsData DetectionOutputKernel::GetKernelsData(const Params& params, const optional_params& options) const { @@ -70,14 +70,14 @@ KernelsData DetectionOutputKernel::GetKernelsData(const Params& params, const op KernelData kd = KernelData::Default(params); const detection_output_params& detectOutParams = static_cast(params); - DispatchData runInfo = SetDefault(detectOutParams); + DispatchData dispatchData = SetDefault(detectOutParams); auto cldnnJit = GetJitConstants(detectOutParams); auto entryPoint = GetEntryPoint(kernelName, detectOutParams.layerID, options); auto jit = CreateJit(kernelName, cldnnJit, entryPoint); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entryPoint); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entryPoint); kernel.arguments.push_back({ArgumentDescriptor::Types::INPUT, 1}); 
kernel.arguments.push_back({ArgumentDescriptor::Types::INPUT, 2}); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/detection_output/detection_output_kernel_sort.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/detection_output/detection_output_kernel_sort.cpp index 91b99455f70675..03de4a719720b1 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/detection_output/detection_output_kernel_sort.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/detection_output/detection_output_kernel_sort.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -35,7 +35,7 @@ ParamsKey DetectionOutputKernel_sort::GetSupportedKey() const { } CommonDispatchData DetectionOutputKernel_sort::SetDefault(const detection_output_params& params) const { - CommonDispatchData runInfo = DetectionOutputKernelBase::SetDefault(params); + CommonDispatchData dispatchData = DetectionOutputKernelBase::SetDefault(params); unsigned class_num = params.detectOutParams.num_classes; if (params.detectOutParams.share_location && params.detectOutParams.background_label_id == 0) { @@ -49,15 +49,15 @@ CommonDispatchData DetectionOutputKernel_sort::SetDefault(const detection_output work_group_size = (work_group_size + work_group_size % 2) / (work_group_size / 256 + 1); } - runInfo.gws0 = Align(bboxesNum, work_group_size); - runInfo.gws1 = 1; - runInfo.gws2 = 1; + dispatchData.gws[0] = Align(bboxesNum, work_group_size); + dispatchData.gws[1] = 1; + dispatchData.gws[2] = 1; - runInfo.lws0 = work_group_size; - runInfo.lws1 = 1; - runInfo.lws2 = 1; + dispatchData.lws[0] = work_group_size; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - return runInfo; + return dispatchData; } KernelsData 
DetectionOutputKernel_sort::GetKernelsData(const Params& params, const optional_params& options) const { @@ -66,14 +66,14 @@ KernelsData DetectionOutputKernel_sort::GetKernelsData(const Params& params, con KernelData kd = KernelData::Default(params); const detection_output_params& detectOutParams = static_cast(params); - DispatchData runInfo = SetDefault(detectOutParams); + DispatchData dispatchData = SetDefault(detectOutParams); auto cldnnJit = GetJitConstants(detectOutParams); auto entryPoint = GetEntryPoint(kernelName, detectOutParams.layerID, options); auto jit = CreateJit(kernelName, cldnnJit, entryPoint); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entryPoint); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entryPoint); kd.estimatedTime = FORCE_PRIORITY_8; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/eltwise/eltwise_kernel_b_fs_yx_fsv16.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/eltwise/eltwise_kernel_b_fs_yx_fsv16.cpp index e180e8a24a7e3f..7f12c6be83c51d 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/eltwise/eltwise_kernel_b_fs_yx_fsv16.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/eltwise/eltwise_kernel_b_fs_yx_fsv16.cpp @@ -200,23 +200,23 @@ bool EltwiseKernel_b_fs_yx_fsv16::Validate(const Params& params, const optional_ } EltwiseKernelBase::DispatchData EltwiseKernel_b_fs_yx_fsv16::SetDefault(const eltwise_params& params) const { - DispatchData kd; + DispatchData dispatchData; - kd.gws0 = Align(params.output.Feature().v, 16); - kd.gws1 = CeilDiv(params.output.X().v, GetBlockSize(params)) * params.output.Y().v; - kd.gws2 = params.output.Batch().v; + dispatchData.gws[0] = Align(params.output.Feature().v, 16); + dispatchData.gws[1] = CeilDiv(params.output.X().v, GetBlockSize(params)) * params.output.Y().v; + dispatchData.gws[2] = 
params.output.Batch().v; - kd.lws0 = 16; - kd.lws1 = 16; - while (kd.lws1 > 1) { - if (kd.gws1 % kd.lws1 == 0) + dispatchData.lws[0] = 16; + dispatchData.lws[1] = 16; + while (dispatchData.lws[1] > 1) { + if (dispatchData.gws[1] % dispatchData.lws[1] == 0) break; - kd.lws1--; + dispatchData.lws[1]--; } - kd.lws2 = 1; + dispatchData.lws[2] = 1; - kd.efficiency = FORCE_PRIORITY_1; - return kd; + dispatchData.efficiency = FORCE_PRIORITY_1; + return dispatchData; } KernelsData EltwiseKernel_b_fs_yx_fsv16::GetKernelsData(const Params& params, const optional_params& options) const { @@ -231,12 +231,12 @@ KernelsData EltwiseKernel_b_fs_yx_fsv16::GetKernelsData(const Params& params, co auto cldnn_jit = GetJitConstants(newParams); std::string jit = CreateJit(kernelName, cldnn_jit, entry_point); - DispatchData runInfo = SetDefault(newParams); + DispatchData dispatchData = SetDefault(newParams); auto& kernel = kd.kernels[0]; - kernel.workGroups.global = {runInfo.gws0, runInfo.gws1, runInfo.gws2}; - kernel.workGroups.local = {runInfo.lws0, runInfo.lws1, runInfo.lws2}; + kernel.workGroups.global = dispatchData.gws; + kernel.workGroups.local = dispatchData.lws; kernel.kernelString = GetKernelString(kernelName, jit, entry_point, params.engineInfo, DEFAULT); kernel.arguments = GetArgsDesc((uint32_t)newParams.inputs.size(), @@ -244,7 +244,7 @@ KernelsData EltwiseKernel_b_fs_yx_fsv16::GetKernelsData(const Params& params, co false, GetFusedPrimitiveInputsCount(params)); - kd.estimatedTime = runInfo.efficiency; + kd.estimatedTime = dispatchData.efficiency; return {kd}; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/eltwise/eltwise_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/eltwise/eltwise_kernel_base.cpp index 8de307d7c52b6e..f8bc15463b901d 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/eltwise/eltwise_kernel_base.cpp +++ 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/eltwise/eltwise_kernel_base.cpp @@ -512,17 +512,14 @@ JitConstants EltwiseKernelBase::GetJitConstants(const eltwise_params& params) co } EltwiseKernelBase::DispatchData EltwiseKernelBase::SetDefault(const eltwise_params& params) const { - DispatchData kd; + DispatchData dispatchData; if (params.layoutBased || params.int8_quantization || params.broadcast) { - auto global = GetTensorFriendlyWorkGroups(params.output); - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; + dispatchData.gws = GetTensorFriendlyWorkGroups(params.output); } else if (CheckInputsOutputNoPitchSameDims(params)) { - kd.gws0 = params.output.LogicalSize(); - kd.gws1 = 1; - kd.gws2 = 1; + dispatchData.gws[0] = params.output.LogicalSize(); + dispatchData.gws[1] = 1; + dispatchData.gws[2] = 1; } else { const auto& out = params.output; @@ -536,60 +533,58 @@ EltwiseKernelBase::DispatchData EltwiseKernelBase::SetDefault(const eltwise_para gws.push_back(1U); } - kd.gws0 = gws[0]; + dispatchData.gws[0] = gws[0]; if (n_dims == 6) { - kd.gws1 = gws[1] * gws[2] * gws[3]; // y*z*w - kd.gws2 = gws[4] * gws[5]; + dispatchData.gws[1] = gws[1] * gws[2] * gws[3]; // y*z*w + dispatchData.gws[2] = gws[4] * gws[5]; } else if (n_dims == 5) { - kd.gws1 = gws[1] * gws[2]; // y*z - kd.gws2 = gws[3] * gws[4]; + dispatchData.gws[1] = gws[1] * gws[2]; // y*z + dispatchData.gws[2] = gws[3] * gws[4]; } else { - kd.gws1 = gws[1]; - kd.gws2 = gws[2] * gws[3]; + dispatchData.gws[1] = gws[1]; + dispatchData.gws[2] = gws[2] * gws[3]; } } - auto local = GetOptimalLocalWorkGroupSizes({kd.gws0, kd.gws1, kd.gws2}, params.engineInfo); + auto local = GetOptimalLocalWorkGroupSizes({dispatchData.gws[0], dispatchData.gws[1], dispatchData.gws[2]}, params.engineInfo); const size_t optimal_lws_values[] = {256, 224, 192, 160, 128, 96, 64, 32, 16}; if ((params.output.GetLayout() == DataLayout::b_fs_yx_fsv16 || params.output.GetLayout() == 
DataLayout::b_fs_zyx_fsv16 || params.output.GetLayout() == DataLayout::bs_fs_yx_bsv16_fsv16) && - params.output.Feature().v % 16 == 0 && kd.gws1 % 16 == 0) { - kd.lws0 = 1; + params.output.Feature().v % 16 == 0 && dispatchData.gws[1] % 16 == 0) { + dispatchData.lws[0] = 1; for (auto lws : optimal_lws_values) { - if (kd.gws1 % lws == 0) { - kd.lws1 = lws; + if (dispatchData.gws[1] % lws == 0) { + dispatchData.lws[1] = lws; break; } } - kd.lws2 = 1; + dispatchData.lws[2] = 1; } else if (params.output.GetLayout() == DataLayout::fs_b_yx_fsv32) { - kd.gws2 = Align(kd.gws2, 32); - kd.lws0 = 1; - kd.lws1 = 1; - kd.lws2 = 32; + dispatchData.gws[2] = Align(dispatchData.gws[2], 32); + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 32; } else if (params.output.GetLayout() == DataLayout::b_fs_yx_fsv32 && params.output.Feature().v % 32 == 0) { if (params.layoutBased || params.int8_quantization || params.broadcast) { - kd.lws0 = 1; - kd.lws1 = 32; - kd.lws2 = 1; - } else if (kd.gws0 == params.output.LogicalSize()) { - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 32; + dispatchData.lws[2] = 1; + } else if (dispatchData.gws[0] == params.output.LogicalSize()) { + dispatchData.lws = local; } else { - kd.lws0 = 1; - kd.lws1 = 1; - kd.lws2 = 32; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 32; } } else { - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; + dispatchData.lws[0] = local[0]; + dispatchData.lws[1] = local[1]; + dispatchData.lws[2] = local[2]; } - return kd; + return dispatchData; } KernelsData EltwiseKernelBase::GetCommonKernelsData(const Params& params, const optional_params& options) const { @@ -604,12 +599,12 @@ KernelsData EltwiseKernelBase::GetCommonKernelsData(const Params& params, const auto cldnn_jit = GetJitConstants(newParams); std::string jit = CreateJit(kernelName, cldnn_jit, entry_point); - DispatchData runInfo = 
SetDefault(newParams); + DispatchData dispatchData = SetDefault(newParams); auto& kernel = kd.kernels[0]; - kernel.workGroups.global = {runInfo.gws0, runInfo.gws1, runInfo.gws2}; - kernel.workGroups.local = {runInfo.lws0, runInfo.lws1, runInfo.lws2}; + kernel.workGroups.global = dispatchData.gws; + kernel.workGroups.local = dispatchData.lws; kernel.kernelString = GetKernelString(kernelName, jit, entry_point, params.engineInfo, DEFAULT); kernel.arguments = GetArgsDesc((uint32_t)newParams.inputs.size(), diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/eltwise/eltwise_kernel_mixed_byxf_and_fs_b_yx_fsv32.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/eltwise/eltwise_kernel_mixed_byxf_and_fs_b_yx_fsv32.cpp index 4abb2917cc72fe..0f5f71efeeda7e 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/eltwise/eltwise_kernel_mixed_byxf_and_fs_b_yx_fsv32.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/eltwise/eltwise_kernel_mixed_byxf_and_fs_b_yx_fsv32.cpp @@ -21,26 +21,6 @@ namespace kernel_selector { -namespace { -std::shared_ptr GetJit_GetIndexForDataLayout(std::string jitName, - std::string prefix, - DataLayout dataLayout) { - std::string jitValue; - switch (dataLayout) { - case DataLayout::byxf: - jitValue += "GET_DATA_INDEX("; - break; - case DataLayout::fs_b_yx_fsv32: - jitValue += "GET_DATA_FS_B_YX_FSV32_INDEX("; - break; - default: - throw std::runtime_error("incorrect data_layout"); - } - jitValue += prefix + ",b,f,y,x)"; - - return MakeJitConstant(jitName, jitValue); -} -} // namespace // TODO: [blocked_formats] does fp32 work well with kernel? 
ParamsKey EltwiseKernel_mixed_byxf_and_fs_b_yx_fsv32::GetSupportedKey() const { ParamsKey k; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/embedding_bag/embedding_bag_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/embedding_bag/embedding_bag_kernel_ref.cpp index 333ecc2a212627..8c73282a053ddd 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/embedding_bag/embedding_bag_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/embedding_bag/embedding_bag_kernel_ref.cpp @@ -43,23 +43,14 @@ JitConstants EmbeddingBagKernelRef::GetJitConstants(const embedding_bag_params& } CommonDispatchData EmbeddingBagKernelRef::SetDefault(const embedding_bag_params& params) const { - CommonDispatchData runInfo; + CommonDispatchData dispatchData; - std::vector global = { params.output.Batch().v, - params.output.Feature().v, - params.output.Y().v * params.output.X().v }; + dispatchData.gws = { params.output.Batch().v, + params.output.Feature().v, + params.output.Y().v * params.output.X().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; - - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; - - return runInfo; + return dispatchData; } KernelsData EmbeddingBagKernelRef::GetKernelsData(const Params& params, const optional_params& options) const { @@ -70,7 +61,7 @@ KernelsData EmbeddingBagKernelRef::GetKernelsData(const Params& params, const op return {}; } - auto runInfo = SetDefault(newParams); + auto dispatchData = SetDefault(newParams); auto entry_point = GetEntryPoint(kernelName, newParams.layerID, options); auto cldnn_jit = GetJitConstants(newParams); std::string jit = CreateJit(kernelName, cldnn_jit, entry_point); 
@@ -78,7 +69,7 @@ KernelsData EmbeddingBagKernelRef::GetKernelsData(const Params& params, const op auto& kernel = kd.kernels[0]; FillCLKernelData(kernel, - runInfo, + dispatchData, params.engineInfo, kernelName, jit, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/extract_image_patches/extract_image_patches_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/extract_image_patches/extract_image_patches_kernel_base.cpp index f3c3e7c6187a16..47083f79142c6e 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/extract_image_patches/extract_image_patches_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/extract_image_patches/extract_image_patches_kernel_base.cpp @@ -53,23 +53,14 @@ JitConstants ExtractImagePatchesKernelBase::GetJitConstants(const extract_image_ } ExtractImagePatchesKernelBase::DispatchData ExtractImagePatchesKernelBase::SetDefault(const extract_image_patches_params& params) const { - DispatchData kd; + DispatchData dispatchData; - std::vector global = { params.output.Batch().v, - params.output.Feature().v, - params.output.Y().v * params.output.X().v }; + dispatchData.gws = { params.output.Batch().v, + params.output.Feature().v, + params.output.Y().v * params.output.X().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - const auto& local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - return kd; + return dispatchData; } KernelsData ExtractImagePatchesKernelBase::GetCommonKernelsData(const Params& params, @@ -81,7 +72,7 @@ KernelsData ExtractImagePatchesKernelBase::GetCommonKernelsData(const Params& pa const auto& prim_params = static_cast(params); - auto run_info = SetDefault(prim_params); + auto dispatchData = 
SetDefault(prim_params); KernelData kd = KernelData::Default(params); auto cldnn_jit = GetJitConstants(prim_params); @@ -89,7 +80,7 @@ KernelsData ExtractImagePatchesKernelBase::GetCommonKernelsData(const Params& pa auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, run_info, params.engineInfo, kernelName, jit, entry_point); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point); kd.estimatedTime = estimated_time; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_block_kernel_base.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_block_kernel_base.h index 33097aa4da22bd..c3ad259ceab5ac 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_block_kernel_base.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_block_kernel_base.h @@ -24,7 +24,7 @@ class FullyConnectedBlockKernelBase : public FullyConnectedKernelBase { virtual ~FullyConnectedBlockKernelBase() {} protected: - JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& dispatchData) const override; // how many batches will a single work item compute virtual size_t GetBatchesPerWorkItem(const fully_connected_params& params) const; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_base.cpp index 9617e458cca0b1..234e0272759228 100644 --- 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_base.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -35,18 +35,16 @@ JitConstants FullyConnectedKernelBase::GetJitConstants(const fully_connected_par FullyConnectedKernelBase::DispatchData FullyConnectedKernelBase::SetDefault(const fully_connected_params& params, int) const { DispatchData dispatchData; - dispatchData.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; // Determine global work sizes. - dispatchData.gws0 = params.output.LogicalSize(); - dispatchData.gws1 = dispatchData.gws2 = 1; + dispatchData.gws = { params.output.LogicalSize(), 1, 1 }; // Find largest positive local work size that is divider for global work size. 
- dispatchData.lws0 = std::min(std::max(dispatchData.gws0, static_cast(1)), static_cast(32)); - while (dispatchData.gws0 % dispatchData.lws0 != 0) { - --dispatchData.lws0; + dispatchData.lws[0] = std::min(std::max(dispatchData.gws[0], static_cast(1)), static_cast(32)); + while (dispatchData.gws[0] % dispatchData.lws[0] != 0) { + --dispatchData.lws[0]; } - dispatchData.lws1 = dispatchData.lws2 = 1; + dispatchData.lws[1] = dispatchData.lws[2] = 1; return dispatchData; } @@ -99,8 +97,8 @@ KernelsData FullyConnectedKernelBase::GetCommonKernelsData(const Params ¶ms, auto entry_point = GetEntryPoint(kernelName, orgParams.layerID, options); - const DispatchData runInfo = SetDefault(newParams, autoTuneIndex); - auto cldnn_jit = GetJitConstants(newParams, runInfo); + const DispatchData dispatchData = SetDefault(newParams, autoTuneIndex); + auto cldnn_jit = GetJitConstants(newParams, dispatchData); std::string jit = CreateJit(kernelName, cldnn_jit, entry_point); uint32_t fused_deps_total = 0; @@ -112,7 +110,7 @@ KernelsData FullyConnectedKernelBase::GetCommonKernelsData(const Params ¶ms, auto& kernel = kd.kernels[0]; FillCLKernelData(kernel, - runInfo, + dispatchData, params.engineInfo, kernelName, jit, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_base.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_base.h index f732cf8e4c6c4b..b3da6c9971a71a 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_base.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_base.h @@ -63,7 +63,7 @@ class FullyConnectedKernelBase : public WeightBiasKernelBase { const int autoTuneIndex = -1) const; protected: - virtual JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& kd) const; + virtual 
JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& dispatchData) const; virtual DispatchData SetDefault(const fully_connected_params& params, int autoTuneIndex = -1) const; KernelsData GetCommonKernelsData(const Params ¶ms, const optional_params &options, @@ -74,7 +74,7 @@ class FullyConnectedKernelBase : public WeightBiasKernelBase { int autoTuneIndex = -1) const; // Fused ops - virtual JitConstants GetFusedPrimitivesJitConstants(const fully_connected_params& params, const DispatchData& kd) const; + virtual JitConstants GetFusedPrimitivesJitConstants(const fully_connected_params& params, const DispatchData& dispatchData) const; Datatype GetActivationType(const fully_connected_params& params) const; // --Fused ops diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_io_gemm.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_io_gemm.cpp index 05e17ccfa68c8a..4fdeed22c7ba56 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_io_gemm.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_io_gemm.cpp @@ -39,30 +39,22 @@ ParamsKey FullyConnected_bf_io_GEMM::GetSupportedKey() const { FullyConnected_bf_io_GEMM::DispatchData FullyConnected_bf_io_GEMM::SetDefault(const fully_connected_params& params, int autoTuneIndex) const { - auto runInfo = Parent::SetDefault(params, autoTuneIndex); + auto dispatchData = Parent::SetDefault(params, autoTuneIndex); const uint32_t localWorkSizeX = 64; const uint32_t globalWorkSizeX = localWorkSizeX; - std::vector global = {globalWorkSizeX, params.output.Feature().v, params.output.Batch().v}; - std::vector local = {localWorkSizeX, 1, 1}; + dispatchData.gws = { globalWorkSizeX, params.output.Feature().v, 1 }; + dispatchData.lws = { 
localWorkSizeX, 1, 1 }; - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = 1; + dispatchData.efficiency = FORCE_PRIORITY_6; - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = 1; - - runInfo.efficiency = FORCE_PRIORITY_6; - - return runInfo; + return dispatchData; } JitConstants FullyConnected_bf_io_GEMM::GetJitConstants(const fully_connected_params& params, - const DispatchData& kd) const { - auto jit = Parent::GetJitConstants(params, kd); + const DispatchData& dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); if (params.inputs[0].GetDType() == Datatype::F16) { jit.AddConstant(MakeJitConstant("__fc_f16", "")); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_io_gemm.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_io_gemm.h index 162950ba1380dc..edfd5bde755174 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_io_gemm.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_io_gemm.h @@ -29,6 +29,6 @@ class FullyConnected_bf_io_GEMM : public FullyConnectedKernelBase { protected: DispatchData SetDefault(const fully_connected_params& params, int autoTuneIndex = -1) const override; - JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& dispatchData) const override; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_io_input_spatial.cpp 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_io_input_spatial.cpp index 7b0d270582f582..aaa22fd857aad9 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_io_input_spatial.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_io_input_spatial.cpp @@ -35,27 +35,28 @@ ParamsKey FullyConnected_bf_io_input_spatial::GetSupportedKey() const { FullyConnected_bf_io_input_spatial::DispatchData FullyConnected_bf_io_input_spatial::SetDefault( const fully_connected_params& arg, int) const { - auto kd = FullyConnectedKernelBase::SetDefault(arg); + auto dispatchData = FullyConnectedKernelBase::SetDefault(arg); - kd.gws0 = Align(arg.output.LogicalSize() / arg.inputs[0].Batch().v, 16); - kd.gws1 = arg.inputs[0].Batch().v; - kd.gws2 = 1; - kd.lws0 = 16; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.gws[0] = Align(arg.output.LogicalSize() / arg.inputs[0].Batch().v, 16); + dispatchData.gws[1] = arg.inputs[0].Batch().v; + dispatchData.gws[2] = 1; - kd.efficiency = DONT_USE_IF_HAVE_SOMETHING_ELSE; + dispatchData.lws[0] = 16; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; + + dispatchData.efficiency = DONT_USE_IF_HAVE_SOMETHING_ELSE; const auto& input = arg.inputs[0]; const auto& output = arg.output; if (input.Batch().v == 1 && output.Batch().v == 1) { if ((input.LogicalSize() / output.Batch().v >= 4096) && (output.Feature().v >= 4096)) { - kd.efficiency = FORCE_PRIORITY_1; + dispatchData.efficiency = FORCE_PRIORITY_1; } } - return kd; + return dispatchData; } bool FullyConnected_bf_io_input_spatial::Validate(const Params& p, const optional_params& o) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_io_ref.cpp 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_io_ref.cpp index cd7bbcc2388a62..5caf4eb5051973 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_io_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_io_ref.cpp @@ -32,8 +32,8 @@ ParamsKey FullyConnected_bf_io_ref::GetSupportedKey() const { return k; } -JitConstants FullyConnected_bf_io_ref::GetJitConstants(const fully_connected_params& params, const DispatchData& kd) const { - JitConstants jit = Parent::GetJitConstants(params, kd); +JitConstants FullyConnected_bf_io_ref::GetJitConstants(const fully_connected_params& params, const DispatchData& dispatchData) const { + JitConstants jit = Parent::GetJitConstants(params, dispatchData); if (!params.fused_ops.empty()) { auto input_dt = GetUnitType(params); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_io_ref.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_io_ref.h index 331ccf589d9ef4..6ce6621368a034 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_io_ref.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_io_ref.h @@ -31,6 +31,6 @@ class FullyConnected_bf_io_ref : public FullyConnectedKernelBase { std::vector GetSupportedFusedOps() const override { return { FusedOpType::ACTIVATION }; } - JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& dispatchData) const override; }; } // namespace kernel_selector diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_tiled.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_tiled.cpp index 0101ace92d44e4..858c43b4c56c29 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_tiled.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_tiled.cpp @@ -182,7 +182,7 @@ FullyConnected_bf_tiled::GetAutoTuneParams(const fully_connected_params& params, .Case(tune_params(16, std::min(max_tile_ofm, 2u), 1, 2, 1, 1, AGE_BASED)) .Case(tune_params(8, std::min(max_tile_ofm, 2u), 1, 2, 1, 1, AGE_BASED)); } - + if (dtype == Datatype::F32) { // tune_params(tile_b, tile_ofm, tile_ifm, tile_k, dispatch_bsv, dispatch_fsv, exec_options) selector.Case(tune_params(8, std::min(max_tile_ofm, 2u), 1, 1, 16, 2, AGE_BASED)) @@ -195,17 +195,17 @@ FullyConnected_bf_tiled::GetAutoTuneParams(const fully_connected_params& params, selector.Case([&](const fully_connected_params&) -> tune_params { tune_params result(8, std::min(max_tile_ofm, 2u), 1, 2, 1, 1, DEFAULT); - + while (batch % result.tile_b != 0) result.tile_b--; - + result.dispatch_bsv = 16; while (batch % (result.tile_b * result.dispatch_bsv) != 0) result.dispatch_bsv--; if (result.tile_b >= 8) result.exec_options = AGE_BASED; - + return result; }); @@ -214,43 +214,43 @@ FullyConnected_bf_tiled::GetAutoTuneParams(const fully_connected_params& params, FullyConnected_bf_tiled::DispatchData FullyConnected_bf_tiled::SetDefault(const fully_connected_params& params, int autoTuneIndex) const { - auto runInfo = Parent::SetDefault(params); + auto dispatchData = Parent::SetDefault(params); auto tparams = GetAutoTuneParams(params, autoTuneIndex); size_t feature_threads = CeilDiv(params.output.Feature().v, tparams.tile_ofm * simd); size_t batch_threads = 
params.output.Batch().v / tparams.tile_b; - runInfo.gws0 = feature_threads * batch_threads * simd; - runInfo.gws1 = 1; - runInfo.gws2 = 1; + dispatchData.gws[0] = feature_threads * batch_threads * simd; + dispatchData.gws[1] = 1; + dispatchData.gws[2] = 1; - runInfo.lws0 = simd; - runInfo.lws1 = 1; - runInfo.lws2 = 1; + dispatchData.lws[0] = simd; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - runInfo.tile_m = tparams.tile_b; - runInfo.tile_n = tparams.tile_ofm; - runInfo.tile_mk = tparams.tile_ifm; - runInfo.tile_nk = tparams.tile_k; - runInfo.tile_ms = tparams.dispatch_bsv; - runInfo.tile_ns = tparams.dispatch_fsv; + dispatchData.tile_m = tparams.tile_b; + dispatchData.tile_n = tparams.tile_ofm; + dispatchData.tile_mk = tparams.tile_ifm; + dispatchData.tile_nk = tparams.tile_k; + dispatchData.tile_ms = tparams.dispatch_bsv; + dispatchData.tile_ns = tparams.dispatch_fsv; - return runInfo; + return dispatchData; } -JitConstants FullyConnected_bf_tiled::GetJitConstants(const fully_connected_params& params, const DispatchData& kd) const { - JitConstants jit = Parent::GetJitConstants(params, kd); +JitConstants FullyConnected_bf_tiled::GetJitConstants(const fully_connected_params& params, const DispatchData& dispatchData) const { + JitConstants jit = Parent::GetJitConstants(params, dispatchData); jit.AddConstant(MakeJitConstant("SIMD", simd)); - jit.AddConstant(MakeJitConstant("TILE_B", kd.tile_m)); - jit.AddConstant(MakeJitConstant("TILE_OFM", kd.tile_n)); - jit.AddConstant(MakeJitConstant("TILE_IFM", kd.tile_mk)); - jit.AddConstant(MakeJitConstant("TILE_K", kd.tile_nk)); - jit.AddConstant(MakeJitConstant("TILE_K_OFM", kd.tile_nk * kd.tile_n)); - jit.AddConstant(MakeJitConstant("DISPATCH_BSV", kd.tile_ms)); - jit.AddConstant(MakeJitConstant("DISPATCH_FSV", kd.tile_ns)); + jit.AddConstant(MakeJitConstant("TILE_B", dispatchData.tile_m)); + jit.AddConstant(MakeJitConstant("TILE_OFM", dispatchData.tile_n)); + jit.AddConstant(MakeJitConstant("TILE_IFM", 
dispatchData.tile_mk)); + jit.AddConstant(MakeJitConstant("TILE_K", dispatchData.tile_nk)); + jit.AddConstant(MakeJitConstant("TILE_K_OFM", dispatchData.tile_nk * dispatchData.tile_n)); + jit.AddConstant(MakeJitConstant("DISPATCH_BSV", dispatchData.tile_ms)); + jit.AddConstant(MakeJitConstant("DISPATCH_FSV", dispatchData.tile_ns)); - jit.Merge(MakeConstantLoopUnrollJitConstants(kd.tile_m)); + jit.Merge(MakeConstantLoopUnrollJitConstants(dispatchData.tile_m)); bool realign_fp16_offset = params.inputs[0].GetDType() == Datatype::F16 && params.output.GetFirstElementOffset() % 2 != 0; jit.AddConstant(MakeJitConstant("REALIGN_FP16_OFFSET", realign_fp16_offset)); @@ -262,14 +262,14 @@ JitConstants FullyConnected_bf_tiled::GetJitConstants(const fully_connected_para if (!params.fused_ops.empty()) { auto boundary_check = BoundaryCheck::DISABLED; - if (params.output.Feature().v % (kd.tile_n * simd) != 0) + if (params.output.Feature().v % (dispatchData.tile_n * simd) != 0) boundary_check = BoundaryCheck::ENABLED; FusedOpsConfiguration conf = { "", {"(out_b + bi)", "out_f", "0", "0"}, "activated[bi]", activation_dt, - kd.tile_n, + dispatchData.tile_n, LoadType::LT_ALIGNED_READ, boundary_check, IndexType::TENSOR_COORD, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_tiled.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_tiled.h index 72d0e77339d257..e795165b4c7f00 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_tiled.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bf_tiled.h @@ -68,7 +68,7 @@ class FullyConnected_bf_tiled : public FullyConnectedKernelBase { FusedOpType::SCALE, FusedOpType::QUANTIZE }; } - JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& kd) 
const override; + JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& dispatchData) const override; bool Validate(const Params& params, const optional_params& options) const override; tune_params GetAutoTuneParams(const fully_connected_params& params, int idx = -1) const; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bfyx_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bfyx_ref.cpp index b6db4b9b97ccde..4937335e345f51 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bfyx_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bfyx_ref.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016-2019 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -48,25 +48,17 @@ ParamsKey FullyConnected_bfyx_Ref::GetSupportedKey() const { FullyConnected_bfyx_Ref::DispatchData FullyConnected_bfyx_Ref::SetDefault(const fully_connected_params& params, int) const { - auto runInfo = Parent::SetDefault(params); + auto dispatchData = Parent::SetDefault(params); - std::vector global = {params.output.Feature().v, params.output.Batch().v}; - std::vector local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + dispatchData.gws = { params.output.Feature().v, params.output.Batch().v, 1 }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = 1; - - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = 1; - - return runInfo; + return dispatchData; } JitConstants FullyConnected_bfyx_Ref::GetJitConstants(const fully_connected_params& params, - const FullyConnectedKernelBase::DispatchData& kd) const { - JitConstants jit = Parent::GetJitConstants(params, kd); + const FullyConnectedKernelBase::DispatchData& dispatchData) const { + JitConstants jit = Parent::GetJitConstants(params, dispatchData); Datatype accumulator_dt; Datatype activation_dt; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bfyx_ref.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bfyx_ref.h index 2965a0d22cb581..e47bb2fa3aa451 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bfyx_ref.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bfyx_ref.h @@ -36,6 +36,6 @@ class FullyConnected_bfyx_Ref : public FullyConnectedKernelBase { FusedOpType::ACTIVATION }; } bool Validate(const Params& params, const optional_params& options) const override; - JitConstants 
GetJitConstants(const fully_connected_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& dispatchData) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bs_f_bsv16_af8.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bs_f_bsv16_af8.cpp index a89e55c07a1bfe..beda9cbb8cb97c 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bs_f_bsv16_af8.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bs_f_bsv16_af8.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -34,15 +34,15 @@ ParamsKey FullyConnected_bs_f_bsv16_af8::GetSupportedKey() const { FullyConnected_bs_f_bsv16_af8::DispatchData FullyConnected_bs_f_bsv16_af8::SetDefault(const fully_connected_params& arg, int) const { - auto kd = FullyConnectedBlockKernelBase::SetDefault(arg); + auto dispatchData = FullyConnectedBlockKernelBase::SetDefault(arg); size_t groups_per_batches = GetLocalGroupsSize(arg); - kd.gws0 = Align(arg.output.LogicalSize() / (GetBatchesPerWorkItem(arg) * groups_per_batches), 16); - kd.gws1 = groups_per_batches; - kd.lws0 = 16; - kd.lws1 = 1; + dispatchData.gws[0] = Align(arg.output.LogicalSize() / (GetBatchesPerWorkItem(arg) * groups_per_batches), 16); + dispatchData.gws[1] = groups_per_batches; + dispatchData.lws[0] = 16; + dispatchData.lws[1] = 1; - return kd; + return dispatchData; } static bool check_input_layout(const DataTensor& t) { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bs_f_bsv16_b1.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bs_f_bsv16_b1.cpp index 49160b5221482c..f20dbe7c62862f 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bs_f_bsv16_b1.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bs_f_bsv16_b1.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -34,12 +34,12 @@ ParamsKey FullyConnected_bs_f_bsv16_b1::GetSupportedKey() const { JitConstants FullyConnected_bs_f_bsv16_b1::GetJitConstants( const fully_connected_params& params, - const FullyConnectedKernelBase::DispatchData& run_info) const { - auto& d = static_cast(run_info); - auto cldnn_jit = FullyConnectedKernelBase::GetJitConstants(params, run_info); + const FullyConnectedKernelBase::DispatchData& dispatchData) const { + auto& d = static_cast(dispatchData); + auto cldnn_jit = FullyConnectedKernelBase::GetJitConstants(params, dispatchData); cldnn_jit.AddConstants({ - MakeJitConstant("SUB_GROUP_SIZE", run_info.lws0), - MakeJitConstant("WORK_ITEMS_PER_BATCH", run_info.gws1), + MakeJitConstant("SUB_GROUP_SIZE", dispatchData.lws[0]), + MakeJitConstant("WORK_ITEMS_PER_BATCH", dispatchData.gws[1]), MakeJitConstant("UNIT_BYTE_SIZE", d.unit_byte_size), MakeJitConstant("CHUNK_TYPE", d.chunk_type), @@ -56,11 +56,11 @@ JitConstants FullyConnected_bs_f_bsv16_b1::GetJitConstants( FullyConnected_bs_f_bsv16_b1::DispatchData FullyConnected_bs_f_bsv16_b1::SetDefault(const fully_connected_params& arg, int) const { - DispatchData run_info = FullyConnectedKernelBase::SetDefault(arg); + DispatchData dispatchData = FullyConnectedKernelBase::SetDefault(arg); // Properties of chunk and unit. const char* chunk_type = "uint"; - const uint32_t unit_byte_size = run_info.fp16UnitUsed ? 
sizeof(short) : sizeof(float); + const uint32_t unit_byte_size = BytesPerElement(arg.inputs[0].GetDType()); constexpr uint32_t chunk_byte_size = sizeof(uint32_t); constexpr uint32_t sub_group_size = 16; const uint32_t units_per_chunk = chunk_byte_size / unit_byte_size; @@ -73,23 +73,23 @@ FullyConnected_bs_f_bsv16_b1::DispatchData FullyConnected_bs_f_bsv16_b1::SetDefa const auto response_size = arg.output.Feature().v; auto rg_count = CeilDiv(response_size, responses_per_sg_exec); - run_info.lws0 = sub_group_size; + dispatchData.lws[0] = sub_group_size; // Number of work items needed to process all response groups. - run_info.gws0 = rg_count * sub_group_size; - run_info.lws1 = run_info.lws2 = 1; - run_info.gws1 = run_info.gws2 = 1; + dispatchData.gws[0] = rg_count * sub_group_size; + dispatchData.lws[1] = dispatchData.lws[2] = 1; + dispatchData.gws[1] = dispatchData.gws[2] = 1; - run_info.unit_byte_size = unit_byte_size; - run_info.chunk_type = chunk_type; - run_info.chunk_byte_size = chunk_byte_size; - run_info.units_per_chunk = units_per_chunk; - run_info.bytes_per_sg_read = sub_group_size * chunk_byte_size; - run_info.units_per_sg_read = units_per_sg_read; - run_info.responses_per_sg_exec = responses_per_sg_exec; - run_info.in_chunk_prefetch_size = 2; - run_info.filter_chunk_prefetch_size = responses_per_sg_exec; + dispatchData.unit_byte_size = unit_byte_size; + dispatchData.chunk_type = chunk_type; + dispatchData.chunk_byte_size = chunk_byte_size; + dispatchData.units_per_chunk = units_per_chunk; + dispatchData.bytes_per_sg_read = sub_group_size * chunk_byte_size; + dispatchData.units_per_sg_read = units_per_sg_read; + dispatchData.responses_per_sg_exec = responses_per_sg_exec; + dispatchData.in_chunk_prefetch_size = 2; + dispatchData.filter_chunk_prefetch_size = responses_per_sg_exec; - return run_info; + return dispatchData; } KernelsData FullyConnected_bs_f_bsv16_b1::GetKernelsData(const Params& params, const optional_params& optParams) const { diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bs_f_bsv16_b1.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bs_f_bsv16_b1.h index 03422b808e564c..30e3830f120758 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bs_f_bsv16_b1.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bs_f_bsv16_b1.h @@ -28,7 +28,7 @@ class FullyConnected_bs_f_bsv16_b1 : public FullyConnectedKernelBase { protected: JitConstants GetJitConstants(const fully_connected_params& params, - const FullyConnectedKernelBase::DispatchData& kd) const override; + const FullyConnectedKernelBase::DispatchData& dispatchData) const override; DispatchData SetDefault(const fully_connected_params& arg, int autoTuneIndex = -1) const override; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bs_f_bsv8_af8.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bs_f_bsv8_af8.cpp index 60e879adaa4fac..ebc6da84222b20 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bs_f_bsv8_af8.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bs_f_bsv8_af8.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -36,17 +36,17 @@ ParamsKey FullyConnected_bs_f_bsv8_af8::GetSupportedKey() const { FullyConnected_bs_f_bsv8_af8::DispatchData FullyConnected_bs_f_bsv8_af8::SetDefault(const fully_connected_params& arg, int) const { - auto kd = FullyConnectedBlockKernelBase::SetDefault(arg); + auto dispatchData = FullyConnectedBlockKernelBase::SetDefault(arg); size_t groups_per_batches = GetLocalGroupsSize(arg); - kd.gws0 = + dispatchData.gws[0] = Align(arg.output.LogicalSize() / (GetNeuronsPerWorkItem(arg) * GetBatchesPerWorkItem(arg) * groups_per_batches), 8); - kd.gws1 = groups_per_batches; - kd.lws0 = 8; - kd.lws1 = 1; + dispatchData.gws[1] = groups_per_batches; + dispatchData.lws[0] = 8; + dispatchData.lws[1] = 1; - return kd; + return dispatchData; } static bool check_input_layout(const DataTensor& t) { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_io_b8_f8.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_io_b8_f8.cpp index 62ea7f21f9b388..77e720dfa3ae23 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_io_b8_f8.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_io_b8_f8.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -48,18 +48,18 @@ size_t FullyConnected_fb_io_b8_f8::GetBatchesPerWorkItem(const fully_connected_p FullyConnected_fb_io_b8_f8::DispatchData FullyConnected_fb_io_b8_f8::SetDefault(const fully_connected_params& arg, int) const { - auto kd = FullyConnectedBlockKernelBase::SetDefault(arg); + auto dispatchData = FullyConnectedBlockKernelBase::SetDefault(arg); const auto& output = arg.output; size_t groups_per_batches = GetLocalGroupsSize(arg); - kd.gws0 = + dispatchData.gws[0] = Align(output.LogicalSize() / (GetNeuronsPerWorkItem(arg) * GetBatchesPerWorkItem(arg) * groups_per_batches), 8); - kd.gws1 = groups_per_batches; - kd.lws0 = 8; - kd.lws1 = 1; + dispatchData.gws[1] = groups_per_batches; + dispatchData.lws[0] = 8; + dispatchData.lws[1] = 1; - return kd; + return dispatchData; } bool FullyConnected_fb_io_b8_f8::Validate(const Params& p, const optional_params& o) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_io_block.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_io_block.cpp index 4af19b245a3de4..2ec01a1524c4e5 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_io_block.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_io_block.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -33,7 +33,7 @@ ParamsKey FullyConnected_fb_io_block::GetSupportedKey() const { FullyConnected_fb_io_block::DispatchData FullyConnected_fb_io_block::SetDefault(const fully_connected_params& arg, int) const { - auto kd = FullyConnectedKernelBase::SetDefault(arg); + auto dispatchData = FullyConnectedKernelBase::SetDefault(arg); const auto& output = arg.output; auto batch_size = output.Batch().v; @@ -50,37 +50,37 @@ FullyConnected_fb_io_block::DispatchData FullyConnected_fb_io_block::SetDefault( // for at least one input data set from batch. auto rg_count = CeilDiv(response_size, units_per_sg_read); - kd.lws0 = sub_group_size; + dispatchData.lws[0] = sub_group_size; // Number of work items needed to process all response groups. - kd.gws0 = rg_count * sub_group_size; - kd.lws1 = 1; - kd.gws1 = batch_size / units_per_sg_read; - - kd.unit_byte_size = unit_byte_size; - kd.chunk_type = chunk_type; - kd.chunk_byte_size = chunk_byte_size; - kd.units_per_chunk = units_per_chunk; - kd.bytes_per_sg_read = sub_group_size * chunk_byte_size; - kd.units_per_sg_read = units_per_sg_read; - kd.rg_count = (uint32_t)rg_count; - kd.last_rg_size = response_size % units_per_sg_read; - return kd; + dispatchData.gws[0] = rg_count * sub_group_size; + dispatchData.lws[1] = 1; + dispatchData.gws[1] = batch_size / units_per_sg_read; + + dispatchData.unit_byte_size = unit_byte_size; + dispatchData.chunk_type = chunk_type; + dispatchData.chunk_byte_size = chunk_byte_size; + dispatchData.units_per_chunk = units_per_chunk; + dispatchData.bytes_per_sg_read = sub_group_size * chunk_byte_size; + dispatchData.units_per_sg_read = units_per_sg_read; + dispatchData.rg_count = (uint32_t)rg_count; + dispatchData.last_rg_size = response_size % units_per_sg_read; + return dispatchData; } JitConstants FullyConnected_fb_io_block::GetJitConstants(const fully_connected_params& params, - const FullyConnectedKernelBase::DispatchData& run_info) const { - auto cldnn_jit = 
FullyConnectedKernelBase::GetJitConstants(params, run_info); + const FullyConnectedKernelBase::DispatchData& dispatchData) const { + auto cldnn_jit = FullyConnectedKernelBase::GetJitConstants(params, dispatchData); cldnn_jit.AddConstants({ - MakeJitConstant("SUB_GROUP_SIZE", run_info.lws0), - MakeJitConstant("WORK_ITEMS_PER_BATCH", run_info.gws1), - MakeJitConstant("UNIT_BYTE_SIZE", run_info.unit_byte_size), - MakeJitConstant("CHUNK_TYPE", run_info.chunk_type), - MakeJitConstant("CHUNK_BYTE_SIZE", run_info.chunk_byte_size), - MakeJitConstant("UNITS_PER_CHUNK", run_info.units_per_chunk), - MakeJitConstant("BYTES_PER_SG_READ", run_info.bytes_per_sg_read), - MakeJitConstant("UNITS_PER_SG_READ", run_info.units_per_sg_read), - MakeJitConstant("RG_COUNT", run_info.rg_count), - MakeJitConstant("LAST_RG_SIZE", run_info.last_rg_size), + MakeJitConstant("SUB_GROUP_SIZE", dispatchData.lws[0]), + MakeJitConstant("WORK_ITEMS_PER_BATCH", dispatchData.gws[1]), + MakeJitConstant("UNIT_BYTE_SIZE", dispatchData.unit_byte_size), + MakeJitConstant("CHUNK_TYPE", dispatchData.chunk_type), + MakeJitConstant("CHUNK_BYTE_SIZE", dispatchData.chunk_byte_size), + MakeJitConstant("UNITS_PER_CHUNK", dispatchData.units_per_chunk), + MakeJitConstant("BYTES_PER_SG_READ", dispatchData.bytes_per_sg_read), + MakeJitConstant("UNITS_PER_SG_READ", dispatchData.units_per_sg_read), + MakeJitConstant("RG_COUNT", dispatchData.rg_count), + MakeJitConstant("LAST_RG_SIZE", dispatchData.last_rg_size), }); return cldnn_jit; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_io_block.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_io_block.h index 550c9ee96dd2a1..8545ae5476554c 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_io_block.h +++ 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_io_block.h @@ -29,7 +29,7 @@ class FullyConnected_fb_io_block : public FullyConnectedKernelBase { protected: bool Validate(const Params& p, const optional_params& o) const override; JitConstants GetJitConstants(const fully_connected_params& params, - const FullyConnectedKernelBase::DispatchData& kd) const override; + const FullyConnectedKernelBase::DispatchData& dispatchData) const override; DispatchData SetDefault(const fully_connected_params& arg, int autoTuneIndex = -1) const override; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_io_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_io_ref.cpp index f769fde1a67a18..ed8aa49adb65e3 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_io_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_io_ref.cpp @@ -32,8 +32,8 @@ ParamsKey FullyConnected_fb_io_ref::GetSupportedKey() const { return k; } -JitConstants FullyConnected_fb_io_ref::GetJitConstants(const fully_connected_params& params, const DispatchData& kd) const { - JitConstants jit = Parent::GetJitConstants(params, kd); +JitConstants FullyConnected_fb_io_ref::GetJitConstants(const fully_connected_params& params, const DispatchData& dispatchData) const { + JitConstants jit = Parent::GetJitConstants(params, dispatchData); if (!params.fused_ops.empty()) { auto input_dt = GetActivationType(params); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_io_ref.h 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_io_ref.h index ee844a7c3e1d2e..2a45ee3964599f 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_io_ref.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_io_ref.h @@ -31,6 +31,6 @@ class FullyConnected_fb_io_ref : public FullyConnectedKernelBase { std::vector GetSupportedFusedOps() const override { return { FusedOpType::ACTIVATION }; } - JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& dispatchData) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_oi_b8_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_oi_b8_ref.cpp index e2254ce86122e7..64cd0d825073af 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_oi_b8_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_oi_b8_ref.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -33,15 +33,15 @@ ParamsKey FullyConnected_fb_oi_b8_ref::GetSupportedKey() const { FullyConnected_fb_oi_b8_ref::DispatchData FullyConnected_fb_oi_b8_ref::SetDefault(const fully_connected_params& arg, int) const { - auto kd = FullyConnectedKernelBase::SetDefault(arg); + auto dispatchData = FullyConnectedKernelBase::SetDefault(arg); const auto& output = arg.output; - kd.gws0 = output.Batch().v; - kd.gws1 = output.LogicalSize() / kd.gws0; - kd.lws0 = 8; - kd.lws1 = 1; + dispatchData.gws[0] = output.Batch().v; + dispatchData.gws[1] = output.LogicalSize() / dispatchData.gws[0]; + dispatchData.lws[0] = 8; + dispatchData.lws[1] = 1; - return kd; + return dispatchData; } bool FullyConnected_fb_oi_b8_ref::Validate(const Params& p, const optional_params& o) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_oi_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_oi_ref.cpp index bcfedd61729a74..037e5368840fdc 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_oi_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_oi_ref.cpp @@ -33,8 +33,8 @@ ParamsKey FullyConnected_fb_oi_ref::GetSupportedKey() const { } -JitConstants FullyConnected_fb_oi_ref::GetJitConstants(const fully_connected_params& params, const DispatchData& kd) const { - JitConstants jit = Parent::GetJitConstants(params, kd); +JitConstants FullyConnected_fb_oi_ref::GetJitConstants(const fully_connected_params& params, const DispatchData& dispatchData) const { + JitConstants jit = Parent::GetJitConstants(params, dispatchData); if (!params.fused_ops.empty()) { auto input_dt = GetUnitType(params); diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_oi_ref.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_oi_ref.h index 1461a23da6af6b..3780103f9d0c50 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_oi_ref.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fb_oi_ref.h @@ -31,6 +31,6 @@ class FullyConnected_fb_oi_ref : public FullyConnectedKernelBase { std::vector GetSupportedFusedOps() const override { return { FusedOpType::ACTIVATION }; } - JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& dispatchData) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fs_byx_fsv32.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fs_byx_fsv32.cpp index 718992c0edc187..6fba66afa6c95c 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fs_byx_fsv32.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fs_byx_fsv32.cpp @@ -44,28 +44,28 @@ ParamsKey FullyConnected_fs_byx_fsv32::GetSupportedKey() const { FullyConnected_fs_byx_fsv32::Parent::DispatchData FullyConnected_fs_byx_fsv32::SetDefault( const fully_connected_params& params, int autoTuneIndex) const { - auto runInfo = Parent::SetDefault(params, autoTuneIndex); + auto dispatchData = Parent::SetDefault(params, autoTuneIndex); auto blockSizeB = std::min(outputBlockSizeB, params.output.Batch().v); auto 
blockNumB = CeilDiv(params.output.Batch().v, blockSizeB); auto wgHeight = std::min(preferredWGHeight, blockNumB); - runInfo.gws0 = CeilDiv(params.output.Feature().v, outputBlockSizeF); - runInfo.gws1 = RoundUp(blockNumB, wgHeight); - runInfo.gws2 = subGroupSize; + dispatchData.gws[0] = CeilDiv(params.output.Feature().v, outputBlockSizeF); + dispatchData.gws[1] = RoundUp(blockNumB, wgHeight); + dispatchData.gws[2] = subGroupSize; - runInfo.lws0 = 1; - runInfo.lws1 = wgHeight; - runInfo.lws2 = subGroupSize; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = wgHeight; + dispatchData.lws[2] = subGroupSize; - runInfo.efficiency = FORCE_PRIORITY_5; + dispatchData.efficiency = FORCE_PRIORITY_5; - return runInfo; + return dispatchData; } JitConstants FullyConnected_fs_byx_fsv32::GetJitConstants(const fully_connected_params& params, - const DispatchData& kd) const { - auto jit = Parent::GetJitConstants(params, kd); + const DispatchData& dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); auto blockSizeB = std::min(outputBlockSizeB, params.output.Batch().v); auto blockNumB = CeilDiv(params.output.Batch().v, blockSizeB); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fs_byx_fsv32.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fs_byx_fsv32.h index 350d800d5848ae..77511e95f1ae2e 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fs_byx_fsv32.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_fs_byx_fsv32.h @@ -28,6 +28,6 @@ class FullyConnected_fs_byx_fsv32 : public FullyConnectedKernelBase { protected: ParamsKey GetSupportedKey() const override; DispatchData SetDefault(const fully_connected_params& params, int autoTuneIndex = -1) const override; - JitConstants 
GetJitConstants(const fully_connected_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& dispatchData) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_imad.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_imad.cpp index ded8ebbe34fbdc..753916c1f9fbf6 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_imad.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_imad.cpp @@ -55,17 +55,17 @@ FullyConnectedKernelIMAD::Parent::DispatchData FullyConnectedKernelIMAD::SetDefa int) const { const int simdSize = 16; - auto runInfo = Parent::SetDefault(params); + auto dispatchData = Parent::SetDefault(params); - runInfo.gws0 = RoundUp(params.output.Feature().v, simdSize); - runInfo.gws1 = params.output.Batch().v; - runInfo.gws2 = 1; + dispatchData.gws[0] = RoundUp(params.output.Feature().v, simdSize); + dispatchData.gws[1] = params.output.Batch().v; + dispatchData.gws[2] = 1; - runInfo.lws0 = simdSize; - runInfo.lws1 = 1; - runInfo.lws2 = 1; + dispatchData.lws[0] = simdSize; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - return runInfo; + return dispatchData; } // SetDefault bool FullyConnectedKernelIMAD::Validate(const Params& params, const optional_params& options) const { @@ -95,8 +95,8 @@ bool FullyConnectedKernelIMAD::Validate(const Params& params, const optional_par return true; } // Validate -JitConstants FullyConnectedKernelIMAD::GetJitConstants(const fully_connected_params& params, const DispatchData& kd) const { - auto jit = Parent::GetJitConstants(params, kd); +JitConstants FullyConnectedKernelIMAD::GetJitConstants(const fully_connected_params& 
params, const DispatchData& dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); if (!params.fused_ops.empty()) { auto input_dt = GetActivationType(params); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_imad.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_imad.h index 718ecc80e0cd45..f0de0beb469b74 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_imad.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_imad.h @@ -31,7 +31,7 @@ class FullyConnectedKernelIMAD : public FullyConnectedKernelBase { protected: bool Validate(const Params& params, const optional_params& options) const override; DispatchData SetDefault(const fully_connected_params& params, int autoTuneIndex = -1) const override; - JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& dispatchData) const override; std::vector GetSupportedFusedOps() const override { return { FusedOpType::QUANTIZE, FusedOpType::SCALE, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_mmad.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_mmad.cpp index b560f6e0503c1e..8b2e9f7c3ada6f 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_mmad.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_mmad.cpp @@ -82,28 +82,20 @@ FullyConnectedKernelMMAD::FullyConnectedTuningData FullyConnectedKernelMMAD::Set 
FullyConnectedKernelMMAD::DispatchData FullyConnectedKernelMMAD::SetDefault(const fully_connected_params& params, int) const { FullyConnectedTuningData tuning_data = SetTuningParams(params); - auto runInfo = Parent::SetDefault(params); + auto dispatchData = Parent::SetDefault(params); const auto& output = params.output; - std::vector global = { Align(output.Feature().v, tuning_data.sub_group_size) * tuning_data.slm_div_factor, output.Batch().v, 1 }; - std::vector local = { tuning_data.work_group_size, 1, 1 }; + dispatchData.gws = { Align(output.Feature().v, tuning_data.sub_group_size) * tuning_data.slm_div_factor, output.Batch().v, 1 }; + dispatchData.lws = { tuning_data.work_group_size, 1, 1 }; - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; - - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; - - return runInfo; + return dispatchData; } JitConstants FullyConnectedKernelMMAD::GetJitConstants(const fully_connected_params& params, - const DispatchData& runInfo) const { + const DispatchData& dispatchData) const { FullyConnectedTuningData tuning_data = SetTuningParams(params); - auto jit = Parent::GetJitConstants(params, runInfo); + auto jit = Parent::GetJitConstants(params, dispatchData); auto& input = params.inputs[0]; auto& weights = params.weights; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_mmad.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_mmad.h index 704b29173006a5..af7cb336e9abc9 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_mmad.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_mmad.h @@ -36,7 +36,7 @@ class FullyConnectedKernelMMAD : public FullyConnectedKernelBase { }; protected: - JitConstants 
GetJitConstants(const fully_connected_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const fully_connected_params& params, int autoTuneIndex = -1) const override; std::vector GetSupportedFusedOps() const override { return { FusedOpType::QUANTIZE, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_yxfb_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_yxfb_ref.cpp index b5d84af1d16537..49057ae4637a22 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_yxfb_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_yxfb_ref.cpp @@ -34,8 +34,8 @@ ParamsKey FullyConnected_yxfb_ref::GetSupportedKey() const { return k; } -JitConstants FullyConnected_yxfb_ref::GetJitConstants(const fully_connected_params& params, const DispatchData& kd) const { - JitConstants jit = Parent::GetJitConstants(params, kd); +JitConstants FullyConnected_yxfb_ref::GetJitConstants(const fully_connected_params& params, const DispatchData& dispatchData) const { + JitConstants jit = Parent::GetJitConstants(params, dispatchData); if (!params.fused_ops.empty()) { auto input_dt = GetUnitType(params); FusedOpsConfiguration conf = { "", {"b", "f", "y", "x"}, "result", input_dt, 1 }; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_yxfb_ref.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_yxfb_ref.h index 60af787251aebe..dcab3ba0540a46 100644 --- 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_yxfb_ref.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_yxfb_ref.h @@ -32,6 +32,6 @@ class FullyConnected_yxfb_ref : public FullyConnectedKernelBase { std::vector GetSupportedFusedOps() const override { return { FusedOpType::ACTIVATION }; } - JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const fully_connected_params& params, const DispatchData& dispatchData) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_base.cpp index 515e2b26264b25..be3f08aa9694cb 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_base.cpp @@ -109,7 +109,7 @@ bool fused_conv_eltwise_kernel_base::Validate(const Params& p, const optional_pa } JitConstants fused_conv_eltwise_kernel_base::GetJitConstants(const fused_conv_eltwise_params& params, - const DispatchData& kd) const { + const DispatchData& dispatchData) const { JitConstants mem_consts = WeightBiasKernelBase::GetJitConstants(params); const auto& padding = params.conv.padding; const auto& input = params.inputs[0]; @@ -151,12 +151,12 @@ JitConstants fused_conv_eltwise_kernel_base::GetJitConstants(const fused_conv_el std::vector unrollLoopParams{params.conv.filterSize.x, params.conv.filterSize.y, params.conv.filterSize.z, - (uint32_t)kd.gemmStyle.globalWorkSizeDX, - (uint32_t)kd.gemmStyle.globalWorkSizeDY, - 
(uint32_t)kd.gemmStyle.globalWorkSizeDZ, - (uint32_t)kd.gemmStyle.subBlockDimM, - (uint32_t)kd.gemmStyle.subBlockDimK, - (uint32_t)kd.gemmStyle.subBlockDimN}; + (uint32_t)dispatchData.gemmStyle.globalWorkSizeDX, + (uint32_t)dispatchData.gemmStyle.globalWorkSizeDY, + (uint32_t)dispatchData.gemmStyle.globalWorkSizeDZ, + (uint32_t)dispatchData.gemmStyle.subBlockDimM, + (uint32_t)dispatchData.gemmStyle.subBlockDimK, + (uint32_t)dispatchData.gemmStyle.subBlockDimN}; auto loopCount = *std::max_element(unrollLoopParams.begin(), unrollLoopParams.end()); @@ -166,13 +166,15 @@ JitConstants fused_conv_eltwise_kernel_base::GetJitConstants(const fused_conv_el return mem_consts; } -bool fused_conv_eltwise_kernel_base::CheckWorkGroups(const fused_conv_eltwise_kernel_base::DispatchData& kd) { - if (kd.gws0 == 0 || kd.gws1 == 0 || kd.gws2 == 0 || kd.lws0 == 0 || kd.lws1 == 0 || kd.lws2 == 0) { +bool fused_conv_eltwise_kernel_base::CheckWorkGroups(const fused_conv_eltwise_kernel_base::DispatchData& dispatchData) { + if (dispatchData.gws.size() != 3 || dispatchData.lws.size() != 3) return false; - } - if ((kd.gws0 % kd.lws0) != 0 || (kd.gws1 % kd.lws1) != 0 || (kd.gws2 % kd.lws2) != 0) { - return false; + for (size_t i = 0; i < dispatchData.gws.size(); i++) { + if (dispatchData.gws[i] == 0 || dispatchData.lws[i] == 0) + return false; + if ((dispatchData.gws[i] % dispatchData.lws[i]) != 0) + return false; } return true; @@ -216,43 +218,34 @@ bool fused_conv_eltwise_kernel_base::CheckPitchForSplitOnly(const fused_conv_elt fused_conv_eltwise_kernel_base::DispatchData fused_conv_eltwise_kernel_base::SetDefault( const fused_conv_eltwise_params& params, int) const { - DispatchData kd; + DispatchData dispatchData; const auto& out = params.output; - kd.fp16UnitUsed = out.GetDType() == Datatype::F16; - std::vector global; + if (params.output.GetLayout() == DataLayout::bfyx || params.output.GetLayout() == DataLayout::byxf || params.output.GetLayout() == DataLayout::bfzyx || 
params.output.GetLayout() == DataLayout::b_fs_zyx_fsv16 || params.output.GetLayout() == DataLayout::bs_fs_zyx_bsv16_fsv16) { - global = {out.X().v, out.Y().v * out.Z().v, out.Feature().v * out.Batch().v}; + dispatchData.gws = {out.X().v, out.Y().v * out.Z().v, out.Feature().v * out.Batch().v}; } else { - global = {out.Feature().v * out.Batch().v, out.X().v, out.Y().v * out.Z().v }; + dispatchData.gws = {out.Feature().v * out.Batch().v, out.X().v, out.Y().v * out.Z().v }; } - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - kd.cldnnStyle.blockWidth = 1; - kd.cldnnStyle.blockHeight = 1; - kd.cldnnStyle.prefetch = 0; - kd.cldnnStyle.inputBlockArraySize = 0; - kd.cldnnStyle.inputBlockWidth = 0; - - kd.gemmStyle.globalWorkSizeDX = 1; - kd.gemmStyle.globalWorkSizeDY = 1; - kd.gemmStyle.globalWorkSizeDZ = 1; - kd.gemmStyle.subBlockDimK = 1; - kd.gemmStyle.subBlockDimM = 0; - kd.gemmStyle.subBlockDimN = 0; - kd.efficiency = DONT_USE_IF_HAVE_SOMETHING_ELSE; - return kd; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); + + dispatchData.cldnnStyle.blockWidth = 1; + dispatchData.cldnnStyle.blockHeight = 1; + dispatchData.cldnnStyle.prefetch = 0; + dispatchData.cldnnStyle.inputBlockArraySize = 0; + dispatchData.cldnnStyle.inputBlockWidth = 0; + + dispatchData.gemmStyle.globalWorkSizeDX = 1; + dispatchData.gemmStyle.globalWorkSizeDY = 1; + dispatchData.gemmStyle.globalWorkSizeDZ = 1; + dispatchData.gemmStyle.subBlockDimK = 1; + dispatchData.gemmStyle.subBlockDimM = 0; + dispatchData.gemmStyle.subBlockDimN = 0; + dispatchData.efficiency = DONT_USE_IF_HAVE_SOMETHING_ELSE; + return dispatchData; } KernelsData fused_conv_eltwise_kernel_base::GetCommonKernelsData(const Params& params, @@ -269,9 +262,9 @@ KernelsData fused_conv_eltwise_kernel_base::GetCommonKernelsData(const Params& 
p if (NeedPaddedInput()) { kd.reorderInput = CovolutionUpdateInputParams(newParams); } - DispatchData runInfo = SetDefault(newParams, autoTuneIndex); + DispatchData dispatchData = SetDefault(newParams, autoTuneIndex); - if (!CheckWorkGroups(runInfo)) { + if (!CheckWorkGroups(dispatchData)) { // Internal Error - wrong calculation of global/local work group sizes return {}; } @@ -287,13 +280,13 @@ KernelsData fused_conv_eltwise_kernel_base::GetCommonKernelsData(const Params& p } auto finalKernelName = GetKernelName(newParams); - auto cldnnJit = GetJitConstants(newParams, runInfo); + auto cldnnJit = GetJitConstants(newParams, dispatchData); auto entryPoint = GetEntryPoint(finalKernelName, newParams.layerID, options); auto jit = CreateJit(finalKernelName, cldnnJit, entryPoint); auto& kernel = kd.kernels[0]; FillCLKernelData(kernel, - runInfo, + dispatchData, params.engineInfo, finalKernelName, jit, @@ -310,7 +303,7 @@ KernelsData fused_conv_eltwise_kernel_base::GetCommonKernelsData(const Params& p kernel.arguments.push_back({ArgumentDescriptor::Types::INPUT, 1}); } - kd.estimatedTime = runInfo.efficiency; + kd.estimatedTime = dispatchData.efficiency; kd.autoTuneIndex = autoTuneIndex; return {kd}; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_base.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_base.h index 4d1d1aa98560d9..9e691831142456 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_base.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_base.h @@ -111,7 +111,7 @@ class fused_conv_eltwise_kernel_base : public WeightBiasKernelBase { virtual std::string GetKernelName(const fused_conv_eltwise_params&) const { return kernelName; } virtual bool NeedPaddedInput() const { 
return false; } bool Validate(const Params& p, const optional_params& o) const override; - virtual JitConstants GetJitConstants(const fused_conv_eltwise_params& params, const DispatchData& kd) const; + virtual JitConstants GetJitConstants(const fused_conv_eltwise_params& params, const DispatchData& dispatchData) const; virtual DispatchData SetDefault(const fused_conv_eltwise_params& params, int autoTuneIndex = -1) const; static bool CheckWorkGroups(const DispatchData&); static bool CheckPitchForSplitOnly(const fused_conv_eltwise_params& params); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_1x1_opt.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_1x1_opt.cpp index de8ea67b25adfc..894cdf0ea5e740 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_1x1_opt.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_1x1_opt.cpp @@ -132,29 +132,29 @@ WeightsLayout fused_conv_eltwise_kernel_bfyx_1x1_opt::GetPreferreddWeightsLayout fused_conv_eltwise_kernel_base::DispatchData fused_conv_eltwise_kernel_bfyx_1x1_opt::SetDefault( const fused_conv_eltwise_params& arg, int) const { - DispatchData runInfo = Parent::SetDefault(arg); + DispatchData dispatchData = Parent::SetDefault(arg); constexpr size_t sub_group_size = 8; - runInfo.efficiency = FORCE_PRIORITY_3; + dispatchData.efficiency = FORCE_PRIORITY_3; auto block = get_out_block_size(arg); - runInfo.gws0 = arg.output.X().v / block.out_width; - runInfo.gws1 = arg.output.Y().v / block.out_height; - runInfo.gws2 = 2 * (arg.output.Feature().v * arg.output.Batch().v) / - block.out_depth; // process 8 output channels per Workitem + dispatchData.gws[0] = arg.output.X().v / block.out_width; + dispatchData.gws[1] = 
arg.output.Y().v / block.out_height; + dispatchData.gws[2] = 2 * (arg.output.Feature().v * arg.output.Batch().v) / + block.out_depth; // process 8 output channels per Workitem - runInfo.lws0 = 1; - runInfo.lws1 = 1; - runInfo.lws2 = 2 * sub_group_size; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 2 * sub_group_size; - return runInfo; + return dispatchData; } JitConstants fused_conv_eltwise_kernel_bfyx_1x1_opt::GetJitConstants(const fused_conv_eltwise_params& params, - const DispatchData& runInfo) const { - auto jit = Parent::GetJitConstants(params, runInfo); + const DispatchData& dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); auto block = get_out_block_size(params); jit.AddConstant(MakeJitConstant("OUT_BLOCK_WIDTH", block.out_width)); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_1x1_opt.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_1x1_opt.h index 3f77a726911bbc..4ad16b6468cd5b 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_1x1_opt.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_1x1_opt.h @@ -35,8 +35,8 @@ class fused_conv_eltwise_kernel_bfyx_1x1_opt : public fused_conv_eltwise_kernel_ WeightsLayout GetPreferreddWeightsLayout(const fused_conv_eltwise_params &) const override; std::string GetKernelName(const fused_conv_eltwise_params& params) const override; bool NeedPaddedInput() const override { return true; } - JitConstants GetJitConstants(const fused_conv_eltwise_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const fused_conv_eltwise_params& params, const DispatchData& dispatchData) const override; bool Validate(const Params& p, 
const optional_params& o) const override; DispatchData SetDefault(const fused_conv_eltwise_params& arg, int autoTuneIndex = -1) const override; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_iyxo.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_iyxo.cpp index 6f91ce21fd1c65..991f2092667f71 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_iyxo.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_iyxo.cpp @@ -54,19 +54,19 @@ ParamsKey fused_conv_eltwise_kernel_bfyx_iyxo::GetSupportedKey() const { fused_conv_eltwise_kernel_base::DispatchData fused_conv_eltwise_kernel_bfyx_iyxo::SetDefault( const fused_conv_eltwise_params& cp, int) const { - DispatchData runInfo = fused_conv_eltwise_kernel_base::SetDefault(cp); + DispatchData dispatchData = fused_conv_eltwise_kernel_base::SetDefault(cp); - runInfo.efficiency = FORCE_PRIORITY_9; + dispatchData.efficiency = FORCE_PRIORITY_9; - runInfo.gws0 = CeilDiv(cp.output.X().v, sub_group_size) / 4 / 2; - runInfo.gws1 = cp.output.Y().v / 2; - runInfo.gws2 = sub_group_size; + dispatchData.gws[0] = CeilDiv(cp.output.X().v, sub_group_size) / 4 / 2; + dispatchData.gws[1] = cp.output.Y().v / 2; + dispatchData.gws[2] = sub_group_size; - runInfo.lws0 = 1; - runInfo.lws1 = 1; - runInfo.lws2 = sub_group_size; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = sub_group_size; - return runInfo; + return dispatchData; } bool fused_conv_eltwise_kernel_bfyx_iyxo::Validate(const Params& p, const optional_params& o) const { @@ -82,9 +82,9 @@ bool fused_conv_eltwise_kernel_bfyx_iyxo::Validate(const Params& p, 
const option } JitConstants fused_conv_eltwise_kernel_bfyx_iyxo::GetJitConstants(const fused_conv_eltwise_params& params, - const DispatchData& runInfo) const { - auto jit = Parent::GetJitConstants(params, runInfo); - jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", runInfo.lws2)); + const DispatchData& dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); + jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", dispatchData.lws[2])); return jit; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_iyxo.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_iyxo.h index 965a863f6c94fc..1dddc41bd652ac 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_iyxo.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_iyxo.h @@ -34,7 +34,7 @@ class fused_conv_eltwise_kernel_bfyx_iyxo : public fused_conv_eltwise_kernel_bas WeightsLayout GetPreferreddWeightsLayout(const fused_conv_eltwise_params&) const override { return WeightsLayout::iyxo; } - JitConstants GetJitConstants(const fused_conv_eltwise_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const fused_conv_eltwise_params& params, const DispatchData& dispatchData) const override; bool Validate(const Params& p, const optional_params& o) const override; bool NeedPaddedInput() const override { return true; } DispatchData SetDefault(const fused_conv_eltwise_params& arg, int autoTuneIndex = -1) const override; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_os_iyx_osv16.cpp 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_os_iyx_osv16.cpp index 3f4582e54e671d..7b34ea54ba6fce 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_os_iyx_osv16.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_os_iyx_osv16.cpp @@ -148,7 +148,6 @@ fused_conv_eltwise_kernel_bfyx_os_iyx_osv16::GetAutoTuneOptions(const Params& p, option.blockWidth = 4; option.blockHeight = 3; option.prefetch = 5; - // run_info.efficiency = FORCE_PRIORITY_7; // GEMM is better } // if this is not 1x1 batch1 case then shrink filters, other way we're memory bound and it's best to use 16x1 block @@ -162,38 +161,38 @@ fused_conv_eltwise_kernel_bfyx_os_iyx_osv16::GetAutoTuneOptions(const Params& p, fused_conv_eltwise_kernel_base::DispatchData fused_conv_eltwise_kernel_bfyx_os_iyx_osv16::SetDefault( const fused_conv_eltwise_params& cp, int autoTuneIndex) const { - DispatchData runInfo = fused_conv_eltwise_kernel_base::SetDefault(cp); + DispatchData dispatchData = fused_conv_eltwise_kernel_base::SetDefault(cp); const auto of_maps = cp.output.Feature().v; const size_t of_threads_per_batch = RoundUp(of_maps, sub_group_size); - runInfo.efficiency = FORCE_PRIORITY_3; + dispatchData.efficiency = FORCE_PRIORITY_3; auto tuneOptions = GetAutoTuneOptions(cp, autoTuneIndex); - runInfo.cldnnStyle.blockWidth = tuneOptions.blockWidth; - runInfo.cldnnStyle.blockHeight = tuneOptions.blockHeight; - runInfo.cldnnStyle.prefetch = tuneOptions.prefetch; + dispatchData.cldnnStyle.blockWidth = tuneOptions.blockWidth; + dispatchData.cldnnStyle.blockHeight = tuneOptions.blockHeight; + dispatchData.cldnnStyle.prefetch = tuneOptions.prefetch; - auto input_block_dims = get_bfyx_req_input_block_dims(runInfo.cldnnStyle.blockWidth, - runInfo.cldnnStyle.blockHeight, + auto 
input_block_dims = get_bfyx_req_input_block_dims(dispatchData.cldnnStyle.blockWidth, + dispatchData.cldnnStyle.blockHeight, cp.conv.filterSize, cp.conv.stride, cp.conv.dilation, sub_group_size, - runInfo.fp16UnitUsed ? sub_group_size : sub_group_size / 2, + cp.output.GetDType() == Datatype::F16 ? sub_group_size : sub_group_size / 2, sub_group_size); - runInfo.cldnnStyle.inputBlockArraySize = input_block_dims.first; - runInfo.cldnnStyle.inputBlockWidth = input_block_dims.second; + dispatchData.cldnnStyle.inputBlockArraySize = input_block_dims.first; + dispatchData.cldnnStyle.inputBlockWidth = input_block_dims.second; - runInfo.gws0 = CeilDiv(cp.output.X().v, runInfo.cldnnStyle.blockWidth); - runInfo.gws1 = CeilDiv(cp.output.Y().v, runInfo.cldnnStyle.blockHeight); - runInfo.gws2 = of_threads_per_batch * cp.output.Batch().v; + dispatchData.gws[0] = CeilDiv(cp.output.X().v, dispatchData.cldnnStyle.blockWidth); + dispatchData.gws[1] = CeilDiv(cp.output.Y().v, dispatchData.cldnnStyle.blockHeight); + dispatchData.gws[2] = of_threads_per_batch * cp.output.Batch().v; - runInfo.lws0 = 1; - runInfo.lws1 = 1; - runInfo.lws2 = sub_group_size; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = sub_group_size; - return runInfo; + return dispatchData; } bool fused_conv_eltwise_kernel_bfyx_os_iyx_osv16::Validate(const Params& p, const optional_params& o) const { @@ -205,19 +204,19 @@ bool fused_conv_eltwise_kernel_bfyx_os_iyx_osv16::Validate(const Params& p, cons } JitConstants fused_conv_eltwise_kernel_bfyx_os_iyx_osv16::GetJitConstants(const fused_conv_eltwise_params& params, - const DispatchData& runInfo) const { + const DispatchData& dispatchData) const { const auto of_maps = params.output.Feature().v; const size_t of_threads_per_batch = RoundUp(of_maps, sub_group_size); size_t leftovers = of_threads_per_batch - of_maps; - auto jit = Parent::GetJitConstants(params, runInfo); + auto jit = Parent::GetJitConstants(params, dispatchData); - 
jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", runInfo.lws2)); - jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_WIDTH", runInfo.cldnnStyle.blockWidth)); - jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_HEIGHT", runInfo.cldnnStyle.blockHeight)); - jit.AddConstant(MakeJitConstant("IN_BLOCK_ARRAY_SIZE", runInfo.cldnnStyle.inputBlockArraySize)); - jit.AddConstant(MakeJitConstant("IN_BLOCK_WIDTH", runInfo.cldnnStyle.inputBlockWidth)); - jit.AddConstant(MakeJitConstant("PREFETCH", runInfo.cldnnStyle.prefetch)); + jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", dispatchData.lws[2])); + jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_WIDTH", dispatchData.cldnnStyle.blockWidth)); + jit.AddConstant(MakeJitConstant("OUTPUT_BLOCK_HEIGHT", dispatchData.cldnnStyle.blockHeight)); + jit.AddConstant(MakeJitConstant("IN_BLOCK_ARRAY_SIZE", dispatchData.cldnnStyle.inputBlockArraySize)); + jit.AddConstant(MakeJitConstant("IN_BLOCK_WIDTH", dispatchData.cldnnStyle.inputBlockWidth)); + jit.AddConstant(MakeJitConstant("PREFETCH", dispatchData.cldnnStyle.prefetch)); if (leftovers) { jit.AddConstant(MakeJitConstant("LEFTOVERS", leftovers)); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_os_iyx_osv16.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_os_iyx_osv16.h index 3bda6e12632f10..f4179f37665e8c 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_os_iyx_osv16.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_bfyx_os_iyx_osv16.h @@ -33,7 +33,7 @@ class fused_conv_eltwise_kernel_bfyx_os_iyx_osv16 : public fused_conv_eltwise_ke protected: WeightsLayout GetPreferreddWeightsLayout(const fused_conv_eltwise_params &) const override; - JitConstants GetJitConstants(const 
fused_conv_eltwise_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const fused_conv_eltwise_params& params, const DispatchData& dispatchData) const override; bool Validate(const Params& p, const optional_params& o) const override; bool NeedPaddedInput() const override { return true; } DispatchData SetDefault(const fused_conv_eltwise_params& arg, int autoTuneIndex = -1) const override; @@ -50,4 +50,4 @@ class fused_conv_eltwise_kernel_bfyx_os_iyx_osv16 : public fused_conv_eltwise_ke std::vector autoTuneOptions = {}; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_yxfb_yxio_b16.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_yxfb_yxio_b16.cpp index 056f4398d92318..51a1b75547ae7a 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_yxfb_yxio_b16.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_yxfb_yxio_b16.cpp @@ -76,7 +76,7 @@ size_t GetOfmPerWorkitem(Datatype dataType) { fused_conv_eltwise_kernel_base::DispatchData fused_conv_eltwise_kernel_yxfb_yxio_b16::SetDefault( const fused_conv_eltwise_params& arg, int) const { - DispatchData runInfo = fused_conv_eltwise_kernel_base::SetDefault(arg); + DispatchData dispatchData = fused_conv_eltwise_kernel_base::SetDefault(arg); const auto filter_ofm_num = arg.weights.OFM().v; const auto batch_size = arg.output.Batch().v; @@ -86,15 +86,15 @@ fused_conv_eltwise_kernel_base::DispatchData fused_conv_eltwise_kernel_yxfb_yxio const size_t ofmPerWorkItem = GetOfmPerWorkitem(arg.inputs[0].GetDType()); if (arg.inputs[0].GetDType() == Datatype::F16) { - runInfo.efficiency = FORCE_PRIORITY_7; + 
dispatchData.efficiency = FORCE_PRIORITY_7; } else { - runInfo.efficiency = FORCE_PRIORITY_9; + dispatchData.efficiency = FORCE_PRIORITY_9; } - runInfo.lws0 = min_lws; - runInfo.gws0 = filter_ofm_num * batch_size / (ofmPerWorkItem * batchesPerWorkItem); + dispatchData.lws[0] = min_lws; + dispatchData.gws[0] = filter_ofm_num * batch_size / (ofmPerWorkItem * batchesPerWorkItem); - return runInfo; + return dispatchData; } bool fused_conv_eltwise_kernel_yxfb_yxio_b16::Validate(const Params& p, const optional_params& o) const { @@ -138,10 +138,10 @@ bool fused_conv_eltwise_kernel_yxfb_yxio_b16::Validate(const Params& p, const op } JitConstants fused_conv_eltwise_kernel_yxfb_yxio_b16::GetJitConstants(const fused_conv_eltwise_params& params, - const DispatchData& kd) const { - auto jit = Parent::GetJitConstants(params, kd); + const DispatchData& dispatchData) const { + auto jit = Parent::GetJitConstants(params, dispatchData); - const auto local_work_group_size = kd.lws0; + const auto local_work_group_size = dispatchData.lws[0]; const auto batch_size = params.output.Batch().v; if (params.inputs[0].GetDType() == Datatype::F32) { @@ -166,7 +166,7 @@ JitConstants fused_conv_eltwise_kernel_yxfb_yxio_b16::GetJitConstants(const fuse const size_t ofmPerWorkItem = GetOfmPerWorkitem(params.inputs[0].GetDType()); jit.AddConstants({ - MakeJitConstant("LOCAL_WORK_GROUP_SIZE", kd.lws0), + MakeJitConstant("LOCAL_WORK_GROUP_SIZE", dispatchData.lws[0]), MakeJitConstant("OFM_PER_WORK_ITEM", ofmPerWorkItem), MakeJitConstant("BATCHES_PER_WORK_ITEM", batchesPerWorkItem), // how many batches will a single work item compute diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_yxfb_yxio_b16.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_yxfb_yxio_b16.h index 3a20b49482590d..2d9a509a73de2c 100644 --- 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_yxfb_yxio_b16.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fused_conv_eltwise/fused_conv_eltwise_kernel_yxfb_yxio_b16.h @@ -37,7 +37,7 @@ class fused_conv_eltwise_kernel_yxfb_yxio_b16 : public fused_conv_eltwise_kernel } std::string GetKernelName(const fused_conv_eltwise_params&) const override; bool Validate(const Params& p, const optional_params& o) const override; - JitConstants GetJitConstants(const fused_conv_eltwise_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const fused_conv_eltwise_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const fused_conv_eltwise_params& arg, int autoTuneIndex = -1) const override; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gather/gather_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gather/gather_kernel_ref.cpp index 78f248d306eab3..7cc7bb66fa933a 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gather/gather_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gather/gather_kernel_ref.cpp @@ -101,7 +101,7 @@ static inline std::vector GetOrder(size_t size) { } else if (size == 6) { idx_order = {"b", "f", "w", "z", "y", "x"}; } - + return idx_order; } @@ -120,7 +120,7 @@ static std::string GetDictionaryIndexOrder(const gather_params& params, size_t a for (size_t i = dictionary_dims_num; i < idx_order.size(); i++) idx_order[i] = zeroVal; - + // Fix size to inputs[0] dims size for (size_t i = 0; i < params.output.GetDims().size() - params.inputs[0].GetDims().size(); i++) idx_order.pop_back(); @@ -152,33 +152,20 @@ static std::string GetIndecesIdxOrder(const 
gather_params& params, size_t axis) } CommonDispatchData GatherKernelRef::SetDefault(const gather_params& params, const optional_params&) const { - CommonDispatchData runInfo; + CommonDispatchData dispatchData; const auto& output = params.output; - std::vector global; - std::vector local; - if (output.GetLayout() == DataLayout::bfyx) { - global = {output.X().v, output.Y().v, output.Feature().v * output.Batch().v}; + dispatchData.gws = {output.X().v, output.Y().v, output.Feature().v * output.Batch().v}; } else if (output.GetLayout() == DataLayout::bfzyx) { - global = {output.X().v, output.Y().v * output.Z().v, output.Feature().v * output.Batch().v}; + dispatchData.gws = {output.X().v, output.Y().v * output.Z().v, output.Feature().v * output.Batch().v}; } else { - global = {output.X().v * output.Y().v, output.Z().v * output.W().v, output.Feature().v * output.Batch().v}; + dispatchData.gws = {output.X().v * output.Y().v, output.Z().v * output.W().v, output.Feature().v * output.Batch().v}; } - local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; - - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; - - runInfo.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - return runInfo; + return dispatchData; } JitConstants GatherKernelRef::GetJitConstants(const gather_params& params) const { @@ -220,14 +207,14 @@ KernelsData GatherKernelRef::GetKernelsData(const Params& params, const optional KernelData kd = KernelData::Default(params); gather_params& newParams = *static_cast(kd.params.get()); - auto runInfo = SetDefault(newParams, options); + auto dispatchData = SetDefault(newParams, options); auto entry_point = GetEntryPoint(kernelName, newParams.layerID, options); auto cldnn_jit = GetJitConstants(newParams); std::string jit = CreateJit(kernelName, 
cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point, "", false, false, 2, GetFusedPrimitiveInputsCount(params)); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point, "", false, false, 2, GetFusedPrimitiveInputsCount(params)); kd.estimatedTime = DONT_USE_IF_HAVE_SOMETHING_ELSE; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gather_tree/gather_tree_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gather_tree/gather_tree_kernel_base.cpp index 1042910b656448..4f3a2fc278bb4c 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gather_tree/gather_tree_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gather_tree/gather_tree_kernel_base.cpp @@ -23,49 +23,40 @@ JitConstants GatherTreeKernelBase::GetJitConstants(const gather_tree_params & pa } GatherTreeKernelBase::DispatchData GatherTreeKernelBase::SetDefault(const gather_tree_params & params) const { - std::vector global{ - params.output.Y().v, // beam - params.output.Feature().v, // batch - 1 - }; - const auto& local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + DispatchData dispatchData; /* b -> time f -> batch y -> beam */ - DispatchData data; - data.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; - data.gws0 = global[0]; - data.gws1 = global[1]; - data.gws2 = global[2]; - data.lws0 = local[0]; - data.lws1 = local[1]; - data.lws2 = local[2]; - return data; + dispatchData.gws = { params.output.Y().v, // beam + params.output.Feature().v, // batch + 1 }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); + return dispatchData; } KernelsData GatherTreeKernelBase::GetCommonKernelsData(const Params& params, - const optional_params& options, - float estimated_time) const { + const 
optional_params& options, + float estimated_time) const { assert(params.GetType() == KernelType::GATHER_TREE); const auto& gt_params = static_cast(params); - auto run_info = SetDefault(gt_params); + auto dispatchData = SetDefault(gt_params); auto kernel_data = KernelData::Default(params); auto cldnn_jit = GetJitConstants(gt_params); auto entry_point = GetEntryPoint(kernelName, gt_params.layerID, options); auto jit = CreateJit(kernelName, cldnn_jit, entry_point); FillCLKernelData(kernel_data.kernels[0], - run_info, - params.engineInfo, - kernelName, - jit, - entry_point, - DEFAULT, - false, - false, - static_cast(gt_params.inputs.size())); + dispatchData, + params.engineInfo, + kernelName, + jit, + entry_point, + DEFAULT, + false, + false, + static_cast(gt_params.inputs.size())); kernel_data.estimatedTime = estimated_time; return { kernel_data }; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gemm/gemm_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gemm/gemm_kernel_base.cpp index 249e47f000b550..e8877636cc2ae6 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gemm/gemm_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gemm/gemm_kernel_base.cpp @@ -1,5 +1,5 @@ /* -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -36,24 +36,13 @@ JitConstants GemmKernelBase::GetJitConstants(const gemm_params& params) const { GemmKernelBase::DispatchData GemmKernelBase::SetDefault(const gemm_params& params) const { const auto& output = params.output; - DispatchData kd; - - kd.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; + DispatchData dispatchData; auto total_batches = output.LogicalSize() / (output.X().v * output.Y().v); - std::vector global = { output.X().v, output.Y().v, total_batches }; - - const auto& local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; + dispatchData.gws = { output.X().v, output.Y().v, total_batches }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - return kd; + return dispatchData; } KernelsData GemmKernelBase::GetCommonKernelsData(const Params& params, @@ -65,7 +54,7 @@ KernelsData GemmKernelBase::GetCommonKernelsData(const Params& params, const auto& prim_params = static_cast(params); - auto run_info = SetDefault(prim_params); + auto dispatchData = SetDefault(prim_params); KernelData k_data = KernelData::Default(params); auto cldnn_jit = GetJitConstants(prim_params); @@ -74,7 +63,7 @@ KernelsData GemmKernelBase::GetCommonKernelsData(const Params& params, auto& kernel = k_data.kernels[0]; FillCLKernelData(kernel, - run_info, + dispatchData, params.engineInfo, kernelName, jit, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gemm/gemm_kernel_base.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gemm/gemm_kernel_base.h index d30d45417c9095..5df5bb03c1652f 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gemm/gemm_kernel_base.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gemm/gemm_kernel_base.h @@ -60,7 +60,7 @@ class GemmKernelBase : 
public common_kernel_base { virtual DispatchData SetDefault(const gemm_params& params) const; KernelsData GetCommonKernelsData(const Params& params, const optional_params&, float estimated_time) const; // Fused ops - virtual JitConstants GetFusedPrimitivesJitConstants(const gemm_params& params, const DispatchData& kd) const; + virtual JitConstants GetFusedPrimitivesJitConstants(const gemm_params& params, const DispatchData& dispatchData) const; Datatype GetActivationType(const gemm_params& params) const; // --Fused ops diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gemm/gemm_kernel_mmad_int8.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gemm/gemm_kernel_mmad_int8.cpp index df5534a047868c..537825dc838f97 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gemm/gemm_kernel_mmad_int8.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gemm/gemm_kernel_mmad_int8.cpp @@ -75,24 +75,15 @@ GemmKernelBase::DispatchData GemmKernelMMADint8::SetDefault(const gemm_params& p const auto& output = params.output; auto total_batches = output.LogicalSize() / (output.X().v * output.Y().v); - DispatchData kd; + DispatchData dispatchData; GemmTuningData td = SetTuningParams(params); - std::vector global = { Align(output.X().v, td.simd_size), - Align(output.Y().v, td.simd_size * td.tile_num) / (td.simd_size * td.tile_num), - total_batches }; + dispatchData.gws = { Align(output.X().v, td.simd_size), + Align(output.Y().v, td.simd_size * td.tile_num) / (td.simd_size * td.tile_num), + total_batches }; + dispatchData.lws = { td.simd_size, 1, 1 }; - std::vector local = { td.simd_size, 1, 1 }; - - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - return kd; + return dispatchData; } GemmKernelMMADint8::GemmTuningData GemmKernelMMADint8::InitGemmTuningData(const gemm_params& params) 
const { @@ -154,7 +145,7 @@ KernelsData GemmKernelMMADint8::GetKernelsData(const Params& params, const optio const auto& prim_params = static_cast(params); - auto run_info = GemmKernelMMADint8::SetDefault(prim_params); + auto dispatchData = GemmKernelMMADint8::SetDefault(prim_params); KernelData k_data = KernelData::Default(params); auto cldnn_jit = GetJitConstants(prim_params); @@ -163,7 +154,7 @@ KernelsData GemmKernelMMADint8::GetKernelsData(const Params& params, const optio auto& kernel = k_data.kernels[0]; FillCLKernelData(kernel, - run_info, + dispatchData, params.engineInfo, kernelName, jit, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gemm/gemm_kernel_mmad_int8_slm.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gemm/gemm_kernel_mmad_int8_slm.cpp index 0b1f3074cd84d1..94d25bf4f4a3d7 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gemm/gemm_kernel_mmad_int8_slm.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gemm/gemm_kernel_mmad_int8_slm.cpp @@ -72,21 +72,13 @@ GemmKernelBase::DispatchData GemmKernelMMADslmInt8::SetDefault(const gemm_params const auto& output = params.output; auto total_batches = output.LogicalSize() / (output.X().v * output.Y().v); - DispatchData kd; + DispatchData dispatchData; GemmTuningData td = SetTuningParams(params); - std::vector global = { td.size_n / td.pack_size, output.Y().v / td.simd_size, total_batches }; - std::vector local = { td.slm_tile_size / td.pack_size, td.slm_tile_size / td.simd_size, 1 }; + dispatchData.gws = { td.size_n / td.pack_size, output.Y().v / td.simd_size, total_batches }; + dispatchData.lws = { td.slm_tile_size / td.pack_size, td.slm_tile_size / td.simd_size, 1 }; - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - return kd; + return dispatchData; } GemmKernelMMADslmInt8::GemmTuningData 
GemmKernelMMADslmInt8::InitGemmTuningData(const gemm_params& params) const { @@ -123,7 +115,7 @@ KernelsData GemmKernelMMADslmInt8::GetKernelsData(const Params& params, const op const auto& prim_params = static_cast(params); - auto run_info = GemmKernelMMADslmInt8::SetDefault(prim_params); + auto dispatchData = GemmKernelMMADslmInt8::SetDefault(prim_params); KernelData k_data = KernelData::Default(params); auto cldnn_jit = GetJitConstants(prim_params); @@ -132,7 +124,7 @@ KernelsData GemmKernelMMADslmInt8::GetKernelsData(const Params& params, const op auto& kernel = k_data.kernels[0]; FillCLKernelData(kernel, - run_info, + dispatchData, params.engineInfo, kernelName, jit, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gemm/gemm_kernel_tiled_opt.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gemm/gemm_kernel_tiled_opt.cpp index 8ae66629a53230..8b722163794468 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gemm/gemm_kernel_tiled_opt.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/gemm/gemm_kernel_tiled_opt.cpp @@ -40,21 +40,21 @@ ParamsKey GemmKernelTiledOpt::GetSupportedKey() const { GemmKernelBase::DispatchData GemmKernelTiledOpt::SetDefault(const gemm_params& params) const { const auto& output = params.output; - DispatchData kd; + DispatchData dispatchData; GemmTuningData td = SetTuningParams(params); auto total_batches = output.LogicalSize() / (output.X().v * output.Y().v); std::vector global = { output.X().v, output.Y().v, total_batches }; - kd.gws0 = Align(global[0], td.tile_n_size) / (td.tile_n_size / td.simd_size); - kd.gws1 = Align(global[1], td.tile_m_size) / td.tile_m_size; - kd.gws2 = global[2]; + dispatchData.gws[0] = Align(global[0], td.tile_n_size) / (td.tile_n_size / td.simd_size); + dispatchData.gws[1] = Align(global[1], td.tile_m_size) / td.tile_m_size; + dispatchData.gws[2] = global[2]; - kd.lws0 = td.simd_size; - 
kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = td.simd_size; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - return kd; + return dispatchData; } GemmKernelTiledOpt::GemmTuningData GemmKernelTiledOpt::SetTuningParams(const gemm_params& params) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/grn/grn_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/grn/grn_kernel_base.cpp index a63d841319f8e8..fec3194bb2e22d 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/grn/grn_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/grn/grn_kernel_base.cpp @@ -28,21 +28,11 @@ JitConstants GRNKernelBase::GetJitConstants(const grn_params& params, GRNKernelB GRNKernelBase::DispatchData GRNKernelBase::SetDefault(const grn_params& params) const { const auto& output = params.output; - DispatchData kd; - kd.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; + DispatchData dispatchData; + dispatchData.gws = { output.Batch().v, output.Y().v, output.X().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - std::vector global = { output.Batch().v, output.Y().v, output.X().v }; - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - return kd; + return dispatchData; } KernelsData GRNKernelBase::GetCommonKernelsData(const Params& params, @@ -55,19 +45,17 @@ KernelsData GRNKernelBase::GetCommonKernelsData(const Params& params, const grn_params& orgParams = static_cast(params); - DispatchData runInfo; - - runInfo = SetDefault(orgParams); + DispatchData dispatchData = SetDefault(orgParams); KernelData kd = KernelData::Default(params); - auto cldnn_jit = GetJitConstants(orgParams, runInfo); + auto cldnn_jit = 
GetJitConstants(orgParams, dispatchData); auto entry_point = GetEntryPoint(kernelName, orgParams.layerID, options); auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; FillCLKernelData(kernel, - runInfo, + dispatchData, params.engineInfo, kernelName, jit, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/grn/grn_kernel_base.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/grn/grn_kernel_base.h index d960541828adbc..f17fca8bab5c9d 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/grn/grn_kernel_base.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/grn/grn_kernel_base.h @@ -44,7 +44,7 @@ class GRNKernelBase : public common_kernel_base { using DispatchData = CommonDispatchData; protected: - virtual JitConstants GetJitConstants(const grn_params& params, DispatchData kd) const; + virtual JitConstants GetJitConstants(const grn_params& params, DispatchData dispatchData) const; virtual DispatchData SetDefault(const grn_params& params) const; KernelsData GetCommonKernelsData(const Params& params, const optional_params&, float estimated_time) const; }; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_multiple_features.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_multiple_features.cpp index 945524cdde0148..ce148e9539d727 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_multiple_features.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_multiple_features.cpp @@ -56,7 +56,7 @@ static unsigned int GetOfmPerSimd(const lrn_params& params) { } CommonDispatchData LRNKernelAcrossChannelMultipleFeatures::SetDefault(const lrn_params& params) const { - CommonDispatchData runInfo = 
LRNKernelBase::SetDefault(params); + CommonDispatchData dispatchData = LRNKernelBase::SetDefault(params); const auto& input = params.inputs[0]; unsigned int ofm_per_simd = GetOfmPerSimd(params); @@ -65,24 +65,24 @@ CommonDispatchData LRNKernelAcrossChannelMultipleFeatures::SetDefault(const lrn_ const auto& out = params.output; const unsigned int alignment = out.X().v > 16 ? 32 : 16; - runInfo.gws0 = Align(out.X().v, alignment); - runInfo.gws1 = out.Y().v; - runInfo.gws2 = (out.Feature().v * out.Batch().v) / ofm_per_simd; + dispatchData.gws[0] = Align(out.X().v, alignment); + dispatchData.gws[1] = out.Y().v; + dispatchData.gws[2] = (out.Feature().v * out.Batch().v) / ofm_per_simd; - runInfo.lws0 = alignment; - runInfo.lws1 = 1; - runInfo.lws2 = 1; + dispatchData.lws[0] = alignment; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; } else if (input.GetLayout() == DataLayout::yxfb) { - runInfo.gws0 /= ofm_per_simd; - runInfo.lws0 = std::min(std::max(runInfo.gws0, static_cast(1)), static_cast(32)); - while (runInfo.gws0 % runInfo.lws0 != 0) { - --runInfo.lws0; + dispatchData.gws[0] /= ofm_per_simd; + dispatchData.lws[0] = std::min(std::max(dispatchData.gws[0], static_cast(1)), static_cast(32)); + while (dispatchData.gws[0] % dispatchData.lws[0] != 0) { + --dispatchData.lws[0]; } } - runInfo.efficiency = FORCE_PRIORITY_6; + dispatchData.efficiency = FORCE_PRIORITY_6; - return runInfo; + return dispatchData; } bool LRNKernelAcrossChannelMultipleFeatures::Validate(const Params& p, const optional_params& o) const { @@ -98,8 +98,8 @@ bool LRNKernelAcrossChannelMultipleFeatures::Validate(const Params& p, const opt return true; } -JitConstants LRNKernelAcrossChannelMultipleFeatures::GetJitConstants(const lrn_params& params, const DispatchData& kd) const { - JitConstants jit = Parent::GetJitConstants(params, kd); +JitConstants LRNKernelAcrossChannelMultipleFeatures::GetJitConstants(const lrn_params& params, const DispatchData& dispatchData) const { + JitConstants jit = 
Parent::GetJitConstants(params, dispatchData); const auto& input = params.inputs[0]; const auto& input_dt = params.inputs[0].GetDType(); const auto& output = params.output; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_multiple_features.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_multiple_features.h index 395bc90c44074d..384a2e4c7e248b 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_multiple_features.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_multiple_features.h @@ -35,6 +35,6 @@ class LRNKernelAcrossChannelMultipleFeatures : public LRNKernelBase { FusedOpType::ACTIVATION }; } bool Validate(const Params& params, const optional_params& options) const override; - JitConstants GetJitConstants(const lrn_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const lrn_params& params, const DispatchData& dispatchData) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_multiple_features_fsv16.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_multiple_features_fsv16.cpp index 69fd39188b376f..1746dee0668c0b 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_multiple_features_fsv16.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_multiple_features_fsv16.cpp @@ -38,32 +38,23 @@ ParamsKey LRNKernelAcrossChannelMultipleFeaturesFSV16::GetSupportedKey() const { } CommonDispatchData LRNKernelAcrossChannelMultipleFeaturesFSV16::SetDefault(const lrn_params& params) const { - CommonDispatchData runInfo = 
LRNKernelBase::SetDefault(params); + CommonDispatchData dispatchData = LRNKernelBase::SetDefault(params); const auto& out = params.output; const unsigned int alignment = 16; - std::vector global = {Align(out.Feature().v, alignment), - out.X().v, - out.Y().v * out.Batch().v}; + dispatchData.gws = { Align(out.Feature().v, alignment), + out.X().v, + out.Y().v * out.Batch().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + dispatchData.efficiency = FORCE_PRIORITY_6; - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; - - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; - - runInfo.efficiency = FORCE_PRIORITY_6; - - return runInfo; + return dispatchData; } -JitConstants LRNKernelAcrossChannelMultipleFeaturesFSV16::GetJitConstants(const lrn_params& params, const DispatchData& kd) const { - JitConstants jit = LRNKernelBase::GetJitConstants(params, kd); +JitConstants LRNKernelAcrossChannelMultipleFeaturesFSV16::GetJitConstants(const lrn_params& params, const DispatchData& dispatchData) const { + JitConstants jit = LRNKernelBase::GetJitConstants(params, dispatchData); const auto& input_dt = params.inputs[0].GetDType(); if (!params.fused_ops.empty()) { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_multiple_features_fsv16.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_multiple_features_fsv16.h index 782757726ad98c..397f4c1cac39d3 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_multiple_features_fsv16.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_multiple_features_fsv16.h @@ -27,6 +27,6 @@ class LRNKernelAcrossChannelMultipleFeaturesFSV16 : public 
LRNKernelAcrossChanne private: DispatchData SetDefault(const lrn_params& params) const override; - JitConstants GetJitConstants(const lrn_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const lrn_params& params, const DispatchData& dispatchData) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_opt_b8.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_opt_b8.cpp index a551c18855d3bd..1bc2623b4db3eb 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_opt_b8.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_opt_b8.cpp @@ -36,12 +36,12 @@ ParamsKey LRNKernelAcrossChannel_b8::GetSupportedKey() const { } CommonDispatchData LRNKernelAcrossChannel_b8::SetDefault(const lrn_params& params) const { - CommonDispatchData run_info = LRNKernelBase::SetDefault(params); + CommonDispatchData dispatchData = LRNKernelBase::SetDefault(params); - run_info.gws0 /= 8; - run_info.lws0 = 8; // gws0 is dividable by 64, so after correction it will be dividable by 8. + dispatchData.gws[0] /= 8; + dispatchData.lws[0] = 8; // gws[0] is dividable by 64, so after correction it will be dividable by 8. 
- return run_info; + return dispatchData; } bool LRNKernelAcrossChannel_b8::Validate(const Params& p, const optional_params& o) const { @@ -62,8 +62,8 @@ bool LRNKernelAcrossChannel_b8::Validate(const Params& p, const optional_params& return true; } -JitConstants LRNKernelAcrossChannel_b8::GetJitConstants(const lrn_params& params, const DispatchData& kd) const { - JitConstants jit = Parent::GetJitConstants(params, kd); +JitConstants LRNKernelAcrossChannel_b8::GetJitConstants(const lrn_params& params, const DispatchData& dispatchData) const { + JitConstants jit = Parent::GetJitConstants(params, dispatchData); const auto& input_dt = params.inputs[0].GetDType(); jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", 8)); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_opt_b8.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_opt_b8.h index 9c1e298f816816..c837a54e65b786 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_opt_b8.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_opt_b8.h @@ -36,6 +36,6 @@ class LRNKernelAcrossChannel_b8 : public LRNKernelBase { FusedOpType::ACTIVATION }; } bool Validate(const Params& params, const optional_params& options) const override; - JitConstants GetJitConstants(const lrn_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const lrn_params& params, const DispatchData& dispatchData) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_ref.cpp index 693b98a92d0da2..b4c1443897b413 100644 --- 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_ref.cpp @@ -40,25 +40,25 @@ ParamsKey LRNKernelAcrossChannelRef::GetSupportedKey() const { } CommonDispatchData LRNKernelAcrossChannelRef::SetDefault(const lrn_params& params) const { - CommonDispatchData runInfo = LRNKernelBase::SetDefault(params); + CommonDispatchData dispatchData = LRNKernelBase::SetDefault(params); if (params.inputs[0].GetLayout() == DataLayout::bfyx) { const auto& out = params.output; - runInfo.gws0 = Align(out.X().v, 32); - runInfo.gws1 = out.Y().v; - runInfo.gws2 = out.Feature().v * out.Batch().v; + dispatchData.gws[0] = Align(out.X().v, 32); + dispatchData.gws[1] = out.Y().v; + dispatchData.gws[2] = out.Feature().v * out.Batch().v; - runInfo.lws0 = 32; - runInfo.lws1 = 1; - runInfo.lws2 = 1; + dispatchData.lws[0] = 32; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; } - return runInfo; + return dispatchData; } JitConstants LRNKernelAcrossChannelRef::GetJitConstants(const lrn_params& params, - const LRNKernelBase::DispatchData& kd) const { - JitConstants jit = Parent::GetJitConstants(params, kd); + const LRNKernelBase::DispatchData& dispatchData) const { + JitConstants jit = Parent::GetJitConstants(params, dispatchData); const auto& input_dt = params.inputs[0].GetDType(); if (!params.fused_ops.empty()) { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_ref.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_ref.h index fd206c5fd4ed9e..e3832a51655105 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_ref.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_ref.h @@ -35,6 +35,6 @@ class 
LRNKernelAcrossChannelRef : public LRNKernelBase { FusedOpType::SCALE, FusedOpType::ACTIVATION }; } - JitConstants GetJitConstants(const lrn_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const lrn_params& params, const DispatchData& dispatchData) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_base.cpp index 8e444f9dead962..9f4fa16cdb1673 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_base.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -32,7 +32,7 @@ bool LRNKernelBase::Validate(const Params& p, const optional_params& o) const { return true; } -JitConstants LRNKernelBase::GetJitConstants(const lrn_params& params, const LRNKernelBase::DispatchData& kd) const { +JitConstants LRNKernelBase::GetJitConstants(const lrn_params& params, const LRNKernelBase::DispatchData& /*dispatchData*/) const { JitConstants mem_consts = MakeBaseParamsJitConstants(params); const auto padding = (params.localSize - 1) / 2; @@ -57,10 +57,10 @@ JitConstants LRNKernelBase::GetJitConstants(const lrn_params& params, const LRNK auto alpha_div_by_size_abs_sqrt = std::sqrt(std::abs(alpha_div_by_size)); mem_consts.AddConstants({ - MakeJitConstant("ALPHA_AFTER_FACTORED", kd.fp16UnitUsed ? alpha_sign : alpha), - MakeJitConstant("ALPHA_DIV_BY_SIZE", kd.fp16UnitUsed ? alpha_sign : alpha_div_by_size), - MakeJitConstant("ALPHA_VAL_FACTOR", kd.fp16UnitUsed ? 
alpha_abs_sqrt : 1.0f), - MakeJitConstant("ALPHA_VAL_FACTOR_DIV_BY_SIZE", kd.fp16UnitUsed ? alpha_div_by_size_abs_sqrt : 1.0f), + MakeJitConstant("ALPHA_AFTER_FACTORED", params.inputs[0].GetDType() == Datatype::F16 ? alpha_sign : alpha), + MakeJitConstant("ALPHA_DIV_BY_SIZE", params.inputs[0].GetDType() == Datatype::F16 ? alpha_sign : alpha_div_by_size), + MakeJitConstant("ALPHA_VAL_FACTOR", params.inputs[0].GetDType() == Datatype::F16 ? alpha_abs_sqrt : 1.0f), + MakeJitConstant("ALPHA_VAL_FACTOR_DIV_BY_SIZE", params.inputs[0].GetDType() == Datatype::F16 ? alpha_div_by_size_abs_sqrt : 1.0f), }); return mem_consts; @@ -69,22 +69,21 @@ JitConstants LRNKernelBase::GetJitConstants(const lrn_params& params, const LRNK LRNKernelBase::DispatchData LRNKernelBase::SetDefault(const lrn_params& params) const { const auto& output = params.output; - DispatchData kd; + DispatchData dispatchData; - kd.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; // Determine global work sizes. - kd.gws0 = output.Batch().v * output.Feature().v; // B, F - kd.gws1 = output.X().v; // X - kd.gws2 = output.Y().v; // Y + dispatchData.gws[0] = output.Batch().v * output.Feature().v; // B, F + dispatchData.gws[1] = output.X().v; // X + dispatchData.gws[2] = output.Y().v; // Y // Find largest positive local work size that is divider for global work size. 
- kd.lws0 = std::min(std::max(kd.gws0, static_cast(1)), static_cast(32)); - while (kd.gws0 % kd.lws0 != 0) { - --kd.lws0; + dispatchData.lws[0] = std::min(std::max(dispatchData.gws[0], static_cast(1)), static_cast(32)); + while (dispatchData.gws[0] % dispatchData.lws[0] != 0) { + --dispatchData.lws[0]; } - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - return kd; + return dispatchData; } KernelsData LRNKernelBase::GetCommonKernelsData(const Params& params, @@ -96,17 +95,17 @@ KernelsData LRNKernelBase::GetCommonKernelsData(const Params& params, const lrn_params& orgParams = static_cast(params); - DispatchData runInfo = SetDefault(orgParams); + DispatchData dispatchData = SetDefault(orgParams); KernelData kd = KernelData::Default(params); - auto cldnnJit = GetJitConstants(orgParams, runInfo); + auto cldnnJit = GetJitConstants(orgParams, dispatchData); auto entryPoint = GetEntryPoint(kernelName, orgParams.layerID, options); auto jit = CreateJit(kernelName, cldnnJit, entryPoint); auto fused_deps_total = GetFusedPrimitiveInputsCount(params); auto& kernel = kd.kernels[0]; FillCLKernelData(kernel, - runInfo, + dispatchData, params.engineInfo, kernelName, jit, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_base.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_base.h index 8314e85164b262..8b95eff3124d49 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_base.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_base.h @@ -61,7 +61,7 @@ class LRNKernelBase : public common_kernel_base { protected: bool Validate(const Params& p, const optional_params& o) const override; - virtual JitConstants GetJitConstants(const lrn_params& params, const DispatchData& kd) const; + virtual JitConstants GetJitConstants(const lrn_params& params, const DispatchData& dispatchData) 
const; virtual DispatchData SetDefault(const lrn_params& params) const; KernelsData GetCommonKernelsData(const Params& params, const optional_params&, float estimatedTime) const; }; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_ref.cpp index 86ccca38cf6a19..b85687774cee7d 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_ref.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -43,7 +43,7 @@ ParamsKey LRNKernelRef::GetSupportedKey() const { return k; } -JitConstants LRNKernelRef::GetJitConstants(const lrn_params& params, const LRNKernelRef::Parent::DispatchData& kd) const { +JitConstants LRNKernelRef::GetJitConstants(const lrn_params& params, const LRNKernelRef::Parent::DispatchData& dispatchData) const { const uint32_t round_norm_size = (params.localSize / 2) * 2 + 1; uint32_t numElement = round_norm_size * round_norm_size; const auto& input_dt = params.inputs[0].GetDType(); @@ -54,7 +54,7 @@ JitConstants LRNKernelRef::GetJitConstants(const lrn_params& params, const LRNKe const float num_element_div = 1.f / static_cast(numElement); - JitConstants jit = Parent::GetJitConstants(params, kd); + JitConstants jit = Parent::GetJitConstants(params, dispatchData); jit.AddConstants({ MakeJitConstant("NUM_ELEMENTS_DIV", num_element_div), MakeJitConstant("GWS_BATCH", 2), @@ -71,22 +71,14 @@ JitConstants LRNKernelRef::GetJitConstants(const lrn_params& params, const LRNKe } LRNKernelRef::Parent::DispatchData LRNKernelRef::SetDefault(const lrn_params& params) const { - DispatchData kd = 
Parent::SetDefault(params); + DispatchData dispatchData = Parent::SetDefault(params); const auto& out = params.output; - std::vector global = {out.X().v * out.Y().v, out.Feature().v, out.Batch().v}; - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + dispatchData.gws = { out.X().v * out.Y().v, out.Feature().v, out.Batch().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - return kd; + return dispatchData; } KernelsData LRNKernelRef::GetKernelsData(const Params& params, const optional_params& options) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_ref.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_ref.h index 0872feb596eda1..36be0cbfd13565 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_ref.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_ref.h @@ -35,6 +35,6 @@ class LRNKernelRef : public LRNKernelBase { FusedOpType::SCALE, FusedOpType::ACTIVATION }; } - JitConstants GetJitConstants(const lrn_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const lrn_params& params, const DispatchData& dispatchData) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_byxf_opt.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_byxf_opt.cpp index 5b2f25466beb4d..e3530c80787981 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_byxf_opt.cpp +++ 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_byxf_opt.cpp @@ -1,5 +1,5 @@ /* -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -39,9 +39,8 @@ ParamsKey LRNKernelWithinChannelByxfOpt::GetSupportedKey() const { return k; } -JitConstants LRNKernelWithinChannelByxfOpt::GetJitConstants( - const lrn_params& params, - const LRNKernelBase::DispatchData& kd) const { +JitConstants LRNKernelWithinChannelByxfOpt::GetJitConstants(const lrn_params& params, + const LRNKernelBase::DispatchData& dispatchData) const { const uint32_t round_norm_size = (params.localSize / 2) * 2 + 1; uint32_t numElement = round_norm_size * round_norm_size; const auto& input_dt = params.inputs[0].GetDType(); @@ -52,7 +51,7 @@ JitConstants LRNKernelWithinChannelByxfOpt::GetJitConstants( const float num_element_div = 1.f / static_cast(numElement); - JitConstants jit = Parent::GetJitConstants(params, kd); + JitConstants jit = Parent::GetJitConstants(params, dispatchData); jit.AddConstants({ MakeJitConstant("NUM_ELEMENTS_DIV", num_element_div), MakeJitConstant("GWS_BATCH", 2), @@ -70,22 +69,14 @@ JitConstants LRNKernelWithinChannelByxfOpt::GetJitConstants( LRNKernelWithinChannelByxfOpt::Parent::DispatchData LRNKernelWithinChannelByxfOpt::SetDefault( const lrn_params& params) const { - DispatchData kd = Parent::SetDefault(params); + DispatchData dispatchData = Parent::SetDefault(params); const auto& out = params.output; - std::vector global = {out.X().v * out.Y().v, CeilDiv(out.Feature().v, 8), out.Batch().v}; - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + dispatchData.gws = { out.X().v * out.Y().v, CeilDiv(out.Feature().v, 8), out.Batch().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - kd.gws0 = 
global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - return kd; + return dispatchData; } bool LRNKernelWithinChannelByxfOpt::Validate(const Params& p, const optional_params& o) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_byxf_opt.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_byxf_opt.h index 9cdd64fc9cb4e2..4ae0e546fee160 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_byxf_opt.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_byxf_opt.h @@ -37,6 +37,6 @@ class LRNKernelWithinChannelByxfOpt : public LRNKernelBase { FusedOpType::ACTIVATION }; } bool Validate(const Params& params, const optional_params& options) const override; - JitConstants GetJitConstants(const lrn_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const lrn_params& params, const DispatchData& dispatchData) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_ref.cpp index b788cedbaf1c5d..de4f0f5afc23ef 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_ref.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -38,22 +38,22 @@ ParamsKey LRNKernelWithinChannel::GetSupportedKey() const { } CommonDispatchData LRNKernelWithinChannel::SetDefault(const lrn_params& params) const { - CommonDispatchData runInfo = LRNKernelBase::SetDefault(params); + CommonDispatchData dispatchData = LRNKernelBase::SetDefault(params); - runInfo.gws0 = 128 * 128; - runInfo.gws1 = 1; - runInfo.gws2 = 1; + dispatchData.gws[0] = 128 * 128; + dispatchData.gws[1] = 1; + dispatchData.gws[2] = 1; - runInfo.lws0 = 128; - runInfo.lws1 = 1; - runInfo.lws2 = 1; + dispatchData.lws[0] = 128; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - return runInfo; + return dispatchData; } JitConstants LRNKernelWithinChannel::GetJitConstants(const lrn_params& params, - const LRNKernelWithinChannel::Parent::DispatchData& kd) const { - JitConstants jit = Parent::GetJitConstants(params, kd); + const LRNKernelWithinChannel::Parent::DispatchData& dispatchData) const { + JitConstants jit = Parent::GetJitConstants(params, dispatchData); const auto& input_dt = params.inputs[0].GetDType(); if (!params.fused_ops.empty()) { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_ref.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_ref.h index adaf9c334f224e..93500a87a01aeb 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_ref.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_ref.h @@ -36,6 +36,6 @@ class LRNKernelWithinChannel : public LRNKernelBase { FusedOpType::ACTIVATION }; } bool Validate(const Params& params, const optional_params& options) const override; - JitConstants GetJitConstants(const lrn_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const lrn_params& params, const DispatchData& dispatchData) const override; }; } // namespace 
kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_ref_opt.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_ref_opt.cpp index 22e95f72aa29d1..4b69db957b9f99 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_ref_opt.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_ref_opt.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -38,19 +38,19 @@ ParamsKey LRNKernelWithinChannelOpt::GetSupportedKey() const { } CommonDispatchData LRNKernelWithinChannelOpt::SetDefault(const lrn_params& params) const { - CommonDispatchData runInfo = LRNKernelBase::SetDefault(params); + CommonDispatchData dispatchData = LRNKernelBase::SetDefault(params); const auto totalSize = params.inputs[0].LogicalSize(); const unsigned work_group_size = (totalSize < 128) ? 
32 : 128; - runInfo.gws0 = Align(params.inputs[0].LogicalSize(), work_group_size); - runInfo.gws1 = 1; - runInfo.gws2 = 1; + dispatchData.gws[0] = Align(params.inputs[0].LogicalSize(), work_group_size); + dispatchData.gws[1] = 1; + dispatchData.gws[2] = 1; - runInfo.lws0 = work_group_size; - runInfo.lws1 = 1; - runInfo.lws2 = 1; + dispatchData.lws[0] = work_group_size; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - return runInfo; + return dispatchData; } bool LRNKernelWithinChannelOpt::Validate(const Params& p, const optional_params& o) const { @@ -60,9 +60,9 @@ bool LRNKernelWithinChannelOpt::Validate(const Params& p, const optional_params& return true; } -JitConstants LRNKernelWithinChannelOpt::GetJitConstants(const lrn_params& params, const LRNKernelWithinChannelOpt::Parent::DispatchData& kd) const { +JitConstants LRNKernelWithinChannelOpt::GetJitConstants(const lrn_params& params, const LRNKernelWithinChannelOpt::Parent::DispatchData& dispatchData) const { const auto& input_dt = params.inputs[0].GetDType(); - JitConstants jit = Parent::GetJitConstants(params, kd); + JitConstants jit = Parent::GetJitConstants(params, dispatchData); if (!params.fused_ops.empty()) { FusedOpsConfiguration conf = {"", {"batch_id", "feature_id", "y", "x"}, "lrn_result", input_dt, 1}; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_ref_opt.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_ref_opt.h index 8740055c30da2a..cce68e20077b85 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_ref_opt.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_ref_opt.h @@ -35,6 +35,6 @@ class LRNKernelWithinChannelOpt : public LRNKernelBase { FusedOpType::ACTIVATION }; } bool Validate(const Params& params, const optional_params& options) const override; - 
JitConstants GetJitConstants(const lrn_params& params, const DispatchData& kd) const override; + JitConstants GetJitConstants(const lrn_params& params, const DispatchData& dispatchData) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lstm_dynamic/lstm_dynamic_input_bfyx_opt.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lstm_dynamic/lstm_dynamic_input_bfyx_opt.cpp index 37680593a0f339..6088de216dab5a 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lstm_dynamic/lstm_dynamic_input_bfyx_opt.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lstm_dynamic/lstm_dynamic_input_bfyx_opt.cpp @@ -1,5 +1,5 @@ /* -// Copyright (c) 2019 Intel Corporation +// Copyright (c) 2019-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -75,7 +75,7 @@ KernelsData LSTM_DynamicInputKernelBfyxOpt::GetKernelsData(const Params& params, return {}; } - DispatchData run_info; + DispatchData dispatchData; KernelData kd = KernelData::Default(params); lstm_dynamic_input_params& dlstm_params = *static_cast(kd.params.get()); @@ -83,18 +83,8 @@ KernelsData LSTM_DynamicInputKernelBfyxOpt::GetKernelsData(const Params& params, const auto& out = dlstm_params.output; auto hidden_size = out.X().v; - std::vector global = { hidden_size / simd_size, out.Batch().v * out.Y().v, out.Feature().v }; - const auto& local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - run_info.gws0 = global[0]; - run_info.gws1 = global[1]; - run_info.gws2 = global[2]; - - run_info.lws0 = local[0]; - run_info.lws1 = local[1]; - run_info.lws2 = local[2]; - - run_info.fp16UnitUsed = dlstm_params.inputs[0].GetDType() == Datatype::F16; + dispatchData.gws = { hidden_size / simd_size, out.Batch().v * out.Y().v, out.Feature().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); bool succeed = UpdateWeightsParams(dlstm_params, options, @@ -111,8 +101,8 @@ KernelsData LSTM_DynamicInputKernelBfyxOpt::GetKernelsData(const Params& params, auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - kernel.workGroups.global = { run_info.gws0, run_info.gws1, run_info.gws2 }; - kernel.workGroups.local = { run_info.lws0, run_info.lws1, run_info.lws2 }; + kernel.workGroups.global = dispatchData.gws; + kernel.workGroups.local = dispatchData.lws; kernel.kernelString = GetKernelString(kernelName, jit, entry_point, params.engineInfo); SetKernelArguments(dlstm_params, kernel); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lstm_dynamic/lstm_dynamic_input_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lstm_dynamic/lstm_dynamic_input_kernel_base.cpp index aecd6e658b8824..aea352f1898416 100644 --- 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lstm_dynamic/lstm_dynamic_input_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lstm_dynamic/lstm_dynamic_input_kernel_base.cpp @@ -1,5 +1,5 @@ /* -// Copyright (c) 2019 Intel Corporation +// Copyright (c) 2019-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -37,23 +37,14 @@ JitConstants LSTM_DynamicInputKernelBase::GetJitConstants(const lstm_dynamic_inp LSTM_DynamicInputKernelBase::DispatchData LSTM_DynamicInputKernelBase::SetDefault( const lstm_dynamic_input_params& params) { - DispatchData kd; + DispatchData dispatchData; const auto& out = params.output; - kd.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; // 4 * hidden, batch * dir, seq_len - std::vector global = {out.X().v, out.Batch().v * out.Y().v, out.Feature().v}; - const auto& local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + dispatchData.gws = { out.X().v, out.Batch().v * out.Y().v, out.Feature().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - return kd; + return dispatchData; } void kernel_selector::LSTM_DynamicInputKernelBase::SetKernelArguments(const lstm_dynamic_input_params& params, clKernelData& kernel) const { @@ -75,7 +66,7 @@ KernelsData LSTM_DynamicInputKernelBase::GetCommonKernelsData(const Params& para const lstm_dynamic_input_params& orgParams = static_cast(params); - auto run_info = SetDefault(orgParams); + auto dispatchData = SetDefault(orgParams); KernelData k_data = KernelData::Default(params, 1); auto cldnn_jit = GetJitConstants(orgParams); @@ -83,7 +74,7 @@ KernelsData LSTM_DynamicInputKernelBase::GetCommonKernelsData(const Params& para auto 
jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = k_data.kernels[0]; - kernel.workGroups.global = {run_info.gws0, run_info.gws1, run_info.gws2}; + kernel.workGroups.global = dispatchData.gws; kernel.kernelString = GetKernelString(kernelName, jit, entry_point, params.engineInfo); SetKernelArguments(orgParams, kernel); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_base.cpp index 7384048a7ee654..81acef80b398ab 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lstm_dynamic/lstm_dynamic_timeloop_kernel_base.cpp @@ -1,5 +1,5 @@ /* -// Copyright (c) 2019 Intel Corporation +// Copyright (c) 2019-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -86,24 +86,15 @@ JitConstants LSTM_DynamicTimeloopKernelBase::GetJitConstants(const lstm_dynamic_ LSTM_DynamicTimeloopKernelBase::DispatchData LSTM_DynamicTimeloopKernelBase::SetDefault( const lstm_dynamic_timeloop_params& params) { - DispatchData kd; + DispatchData dispatchData; const auto& out = params.output; - kd.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; auto out_x_size = out.X().v; auto gws0 = out_x_size > 256 ? 
256 : out_x_size; - std::vector global = {gws0, out.Batch().v, static_cast(params.direction)}; - const auto& local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + dispatchData.gws = { gws0, out.Batch().v, static_cast(params.direction) }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - return kd; + return dispatchData; } void kernel_selector::LSTM_DynamicTimeloopKernelBase::SetKernelArguments(const lstm_dynamic_timeloop_params& params, clKernelData& kernel) const { @@ -136,7 +127,7 @@ KernelsData LSTM_DynamicTimeloopKernelBase::GetCommonKernelsData(const Params& p const lstm_dynamic_timeloop_params& org_params = static_cast(params); - auto run_info = SetDefault(org_params); + auto dispatchData = SetDefault(org_params); KernelData k_data = KernelData::Default(params, 1); auto cldnn_jit = GetJitConstants(org_params); @@ -144,8 +135,8 @@ KernelsData LSTM_DynamicTimeloopKernelBase::GetCommonKernelsData(const Params& p auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = k_data.kernels[0]; - kernel.workGroups.global = {run_info.gws0, run_info.gws1, run_info.gws2}; - kernel.workGroups.local = {run_info.lws0, run_info.lws1, run_info.lws2}; + kernel.workGroups.global = dispatchData.gws; + kernel.workGroups.local = dispatchData.lws; kernel.kernelString = GetKernelString(kernelName, jit, entry_point, params.engineInfo); SetKernelArguments(org_params, kernel); k_data.estimatedTime = estimated_time; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/max_unpooling/max_unpooling_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/max_unpooling/max_unpooling_kernel_base.cpp index 93406e9160bfb2..e6f6a864bd2616 100644 --- 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/max_unpooling/max_unpooling_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/max_unpooling/max_unpooling_kernel_base.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -35,32 +35,32 @@ JitConstants MaxUnpoolingKernelBase::GetJitConstants(const max_unpooling_params& MaxUnpoolingKernelBase::DispatchData MaxUnpoolingKernelBase::SetDefault(const max_unpooling_params& params) const { const auto& input = params.inputs[0]; - DispatchData kd; + DispatchData dispatchData; if (input.GetLayout() == DataLayout::bfyx || input.GetLayout() == DataLayout::byxf) { // Determine global work sizes. - kd.gws2 = input.Batch().v * input.Feature().v; // B, F - kd.gws0 = Align(input.X().v, 32); // X - kd.gws1 = input.Y().v; // Y + dispatchData.gws[2] = input.Batch().v * input.Feature().v; // B, F + dispatchData.gws[0] = Align(input.X().v, 32); // X + dispatchData.gws[1] = input.Y().v; // Y - kd.lws0 = 32; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = 32; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; } else { // Determine global work sizes. 
- kd.gws0 = input.Batch().v * input.Feature().v; // B, F - kd.gws1 = input.X().v; // X - kd.gws2 = input.Y().v; // Y + dispatchData.gws[0] = input.Batch().v * input.Feature().v; // B, F + dispatchData.gws[1] = input.X().v; // X + dispatchData.gws[2] = input.Y().v; // Y - kd.lws0 = std::min(std::max(kd.gws0, static_cast(1)), static_cast(32)); - while (kd.gws0 % kd.lws0 != 0) { - --kd.lws0; + dispatchData.lws[0] = std::min(std::max(dispatchData.gws[0], static_cast(1)), static_cast(32)); + while (dispatchData.gws[0] % dispatchData.lws[0] != 0) { + --dispatchData.lws[0]; } - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; } - return kd; + return dispatchData; } KernelsData MaxUnpoolingKernelBase::GetCommonKernelsData(const Params& params, @@ -72,7 +72,7 @@ KernelsData MaxUnpoolingKernelBase::GetCommonKernelsData(const Params& params, const max_unpooling_params& orgParams = static_cast(params); - DispatchData runInfo = SetDefault(orgParams); + DispatchData dispatchData = SetDefault(orgParams); KernelData kd = KernelData::Default(params); @@ -81,7 +81,7 @@ KernelsData MaxUnpoolingKernelBase::GetCommonKernelsData(const Params& params, auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point); kernel.arguments.push_back({ArgumentDescriptor::Types::INPUT, 1}); kd.estimatedTime = estimatedTime; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_b_fs_yx_fsv16_imad.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_b_fs_yx_fsv16_imad.cpp index 80955a172381e8..22011d6ad92a35 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_b_fs_yx_fsv16_imad.cpp +++ 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_b_fs_yx_fsv16_imad.cpp @@ -67,7 +67,7 @@ bool MVNKernel_b_fs_yx_fsv16_imad::Validate(const Params& p, const optional_para } MVNKernelBase::DispatchData MVNKernel_b_fs_yx_fsv16_imad::SetDefault(const mvn_params& params) const { - auto kd = Parent::SetDefault(params); + auto dispatchData = Parent::SetDefault(params); auto items_num = params.output.X().v * params.output.Y().v * params.output.Z().v; auto max_wg = params.engineInfo.maxWorkGroupSize; @@ -79,28 +79,28 @@ MVNKernelBase::DispatchData MVNKernel_b_fs_yx_fsv16_imad::SetDefault(const mvn_p auto lws = std::max(std::min(items_num, max_lws) / simd, (size_t)1) * simd; - kd.gws0 = lws; - kd.gws1 = CeilDiv(params.output.Feature().v, fsv); - kd.gws2 = params.output.Batch().v; + dispatchData.gws[0] = lws; + dispatchData.gws[1] = CeilDiv(params.output.Feature().v, fsv); + dispatchData.gws[2] = params.output.Batch().v; - kd.lws0 = lws; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = lws; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - kd.itemsNum = 1; + dispatchData.itemsNum = 1; - return kd; + return dispatchData; } -JitConstants MVNKernel_b_fs_yx_fsv16_imad::GetJitConstants(const mvn_params& params, DispatchData kd) const { - auto jits = Parent::GetJitConstants(params, kd); +JitConstants MVNKernel_b_fs_yx_fsv16_imad::GetJitConstants(const mvn_params& params, DispatchData dispatchData) const { + auto jits = Parent::GetJitConstants(params, dispatchData); auto activation_dt = GetActivationType(params); jits.Merge(MakeTypeJitConstants(activation_dt, "MEAN")); jits.AddConstant(MakeJitConstant("SIMD", simd)); - jits.AddConstant(MakeJitConstant("LWS", kd.lws0)); - jits.AddConstant(MakeJitConstant("GWS", kd.gws0)); - jits.AddConstant(MakeJitConstant("ITEM_GROUPS", kd.itemsNum)); + jits.AddConstant(MakeJitConstant("LWS", dispatchData.lws[0])); + jits.AddConstant(MakeJitConstant("GWS", dispatchData.gws[0])); + 
jits.AddConstant(MakeJitConstant("ITEM_GROUPS", dispatchData.itemsNum)); if (!params.fused_ops.empty()) { std::vector idx_order; @@ -126,7 +126,7 @@ JitConstants MVNKernel_b_fs_yx_fsv16_imad::GetJitConstants(const mvn_params& par MVNKernel_b_fs_yx_fsv16_imad::MultiDispatchData MVNKernel_b_fs_yx_fsv16_imad::SetDefaultForMulti( const mvn_params& params) const { - MultiDispatchData md; + MultiDispatchData dispatchData; auto items_num = params.output.X().v * params.output.Y().v * params.output.Z().v; auto max_wg = params.engineInfo.maxWorkGroupSize; @@ -139,43 +139,43 @@ MVNKernel_b_fs_yx_fsv16_imad::MultiDispatchData MVNKernel_b_fs_yx_fsv16_imad::Se // TODO Check if larger number of work-groups does not provide benefit size_t item_groups = pref_work_groups; - md.item_groups = item_groups; + dispatchData.item_groups = item_groups; size_t stage1_lws = lws; - md.stage_1.gws0 = stage1_lws * item_groups; - md.stage_1.gws1 = CeilDiv(params.output.Feature().v, fsv); - md.stage_1.gws2 = params.output.Batch().v; + dispatchData.stage_1.gws[0] = stage1_lws * item_groups; + dispatchData.stage_1.gws[1] = CeilDiv(params.output.Feature().v, fsv); + dispatchData.stage_1.gws[2] = params.output.Batch().v; - md.stage_1.lws0 = stage1_lws; - md.stage_1.lws1 = 1; - md.stage_1.lws2 = 1; + dispatchData.stage_1.lws[0] = stage1_lws; + dispatchData.stage_1.lws[1] = 1; + dispatchData.stage_1.lws[2] = 1; - md.stage_1.itemsNum = item_groups; + dispatchData.stage_1.itemsNum = item_groups; size_t stage2_lws = std::max(std::min(item_groups, max_lws) / simd, (size_t)1) * simd; - md.stage_2.gws0 = stage2_lws; - md.stage_2.gws1 = CeilDiv(params.output.Feature().v, fsv); - md.stage_2.gws2 = params.output.Batch().v; + dispatchData.stage_2.gws[0] = stage2_lws; + dispatchData.stage_2.gws[1] = CeilDiv(params.output.Feature().v, fsv); + dispatchData.stage_2.gws[2] = params.output.Batch().v; - md.stage_2.lws0 = stage2_lws; - md.stage_2.lws1 = 1; - md.stage_2.lws2 = 1; + dispatchData.stage_2.lws[0] = 
stage2_lws; + dispatchData.stage_2.lws[1] = 1; + dispatchData.stage_2.lws[2] = 1; - md.stage_2.itemsNum = item_groups; + dispatchData.stage_2.itemsNum = item_groups; - md.stage_final.gws0 = std::max(items_num / simd, (size_t)1) * simd; - md.stage_final.gws1 = CeilDiv(params.output.Feature().v, fsv); - md.stage_final.gws2 = params.output.Batch().v; + dispatchData.stage_final.gws[0] = std::max(items_num / simd, (size_t)1) * simd; + dispatchData.stage_final.gws[1] = CeilDiv(params.output.Feature().v, fsv); + dispatchData.stage_final.gws[2] = params.output.Batch().v; - md.stage_final.lws0 = simd; - md.stage_final.lws1 = 1; - md.stage_final.lws2 = 1; + dispatchData.stage_final.lws[0] = simd; + dispatchData.stage_final.lws[1] = 1; + dispatchData.stage_final.lws[2] = 1; - md.stage_final.itemsNum = 1; + dispatchData.stage_final.itemsNum = 1; - return md; + return dispatchData; } KernelsData MVNKernel_b_fs_yx_fsv16_imad::GetMultiStageKernelsData(const mvn_params& params, @@ -187,7 +187,7 @@ KernelsData MVNKernel_b_fs_yx_fsv16_imad::GetMultiStageKernelsData(const mvn_par constexpr size_t intermidiate_bytes = 4; const mvn_params& orgParams = static_cast(params); - auto runInfo = SetDefaultForMulti(orgParams); + auto dispatchData = SetDefaultForMulti(orgParams); size_t kernels_num = params.mvnNormalizeVariance ? 
5 : 3; KernelData kd = KernelData::Default(params, kernels_num); @@ -195,13 +195,13 @@ KernelsData MVNKernel_b_fs_yx_fsv16_imad::GetMultiStageKernelsData(const mvn_par auto finalKernelName = GetKernelName(orgParams); { // Mean first stage - auto cldnn_jit = GetJitConstants(orgParams, runInfo.stage_1); + auto cldnn_jit = GetJitConstants(orgParams, dispatchData.stage_1); cldnn_jit.AddConstant(MakeJitConstant("MVN_KERNEL_MEAN_1", 1)); auto entry_point = GetEntryPoint(finalKernelName, orgParams.layerID, options); auto jit = CreateJit(finalKernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; FillCLKernelData(kernel, - runInfo.stage_1, + dispatchData.stage_1, params.engineInfo, finalKernelName, jit, @@ -215,17 +215,17 @@ KernelsData MVNKernel_b_fs_yx_fsv16_imad::GetMultiStageKernelsData(const mvn_par kernel.arguments.push_back({ArgumentDescriptor::Types::INPUT, 0}); kernel.arguments.push_back({ArgumentDescriptor::Types::INTERNAL_BUFFER, 0}); kd.internalBufferSizes.push_back(params.output.Batch().v * Align(params.output.Feature().v, fsv) * - runInfo.item_groups * intermidiate_bytes); + dispatchData.item_groups * intermidiate_bytes); } { // Mean second stage - auto cldnn_jit = GetJitConstants(orgParams, runInfo.stage_2); + auto cldnn_jit = GetJitConstants(orgParams, dispatchData.stage_2); cldnn_jit.AddConstant(MakeJitConstant("MVN_KERNEL_MEAN_2", 1)); auto entry_point = GetEntryPoint(finalKernelName, orgParams.layerID, options); auto jit = CreateJit(finalKernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[1]; FillCLKernelData(kernel, - runInfo.stage_2, + dispatchData.stage_2, params.engineInfo, finalKernelName, jit, @@ -243,13 +243,13 @@ KernelsData MVNKernel_b_fs_yx_fsv16_imad::GetMultiStageKernelsData(const mvn_par } if (params.mvnNormalizeVariance) { // Variance first stage - auto cldnn_jit = GetJitConstants(orgParams, runInfo.stage_1); + auto cldnn_jit = GetJitConstants(orgParams, dispatchData.stage_1); 
cldnn_jit.AddConstant(MakeJitConstant("MVN_KERNEL_VAR_1", 1)); auto entry_point = GetEntryPoint(finalKernelName, orgParams.layerID, options); auto jit = CreateJit(finalKernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[2]; FillCLKernelData(kernel, - runInfo.stage_1, + dispatchData.stage_1, params.engineInfo, finalKernelName, jit, @@ -266,13 +266,13 @@ KernelsData MVNKernel_b_fs_yx_fsv16_imad::GetMultiStageKernelsData(const mvn_par } if (params.mvnNormalizeVariance) { // Variance second stage - auto cldnn_jit = GetJitConstants(orgParams, runInfo.stage_2); + auto cldnn_jit = GetJitConstants(orgParams, dispatchData.stage_2); cldnn_jit.AddConstant(MakeJitConstant("MVN_KERNEL_VAR_2", 1)); auto entry_point = GetEntryPoint(finalKernelName, orgParams.layerID, options); auto jit = CreateJit(finalKernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[3]; FillCLKernelData(kernel, - runInfo.stage_2, + dispatchData.stage_2, params.engineInfo, finalKernelName, jit, @@ -289,7 +289,7 @@ KernelsData MVNKernel_b_fs_yx_fsv16_imad::GetMultiStageKernelsData(const mvn_par intermidiate_bytes); } { // Final - auto cldnn_jit = GetJitConstants(orgParams, runInfo.stage_final); + auto cldnn_jit = GetJitConstants(orgParams, dispatchData.stage_final); cldnn_jit.AddConstant(MakeJitConstant("MVN_KERNEL_MAIN", 1)); cldnn_jit.AddConstant(MakeJitConstant("PRECALC_MEAN", 1)); cldnn_jit.AddConstant(MakeJitConstant("PRECALC_VARIANCE", params.mvnNormalizeVariance)); @@ -297,7 +297,7 @@ KernelsData MVNKernel_b_fs_yx_fsv16_imad::GetMultiStageKernelsData(const mvn_par auto jit = CreateJit(finalKernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[kernels_num - 1]; FillCLKernelData(kernel, - runInfo.stage_final, + dispatchData.stage_final, params.engineInfo, finalKernelName, jit, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_b_fs_yx_fsv16_imad.hpp 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_b_fs_yx_fsv16_imad.hpp index 38d9e99f053939..2a1811f7bf5eff 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_b_fs_yx_fsv16_imad.hpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_b_fs_yx_fsv16_imad.hpp @@ -40,7 +40,7 @@ class MVNKernel_b_fs_yx_fsv16_imad : public MVNKernelBase { bool Validate(const Params&, const optional_params&) const override; DispatchData SetDefault(const mvn_params& params) const override; - JitConstants GetJitConstants(const mvn_params& params, DispatchData kd) const override; + JitConstants GetJitConstants(const mvn_params& params, DispatchData dispatchData) const override; std::vector GetSupportedFusedOps() const override { return { FusedOpType::ACTIVATION, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_base.cpp index 75ed07b45ec636..4482a181521080 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_base.cpp @@ -45,29 +45,16 @@ JitConstants MVNKernelBase::GetJitConstants(const mvn_params& params, MVNKernelB MVNKernelBase::DispatchData MVNKernelBase::SetDefault(const mvn_params& params) const { const auto& output = params.output; - DispatchData kd; - - std::vector global(3); - - kd.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; - + DispatchData dispatchData; if (params.mvnMode == MVNMode::WITHIN_CHANNELS) { - global = {output.Batch().v, output.Feature().v, 1}; + dispatchData.gws = {output.Batch().v, output.Feature().v, 1}; } else { - global = {output.Batch().v, 1, 1}; + dispatchData.gws = {output.Batch().v, 1, 1}; } - auto local = 
GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - return kd; + return dispatchData; } KernelsData MVNKernelBase::GetCommonKernelsData(const Params& params, @@ -80,20 +67,18 @@ KernelsData MVNKernelBase::GetCommonKernelsData(const Params& params, const mvn_params& orgParams = static_cast(params); - DispatchData runInfo; - - runInfo = SetDefault(orgParams); + DispatchData dispatchData = SetDefault(orgParams); KernelData kd = KernelData::Default(params); auto finalKernelName = GetKernelName(orgParams); - auto cldnn_jit = GetJitConstants(orgParams, runInfo); + auto cldnn_jit = GetJitConstants(orgParams, dispatchData); auto entry_point = GetEntryPoint(finalKernelName, orgParams.layerID, options); auto jit = CreateJit(finalKernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; FillCLKernelData(kernel, - runInfo, + dispatchData, params.engineInfo, finalKernelName, jit, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_base.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_base.h index f2485f15610e46..da2e81653491d6 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_base.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_base.h @@ -68,7 +68,7 @@ class MVNKernelBase : public common_kernel_base { protected: bool Validate(const Params&, const optional_params&) const override; - virtual JitConstants GetJitConstants(const mvn_params& params, DispatchData kd) const; + virtual JitConstants GetJitConstants(const mvn_params& params, DispatchData dispatchData) const; virtual DispatchData SetDefault(const mvn_params& params) const; virtual std::string 
GetKernelName(const mvn_params&) const { return kernelName; } KernelsData GetCommonKernelsData(const Params& params, const optional_params&, float estimated_time) const; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_bfyx_opt.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_bfyx_opt.cpp index c1e006b18e5849..e0207c647a5135 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_bfyx_opt.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_bfyx_opt.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -44,57 +44,55 @@ ParamsKey MVNKernelBfyxOpt::GetSupportedKey() const { } MVNKernelBfyxOpt::Parent::DispatchData MVNKernelBfyxOpt::SetDefault(const mvn_params& params) const { - DispatchData kd; + DispatchData dispatchData; const auto& input = params.inputs[0]; - kd.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; - if (params.mvnMode == MVNMode::WITHIN_CHANNELS) { - kd.dataSetSize = input.X().v * input.Y().v * input.Z().v; - kd.dataSetsCount = input.Batch().v * input.Feature().v; + dispatchData.dataSetSize = input.X().v * input.Y().v * input.Z().v; + dispatchData.dataSetsCount = input.Batch().v * input.Feature().v; } else { - kd.dataSetSize = input.X().v * input.Y().v * input.Z().v * input.Feature().v; - kd.dataSetsCount = input.Batch().v; + dispatchData.dataSetSize = input.X().v * input.Y().v * input.Z().v * input.Feature().v; + dispatchData.dataSetsCount = input.Batch().v; } // start with 1 thread per data set - kd.gws0 = 1; - kd.gws1 = kd.dataSetsCount; - kd.gws2 = 1; - kd.itemsNum = kd.dataSetSize; + dispatchData.gws[0] = 1; + dispatchData.gws[1] = dispatchData.dataSetsCount; + 
dispatchData.gws[2] = 1; + dispatchData.itemsNum = dispatchData.dataSetSize; // We have two units of data per work item in current implementation. - auto local_mem_per_wi = 2 * (kd.fp16UnitUsed ? sizeof(short) : sizeof(float)); + auto local_mem_per_wi = 2 * BytesPerElement(params.inputs[0].GetDType()); // Combining device execution and local memory restrictions to compute maximum possible LWS. auto max_lws = std::min(params.engineInfo.maxWorkGroupSize, params.engineInfo.maxLocalMemSize / local_mem_per_wi); - kd.lws0 = 1; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; // Compute maximum possible LWS that does not exceed device capabilities and optimizes number of global memory // reads. - while ((kd.itemsNum > 32 || kd.lws0 < kd.itemsNum) && (2 * kd.lws0 <= max_lws)) { - kd.lws0 *= 2; - kd.itemsNum /= 2; + while ((dispatchData.itemsNum > 32 || dispatchData.lws[0] < dispatchData.itemsNum) && (2 * dispatchData.lws[0] <= max_lws)) { + dispatchData.lws[0] *= 2; + dispatchData.itemsNum /= 2; } - kd.gws0 = kd.lws0; - kd.leftovers = kd.dataSetSize % kd.lws0; + dispatchData.gws[0] = dispatchData.lws[0]; + dispatchData.leftovers = dispatchData.dataSetSize % dispatchData.lws[0]; - return kd; + return dispatchData; } -JitConstants MVNKernelBfyxOpt::GetJitConstants(const mvn_params& params, MVNKernelBase::DispatchData kd) const { - auto jit = MVNKernelBase::GetJitConstants(params, kd); +JitConstants MVNKernelBfyxOpt::GetJitConstants(const mvn_params& params, MVNKernelBase::DispatchData dispatchData) const { + auto jit = MVNKernelBase::GetJitConstants(params, dispatchData); jit.AddConstants({ - MakeJitConstant("ITEMS_NUM", kd.itemsNum), - MakeJitConstant("LWS", kd.lws0), - MakeJitConstant("GWS", kd.gws0), - MakeJitConstant("DATA_SETS_COUNT", kd.dataSetsCount), - MakeJitConstant("DATA_SET_SIZE", kd.dataSetSize), - MakeJitConstant("LEFTOVERS", kd.leftovers), + MakeJitConstant("ITEMS_NUM", dispatchData.itemsNum), + 
MakeJitConstant("LWS", dispatchData.lws[0]), + MakeJitConstant("GWS", dispatchData.gws[0]), + MakeJitConstant("DATA_SETS_COUNT", dispatchData.dataSetsCount), + MakeJitConstant("DATA_SET_SIZE", dispatchData.dataSetSize), + MakeJitConstant("LEFTOVERS", dispatchData.leftovers), }); auto activation_dt = GetActivationType(params); jit.Merge(MakeTypeJitConstants(activation_dt, "ACTIVATION")); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_bfyx_opt.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_bfyx_opt.h index 8fd65612914946..e184a984858ef9 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_bfyx_opt.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_bfyx_opt.h @@ -39,6 +39,6 @@ class MVNKernelBfyxOpt : public MVNKernelBase { }; } DispatchData SetDefault(const mvn_params& params) const override; - JitConstants GetJitConstants(const mvn_params& params, MVNKernelBase::DispatchData kd) const override; + JitConstants GetJitConstants(const mvn_params& params, MVNKernelBase::DispatchData dispatchData) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_ref.cpp index 63a7a3425cd386..296d683785e3f5 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_ref.cpp @@ -43,8 +43,8 @@ ParamsKey MVNKernelRef::GetSupportedKey() const { return k; } -JitConstants MVNKernelRef::GetJitConstants(const mvn_params& params, DispatchData kd) const { - auto jits = Parent::GetJitConstants(params, kd); +JitConstants MVNKernelRef::GetJitConstants(const mvn_params& params, DispatchData 
dispatchData) const { + auto jits = Parent::GetJitConstants(params, dispatchData); auto activation_dt = GetActivationType(params); jits.Merge(MakeTypeJitConstants(activation_dt, "ACTIVATION")); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_ref.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_ref.h index 5a3f4e894b8d4b..24f162c0ed49a0 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_ref.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_ref.h @@ -30,7 +30,7 @@ class MVNKernelRef : public MVNKernelBase { ParamsKey GetSupportedKey() const override; protected: - JitConstants GetJitConstants(const mvn_params& params, DispatchData kd) const override; + JitConstants GetJitConstants(const mvn_params& params, DispatchData dispatchData) const override; std::vector GetSupportedFusedOps() const override { return { FusedOpType::ACTIVATION, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/normalize/normalize_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/normalize/normalize_kernel_base.cpp index 2f1d5eab875842..2ce61fec50175c 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/normalize/normalize_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/normalize/normalize_kernel_base.cpp @@ -42,29 +42,16 @@ JitConstants NormalizeKernelBase::GetJitConstants(const normalize_params& np) co NormalizeKernelBase::DispatchData NormalizeKernelBase::SetDefault(const normalize_params& params) const { const auto& output = params.output; - DispatchData kd; - - kd.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; - - std::vector global(3); - + DispatchData dispatchData; if (params.normMode == NormalizeMode::WITHIN_SPATIAL) { - global = {output.X().v, 
output.Y().v, output.Batch().v}; + dispatchData.gws = {output.X().v, output.Y().v, output.Batch().v}; } else { - global = {output.Batch().v, 1, 1}; + dispatchData.gws = {output.Batch().v, 1, 1}; } - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - return kd; + return dispatchData; } KernelsData NormalizeKernelBase::GetCommonKernelsData(const Params& params, @@ -76,9 +63,7 @@ KernelsData NormalizeKernelBase::GetCommonKernelsData(const Params& params, const normalize_params& orgParams = static_cast(params); - DispatchData runInfo; - - runInfo = SetDefault(orgParams); + DispatchData dispatchData = SetDefault(orgParams); KernelData kd = KernelData::Default(params); @@ -88,7 +73,7 @@ KernelsData NormalizeKernelBase::GetCommonKernelsData(const Params& params, auto& kernel = kd.kernels[0]; FillCLKernelData(kernel, - runInfo, + dispatchData, params.engineInfo, kernelName, jit, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/one_hot/one_hot_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/one_hot/one_hot_kernel_base.cpp index 9a5482d7090cc0..bd2e448e4f3423 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/one_hot/one_hot_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/one_hot/one_hot_kernel_base.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2019 Intel Corporation +// Copyright (c) 2019-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -33,27 +33,15 @@ JitConstants OneHotKernelBase::GetJitConstants(const one_hot_params& params) con OneHotKernelBase::DispatchData OneHotKernelBase::SetDefault(const one_hot_params& params) { const auto& input = params.inputs[0]; - DispatchData kd; - - kd.fp16UnitUsed = input.GetDType() == Datatype::F16; - - std::vector global{input.Batch().v, input.Feature().v, input.Y().v * input.X().v}; + DispatchData dispatchData; if (params.output.GetDims().size() == 5) { - global[0] = input.Batch().v; - global[1] = input.Feature().v * input.Z().v; - global[2] = input.Y().v * input.X().v; + dispatchData.gws = { input.Batch().v, input.Feature().v * input.Z().v, input.Y().v * input.X().v }; + } else { + dispatchData.gws = { input.Batch().v, input.Feature().v, input.Y().v * input.X().v }; } - const auto& local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - return kd; + return dispatchData; } KernelsData OneHotKernelBase::GetCommonKernelsData(const Params& params, @@ -64,7 +52,7 @@ KernelsData OneHotKernelBase::GetCommonKernelsData(const Params& params, const auto& prim_params = static_cast(params); - auto run_info = SetDefault(prim_params); + auto dispatchData = SetDefault(prim_params); KernelData k_data = KernelData::Default(params); auto cldnn_jit = GetJitConstants(prim_params); @@ -72,7 +60,7 @@ KernelsData OneHotKernelBase::GetCommonKernelsData(const Params& params, auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = k_data.kernels[0]; - FillCLKernelData(kernel, run_info, params.engineInfo, kernelName, jit, entry_point); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point); k_data.estimatedTime = estimated_time; return {k_data}; diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_base.cpp index d7821a681bc1b7..946b82e40c73b0 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_base.cpp @@ -60,7 +60,7 @@ Datatype PoolingKernelBase::GetActivationType(const pooling_params& params) cons } -JitConstants PoolingKernelBase::GetJitConstants(const pooling_params& pp, PoolingKernelBase::DispatchData kd) const { +JitConstants PoolingKernelBase::GetJitConstants(const pooling_params& pp, PoolingKernelBase::DispatchData dispatchData) const { JitConstants mem_consts = MakeBaseParamsJitConstants(pp); mem_consts.AddConstants({ @@ -71,7 +71,7 @@ JitConstants PoolingKernelBase::GetJitConstants(const pooling_params& pp, Poolin MakeJitConstant(toString(pp.divMode) + "_KERNEL_DIVIDER", 1), }); - if (kd.needsBoundary) { + if (dispatchData.needsBoundary) { mem_consts.AddConstant(MakeJitConstant("CHECK_BOUNDRY", 1)); } @@ -131,48 +131,46 @@ bool PoolingKernelBase::EnableRound(const kernel_selector::pooling_params& param PoolingKernelBase::DispatchData PoolingKernelBase::SetDefault(const pooling_params& params) const { const auto& output = params.output; - DispatchData kd; - - kd.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; + DispatchData dispatchData; if (output.GetLayout() == DataLayout::bfyx || output.GetLayout() == DataLayout::b_fs_yx_fsv4 || output.GetLayout() == DataLayout::byxf || output.GetLayout() == DataLayout::bfzyx || output.GetLayout() == DataLayout::b_fs_zyx_fsv16 || output.GetLayout() == DataLayout::bs_fs_zyx_bsv16_fsv16) { // Determine global work sizes. 
- kd.gws0 = Align(output.X().v, 32); // X - kd.gws1 = output.Y().v * output.Z().v; // Y, Z - kd.gws2 = output.Batch().v * output.Feature().v; // B, F + dispatchData.gws[0] = Align(output.X().v, 32); // X + dispatchData.gws[1] = output.Y().v * output.Z().v; // Y, Z + dispatchData.gws[2] = output.Batch().v * output.Feature().v; // B, F // Find largest positive local work size that is divider for global work size. - kd.lws0 = 32; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = 32; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; } else if (output.GetLayout() == DataLayout::b_fs_yx_fsv32 || output.GetLayout() == DataLayout::b_fs_zyx_fsv32) { - kd.gws0 = 32; - kd.gws1 = output.Y().v * output.X().v * output.Z().v; - kd.gws2 = output.Batch().v * CeilDiv(output.Feature().v, 32); + dispatchData.gws[0] = 32; + dispatchData.gws[1] = output.Y().v * output.X().v * output.Z().v; + dispatchData.gws[2] = output.Batch().v * CeilDiv(output.Feature().v, 32); - kd.lws0 = 32; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = 32; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; } else { // Determine global work sizes. 
- kd.gws0 = output.Batch().v * output.Feature().v; // B, F - kd.gws1 = output.X().v; // X - kd.gws2 = output.Y().v * output.Z().v; // Y * Z + dispatchData.gws[0] = output.Batch().v * output.Feature().v; // B, F + dispatchData.gws[1] = output.X().v; // X + dispatchData.gws[2] = output.Y().v * output.Z().v; // Y * Z - kd.lws0 = std::min(std::max(kd.gws0, static_cast(1)), static_cast(32)); - while (kd.gws0 % kd.lws0 != 0) { - --kd.lws0; + dispatchData.lws[0] = std::min(std::max(dispatchData.gws[0], static_cast(1)), static_cast(32)); + while (dispatchData.gws[0] % dispatchData.lws[0] != 0) { + --dispatchData.lws[0]; } - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; } - kd.needsBoundary = NeedsBoundaryCheck(params); + dispatchData.needsBoundary = NeedsBoundaryCheck(params); - return kd; + return dispatchData; } KernelsData PoolingKernelBase::GetCommonKernelsData(const Params& params, @@ -184,16 +182,16 @@ KernelsData PoolingKernelBase::GetCommonKernelsData(const Params& params, const pooling_params& orgParams = static_cast(params); - DispatchData runInfo = SetDefault(orgParams); + DispatchData dispatchData = SetDefault(orgParams); KernelData kd = KernelData::Default(params); - auto cldnn_jit = GetJitConstants(orgParams, runInfo); + auto cldnn_jit = GetJitConstants(orgParams, dispatchData); auto entry_point = GetEntryPoint(kernelName, orgParams.layerID, options); auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point, DEFAULT, false, false, 1, + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point, DEFAULT, false, false, 1, GetFusedPrimitiveInputsCount(params)); if (orgParams.poolType == PoolType::MAX_WITH_ARGMAX) kernel.arguments.push_back({ArgumentDescriptor::Types::INPUT, 1}); diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_base.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_base.h index a9bcfda9c737be..76e6bab6302edb 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_base.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_base.h @@ -65,7 +65,7 @@ class PoolingKernelBase : public common_kernel_base { protected: bool Validate(const Params&, const optional_params&) const override; - virtual JitConstants GetJitConstants(const pooling_params& params, DispatchData kd) const; + virtual JitConstants GetJitConstants(const pooling_params& params, DispatchData dispatchData) const; virtual DispatchData SetDefault(const pooling_params& params) const; KernelsData GetCommonKernelsData(const Params& params, const optional_params&, float estimatedTime) const; Datatype GetAccumulatorType(const pooling_params& p) const; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_yx_fsv16.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_yx_fsv16.cpp index 157430a6670dd1..16df4fa6bce3ab 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_yx_fsv16.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_yx_fsv16.cpp @@ -63,7 +63,7 @@ size_t PoolingKernel_b_fs_yx_fsv16::GetSimdSize(const pooling_params& params) co } PoolingKernelBase::DispatchData PoolingKernel_b_fs_yx_fsv16::SetDefault(const pooling_params& params) const { - DispatchData kd = PoolingKernelBase::SetDefault(params); + DispatchData dispatchData = PoolingKernelBase::SetDefault(params); const auto& out = params.output; const size_t alignment = GetSimdSize(params); @@ 
-73,25 +73,25 @@ PoolingKernelBase::DispatchData PoolingKernel_b_fs_yx_fsv16::SetDefault(const po auto f = out.Feature().v; auto b = out.Batch().v; - kd.gws0 = CeilDiv(x, x_block_size) * y; - kd.gws1 = Align(f, alignment); - kd.gws2 = b; + dispatchData.gws[0] = CeilDiv(x, x_block_size) * y; + dispatchData.gws[1] = Align(f, alignment); + dispatchData.gws[2] = b; - kd.lws0 = 1; - kd.lws1 = alignment; - kd.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = alignment; + dispatchData.lws[2] = 1; - kd.efficiency = FORCE_PRIORITY_2; + dispatchData.efficiency = FORCE_PRIORITY_2; - return kd; + return dispatchData; } -JitConstants PoolingKernel_b_fs_yx_fsv16::GetJitConstants(const pooling_params& params, DispatchData runInfo) const { +JitConstants PoolingKernel_b_fs_yx_fsv16::GetJitConstants(const pooling_params& params, DispatchData dispatchData) const { const size_t alignment = GetSimdSize(params); size_t x_block_size = GetBlockSize(params); auto input = params.inputs[0]; auto output = params.output; - auto jit = PoolingKernelBase::GetJitConstants(params, runInfo); + auto jit = PoolingKernelBase::GetJitConstants(params, dispatchData); size_t input_line_size = params.poolStride.x * (x_block_size - 1) + params.poolSize.x; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_yx_fsv16.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_yx_fsv16.h index 6b35c94cd1be97..06c3ea248206fb 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_yx_fsv16.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_yx_fsv16.h @@ -28,7 +28,7 @@ class PoolingKernel_b_fs_yx_fsv16 : public PoolingKernelBase { protected: bool Validate(const Params&, const optional_params&) const override; - JitConstants GetJitConstants(const pooling_params& params, 
DispatchData kd) const override; + JitConstants GetJitConstants(const pooling_params& params, DispatchData dispatchData) const override; DispatchData SetDefault(const pooling_params& params) const override; std::vector GetSupportedFusedOps() const override { return { FusedOpType::QUANTIZE, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_yx_fsv4.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_yx_fsv4.cpp index 6375f737183154..f38905d32c2f55 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_yx_fsv4.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_yx_fsv4.cpp @@ -42,24 +42,19 @@ ParamsKey PoolingKerneGPU_b_fs_yx_fsv4::GetSupportedKey() const { } PoolingKernelBase::DispatchData PoolingKerneGPU_b_fs_yx_fsv4::SetDefault(const pooling_params& params) const { - DispatchData runInfo = PoolingKernelBase::SetDefault(params); + DispatchData dispatchData = PoolingKernelBase::SetDefault(params); - runInfo.gws0 = params.output.X().v; // X - runInfo.gws1 = params.output.Y().v; // Y + dispatchData.gws[0] = params.output.X().v; // X + dispatchData.gws[1] = params.output.Y().v; // Y // we got b_fs_yx_fsv4 format, we process 4 features per workitem - runInfo.gws2 = CeilDiv(params.output.Feature().v, 4) * params.output.Batch().v; + dispatchData.gws[2] = CeilDiv(params.output.Feature().v, 4) * params.output.Batch().v; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - auto local = GetOptimalLocalWorkGroupSizes({ runInfo.gws0, runInfo.gws1, runInfo.gws2 }, params.engineInfo); - - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; - - return runInfo; + return dispatchData; } -JitConstants PoolingKerneGPU_b_fs_yx_fsv4::GetJitConstants(const pooling_params& params, 
DispatchData kd) const { - auto jit = PoolingKernelBase::GetJitConstants(params, kd); +JitConstants PoolingKerneGPU_b_fs_yx_fsv4::GetJitConstants(const pooling_params& params, DispatchData dispatchData) const { + auto jit = PoolingKernelBase::GetJitConstants(params, dispatchData); const size_t in_x_pitch = 4; const size_t in_y_pitch = 4 * params.inputs[0].X().LogicalDimPadded(); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_yx_fsv4.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_yx_fsv4.h index fd12d6526fa84c..f7714536d65a8e 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_yx_fsv4.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_yx_fsv4.h @@ -35,6 +35,6 @@ class PoolingKerneGPU_b_fs_yx_fsv4 : public PoolingKernelBase { } protected: - JitConstants GetJitConstants(const pooling_params& params, DispatchData kd) const override; + JitConstants GetJitConstants(const pooling_params& params, DispatchData dispatchData) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_zyx_fsv16_imad.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_zyx_fsv16_imad.cpp index 802b21815985b2..1fa2473dec8208 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_zyx_fsv16_imad.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_zyx_fsv16_imad.cpp @@ -45,7 +45,7 @@ ParamsKey PoolingKernelGPU_b_fs_zyx_fsv16_imad::GetSupportedKey() const { } PoolingKernelBase::DispatchData PoolingKernelGPU_b_fs_zyx_fsv16_imad::SetDefault(const pooling_params& params) const { - 
DispatchData runInfo = PoolingKernelBase::SetDefault(params); + DispatchData dispatchData = PoolingKernelBase::SetDefault(params); const auto& out = params.output; auto x = out.X().v; @@ -54,22 +54,17 @@ PoolingKernelBase::DispatchData PoolingKernelGPU_b_fs_zyx_fsv16_imad::SetDefault auto f = out.Feature().v; auto b = out.Batch().v; - runInfo.gws0 = x; - runInfo.gws1 = y * z; + dispatchData.gws[0] = x; + dispatchData.gws[1] = y * z; // we got b_fs_yx_fsv16 format, we process 16 features per workitem - runInfo.gws2 = CeilDiv(f, FEATURE_SLICE_SIZE) * b; + dispatchData.gws[2] = CeilDiv(f, FEATURE_SLICE_SIZE) * b; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - auto local = GetOptimalLocalWorkGroupSizes({ runInfo.gws0, runInfo.gws1, runInfo.gws2 }, params.engineInfo); - - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; - - return runInfo; + return dispatchData; } -JitConstants PoolingKernelGPU_b_fs_zyx_fsv16_imad::GetJitConstants(const pooling_params& params, DispatchData kd) const { - auto jit = PoolingKernelBase::GetJitConstants(params, kd); +JitConstants PoolingKernelGPU_b_fs_zyx_fsv16_imad::GetJitConstants(const pooling_params& params, DispatchData dispatchData) const { + auto jit = PoolingKernelBase::GetJitConstants(params, dispatchData); const size_t in_x_pitch = FEATURE_SLICE_SIZE; const size_t in_y_pitch = FEATURE_SLICE_SIZE * params.inputs[0].X().LogicalDimPadded(); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_zyx_fsv16_imad.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_zyx_fsv16_imad.h index 8870a6f8239779..fe1687825722f6 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_zyx_fsv16_imad.h +++ 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_b_fs_zyx_fsv16_imad.h @@ -36,6 +36,6 @@ class PoolingKernelGPU_b_fs_zyx_fsv16_imad: public PoolingKernelBase{ } protected: - JitConstants GetJitConstants(const pooling_params& params, DispatchData kd) const override; + JitConstants GetJitConstants(const pooling_params& params, DispatchData dispatchData) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bfyx_block_opt.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bfyx_block_opt.cpp index 4088e22b30ebae..f3c4ea5cc02989 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bfyx_block_opt.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bfyx_block_opt.cpp @@ -42,15 +42,15 @@ ParamsKey PoolingKernelGPUBfyxBlockOpt::GetSupportedKey() const { PoolingKernelBase::DispatchData PoolingKernelGPUBfyxBlockOpt::SetDefault(const pooling_params& params) const { const auto& output = params.output; - DispatchData runInfo = PoolingKernelBase::SetDefault(params); + DispatchData dispatchData = PoolingKernelBase::SetDefault(params); - runInfo.gws1 = CeilDiv(output.Y().v, params.poolSize.y); + dispatchData.gws[1] = CeilDiv(output.Y().v, params.poolSize.y); - return runInfo; + return dispatchData; } -JitConstants PoolingKernelGPUBfyxBlockOpt::GetJitConstants(const pooling_params& params, DispatchData kd) const { - auto jit = PoolingKernelBase::GetJitConstants(params, kd); +JitConstants PoolingKernelGPUBfyxBlockOpt::GetJitConstants(const pooling_params& params, DispatchData dispatchData) const { + auto jit = PoolingKernelBase::GetJitConstants(params, dispatchData); jit.AddConstant( MakeJitConstant("BLOCK_SIZE_Y", params.poolSize.y + params.poolSize.y * 
params.poolStride.y - 1)); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bfyx_block_opt.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bfyx_block_opt.h index 4b77a845df793a..b093a1af13c103 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bfyx_block_opt.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bfyx_block_opt.h @@ -28,7 +28,7 @@ class PoolingKernelGPUBfyxBlockOpt : public PoolingKernelBase { protected: bool Validate(const Params&, const optional_params&) const override; - JitConstants GetJitConstants(const pooling_params& params, DispatchData kd) const override; + JitConstants GetJitConstants(const pooling_params& params, DispatchData dispatchData) const override; DispatchData SetDefault(const pooling_params& params) const override; std::vector GetSupportedFusedOps() const override { return { FusedOpType::QUANTIZE, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bs_fs_yx_bsv16_fsv16.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bs_fs_yx_bsv16_fsv16.cpp index a0af34fedab3ab..ef06a7e2c6bc4a 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bs_fs_yx_bsv16_fsv16.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bs_fs_yx_bsv16_fsv16.cpp @@ -50,22 +50,22 @@ ParamsKey Pooling_kernel_gpu_bs_fs_yx_bsv_16_fsv16::GetSupportedKey() const { } PoolingKernelBase::DispatchData Pooling_kernel_gpu_bs_fs_yx_bsv_16_fsv16::SetDefault(const pooling_params& params) const { - DispatchData runInfo = PoolingKernelBase::SetDefault(params); + DispatchData dispatchData = PoolingKernelBase::SetDefault(params); - runInfo.gws0 = 
params.output.Feature().v/16; - runInfo.gws1 = params.output.X().v * params.output.Y().v; - runInfo.gws2 = params.output.Batch().v; + dispatchData.gws[0] = params.output.Feature().v/16; + dispatchData.gws[1] = params.output.X().v * params.output.Y().v; + dispatchData.gws[2] = params.output.Batch().v; - runInfo.lws0 = 1; - runInfo.lws1 = 1; - runInfo.lws2 = SIMD_SIZE; - runInfo.efficiency = FORCE_PRIORITY_1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = SIMD_SIZE; + dispatchData.efficiency = FORCE_PRIORITY_1; - return runInfo; + return dispatchData; } -JitConstants Pooling_kernel_gpu_bs_fs_yx_bsv_16_fsv16::GetJitConstants(const pooling_params& params, DispatchData kd) const { - auto jit = PoolingKernelBase::GetJitConstants(params, kd); +JitConstants Pooling_kernel_gpu_bs_fs_yx_bsv_16_fsv16::GetJitConstants(const pooling_params& params, DispatchData dispatchData) const { + auto jit = PoolingKernelBase::GetJitConstants(params, dispatchData); if (!params.fused_ops.empty()) { auto input_dt = EnableRound(params) ? 
Datatype::INT32 : GetActivationType(params); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bs_fs_yx_bsv16_fsv16.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bs_fs_yx_bsv16_fsv16.h index 4651dbda03cdd1..5607b79985dd4e 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bs_fs_yx_bsv16_fsv16.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bs_fs_yx_bsv16_fsv16.h @@ -36,6 +36,6 @@ class Pooling_kernel_gpu_bs_fs_yx_bsv_16_fsv16 : public PoolingKernelBase { } protected: - JitConstants GetJitConstants(const pooling_params& params, DispatchData kd) const override; + JitConstants GetJitConstants(const pooling_params& params, DispatchData dispatchData) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bsv16_fsv16.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bsv16_fsv16.cpp index 93ae17541e286f..445312f73dac27 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bsv16_fsv16.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bsv16_fsv16.cpp @@ -50,7 +50,7 @@ ParamsKey PoolingKernel_bsv16_fsv16::GetSupportedKey() const { } PoolingKernelBase::DispatchData PoolingKernel_bsv16_fsv16::SetDefault(const pooling_params& params) const { - DispatchData kd = PoolingKernelBase::SetDefault(params); + DispatchData dispatchData = PoolingKernelBase::SetDefault(params); const auto& out = params.output; @@ -60,17 +60,17 @@ PoolingKernelBase::DispatchData PoolingKernel_bsv16_fsv16::SetDefault(const pool auto f = out.Feature().v; auto b = out.Batch().v; - kd.gws0 = Align(f, 
feature_block_size); - kd.gws1 = x * y * z; - kd.gws2 = CeilDiv(b, batch_block_size); + dispatchData.gws[0] = Align(f, feature_block_size); + dispatchData.gws[1] = x * y * z; + dispatchData.gws[2] = CeilDiv(b, batch_block_size); - kd.lws0 = sub_group_size; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = sub_group_size; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - kd.efficiency = FORCE_PRIORITY_1; + dispatchData.efficiency = FORCE_PRIORITY_1; - return kd; + return dispatchData; } bool PoolingKernel_bsv16_fsv16::Validate(const Params& p, const optional_params& o) const { @@ -98,10 +98,10 @@ bool PoolingKernel_bsv16_fsv16::Validate(const Params& p, const optional_params& return true; } -JitConstants PoolingKernel_bsv16_fsv16::GetJitConstants(const pooling_params& params, DispatchData runInfo) const { +JitConstants PoolingKernel_bsv16_fsv16::GetJitConstants(const pooling_params& params, DispatchData dispatchData) const { auto input = params.inputs[0]; auto output = params.output; - auto jit = PoolingKernelBase::GetJitConstants(params, runInfo); + auto jit = PoolingKernelBase::GetJitConstants(params, dispatchData); jit.AddConstant(MakeJitConstant("OC_BLOCK", feature_block_size)); jit.AddConstant(MakeJitConstant("MB_BLOCK", batch_block_size)); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bsv16_fsv16.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bsv16_fsv16.h index fc2ebc258bd5da..2e938b6c5c85ec 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bsv16_fsv16.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_bsv16_fsv16.h @@ -32,7 +32,7 @@ class PoolingKernel_bsv16_fsv16 : public PoolingKernelBase { protected: bool Validate(const Params& p, const optional_params& o) const override; - JitConstants GetJitConstants(const 
pooling_params& params, DispatchData kd) const override; + JitConstants GetJitConstants(const pooling_params& params, DispatchData dispatchData) const override; DispatchData SetDefault(const pooling_params& params) const override; std::vector GetSupportedFusedOps() const override { return { FusedOpType::QUANTIZE, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_byxf_opt.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_byxf_opt.cpp index b5d9e4759db0b4..8cb55bbde0f897 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_byxf_opt.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_byxf_opt.cpp @@ -41,15 +41,15 @@ ParamsKey PoolingKernelGPUByxfOpt::GetSupportedKey() const { PoolingKernelBase::DispatchData PoolingKernelGPUByxfOpt::SetDefault(const pooling_params& params) const { const auto& output = params.output; - DispatchData runInfo = PoolingKernelBase::SetDefault(params); + DispatchData dispatchData = PoolingKernelBase::SetDefault(params); - runInfo.gws2 = output.Batch().v * (CeilDiv(output.Feature().v, 8)); + dispatchData.gws[2] = output.Batch().v * (CeilDiv(output.Feature().v, 8)); - return runInfo; + return dispatchData; } -JitConstants PoolingKernelGPUByxfOpt::GetJitConstants(const pooling_params& params, DispatchData kd) const { - auto jit = PoolingKernelBase::GetJitConstants(params, kd); +JitConstants PoolingKernelGPUByxfOpt::GetJitConstants(const pooling_params& params, DispatchData dispatchData) const { + auto jit = PoolingKernelBase::GetJitConstants(params, dispatchData); jit.Merge(MakeTypeJitConstants(GetActivationType(params), "ACTIVATION")); jit.Merge(MakeTypeJitConstants(GetAccumulatorType(params), "ACCUMULATOR")); diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_byxf_opt.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_byxf_opt.h index 5c6547706b89b1..4bc02499d1ca34 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_byxf_opt.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_byxf_opt.h @@ -28,7 +28,7 @@ class PoolingKernelGPUByxfOpt : public PoolingKernelBase { protected: bool Validate(const Params&, const optional_params&) const override; - JitConstants GetJitConstants(const pooling_params& params, DispatchData kd) const override; + JitConstants GetJitConstants(const pooling_params& params, DispatchData dispatchData) const override; DispatchData SetDefault(const pooling_params& params) const override; std::vector GetSupportedFusedOps() const override { return { FusedOpType::QUANTIZE, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_byxf_padding_opt.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_byxf_padding_opt.cpp index 655f1648d0b425..2df5ab4811e48c 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_byxf_padding_opt.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_byxf_padding_opt.cpp @@ -41,15 +41,15 @@ ParamsKey PoolingKernelGPUByxfPaddingOpt::GetSupportedKey() const { PoolingKernelBase::DispatchData PoolingKernelGPUByxfPaddingOpt::SetDefault(const pooling_params& params) const { const auto& output = params.output; - DispatchData runInfo = PoolingKernelBase::SetDefault(params); + DispatchData dispatchData = PoolingKernelBase::SetDefault(params); - runInfo.gws2 = output.Batch().v * (CeilDiv(output.Feature().v, 8)); + 
dispatchData.gws[2] = output.Batch().v * (CeilDiv(output.Feature().v, 8)); - return runInfo; + return dispatchData; } -JitConstants PoolingKernelGPUByxfPaddingOpt::GetJitConstants(const pooling_params& params, DispatchData kd) const { - auto jit = PoolingKernelBase::GetJitConstants(params, kd); +JitConstants PoolingKernelGPUByxfPaddingOpt::GetJitConstants(const pooling_params& params, DispatchData dispatchData) const { + auto jit = PoolingKernelBase::GetJitConstants(params, dispatchData); jit.Merge(MakeTypeJitConstants(GetActivationType(params), "ACTIVATION")); jit.Merge(MakeTypeJitConstants(GetAccumulatorType(params), "ACCUMULATOR")); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_byxf_padding_opt.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_byxf_padding_opt.h index f7566aac68a03d..96149530411edf 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_byxf_padding_opt.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_byxf_padding_opt.h @@ -28,7 +28,7 @@ class PoolingKernelGPUByxfPaddingOpt : public PoolingKernelBase { protected: bool Validate(const Params&, const optional_params&) const override; - JitConstants GetJitConstants(const pooling_params& params, DispatchData kd) const override; + JitConstants GetJitConstants(const pooling_params& params, DispatchData dispatchData) const override; DispatchData SetDefault(const pooling_params& params) const override; std::vector GetSupportedFusedOps() const override { return { FusedOpType::QUANTIZE, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_fs_b_yx_fsv32.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_fs_b_yx_fsv32.cpp index 25ccfe1c6812b0..62d570e2a509a1 100644 --- 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_fs_b_yx_fsv32.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_fs_b_yx_fsv32.cpp @@ -43,19 +43,19 @@ ParamsKey PoolingKerneGPU_fs_b_yx_fsv32::GetSupportedKey() const { } PoolingKernelBase::DispatchData PoolingKerneGPU_fs_b_yx_fsv32::SetDefault(const pooling_params& params) const { - DispatchData runInfo = PoolingKernelBase::SetDefault(params); + DispatchData dispatchData = PoolingKernelBase::SetDefault(params); - runInfo.gws0 = params.output.X().v; // X output blocks - runInfo.gws1 = params.output.Y().v; // Y output clocks + dispatchData.gws[0] = params.output.X().v; // X output blocks + dispatchData.gws[1] = params.output.Y().v; // Y output blocks // in fs_b_yx_fsv32 format we will process 2 features per work item, so reads/writes are done in full writes for // fp16 - runInfo.gws2 = RoundUp(params.output.Feature().v, 32) * params.output.Batch().v / 2; + dispatchData.gws[2] = RoundUp(params.output.Feature().v, 32) * params.output.Batch().v / 2; - runInfo.lws0 = 1; - runInfo.lws1 = 1; - runInfo.lws2 = 16; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 16; - return runInfo; + return dispatchData; } bool PoolingKerneGPU_fs_b_yx_fsv32::Validate(const Params& p, const optional_params& o) const { @@ -74,8 +74,8 @@ bool PoolingKerneGPU_fs_b_yx_fsv32::Validate(const Params& p, const optional_par return true; } -JitConstants PoolingKerneGPU_fs_b_yx_fsv32::GetJitConstants(const pooling_params& params, DispatchData kd) const { - auto jit = PoolingKernelBase::GetJitConstants(params, kd); +JitConstants PoolingKerneGPU_fs_b_yx_fsv32::GetJitConstants(const pooling_params& params, DispatchData dispatchData) const { + auto jit = PoolingKernelBase::GetJitConstants(params, dispatchData); auto pp = static_cast(params); // Heurestic needed for very big pool size.
diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_fs_b_yx_fsv32.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_fs_b_yx_fsv32.h index 5bb61fa3309994..d224be06633b75 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_fs_b_yx_fsv32.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_fs_b_yx_fsv32.h @@ -29,7 +29,7 @@ class PoolingKerneGPU_fs_b_yx_fsv32 : public PoolingKernelBase { protected: bool Validate(const Params& p, const optional_params& o) const override; - JitConstants GetJitConstants(const pooling_params& params, DispatchData kd) const override; + JitConstants GetJitConstants(const pooling_params& params, DispatchData dispatchData) const override; std::vector GetSupportedFusedOps() const override { return { FusedOpType::QUANTIZE, FusedOpType::SCALE, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_int8_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_int8_ref.cpp index 9df0ebaeb8bdce..66df1523c76b48 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_int8_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_int8_ref.cpp @@ -59,8 +59,8 @@ KernelsData PoolingKernelGPUInt8Ref::GetKernelsData(const Params& params, const return GetCommonKernelsData(params, options, FORCE_PRIORITY_9); } -JitConstants PoolingKernelGPUInt8Ref::GetJitConstants(const pooling_params& params, DispatchData kd) const { - JitConstants jit = PoolingKernelBase::GetJitConstants(params, kd); +JitConstants PoolingKernelGPUInt8Ref::GetJitConstants(const pooling_params& params, DispatchData dispatchData) const { + JitConstants jit = 
PoolingKernelBase::GetJitConstants(params, dispatchData); jit.Merge(MakeTypeJitConstants(GetActivationType(params), "ACTIVATION")); jit.Merge(MakeTypeJitConstants(GetAccumulatorType(params), "ACCUMULATOR")); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_int8_ref.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_int8_ref.h index 6def2a4b290a4c..aeae5413f21a9b 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_int8_ref.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_int8_ref.h @@ -27,7 +27,7 @@ class PoolingKernelGPUInt8Ref : public PoolingKernelBase { KernelsData GetKernelsData(const Params& params, const optional_params& options) const override; bool Validate(const Params&, const optional_params&) const override; - JitConstants GetJitConstants(const pooling_params& params, DispatchData kd) const override; + JitConstants GetJitConstants(const pooling_params& params, DispatchData dispatchData) const override; std::vector GetSupportedFusedOps() const override { return { FusedOpType::QUANTIZE, FusedOpType::SCALE, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_ref.cpp index 67dfa1dd161124..8568b64c8279df 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_ref.cpp @@ -41,8 +41,8 @@ ParamsKey PoolingKernelGPURef::GetSupportedKey() const { return k; } -JitConstants PoolingKernelGPURef::GetJitConstants(const pooling_params& params, DispatchData kd) const { - auto jit = PoolingKernelBase::GetJitConstants(params, 
kd); +JitConstants PoolingKernelGPURef::GetJitConstants(const pooling_params& params, DispatchData dispatchData) const { + auto jit = PoolingKernelBase::GetJitConstants(params, dispatchData); jit.Merge(MakeTypeJitConstants(GetActivationType(params), "ACTIVATION")); jit.Merge(MakeTypeJitConstants(GetAccumulatorType(params), "ACCUMULATOR")); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_ref.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_ref.h index e42bcc8c77a1e9..4afdbadad514bd 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_ref.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pooling/pooling_kernel_gpu_ref.h @@ -32,6 +32,6 @@ class PoolingKernelGPURef : public PoolingKernelBase { } protected: - JitConstants GetJitConstants(const pooling_params& params, DispatchData kd) const override; + JitConstants GetJitConstants(const pooling_params& params, DispatchData dispatchData) const override; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pyramid_roi_align/pyramid_roi_align_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pyramid_roi_align/pyramid_roi_align_kernel_base.cpp index 4f4001379d72a1..05da60c34e93e4 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pyramid_roi_align/pyramid_roi_align_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pyramid_roi_align/pyramid_roi_align_kernel_base.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2019 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -31,24 +31,10 @@ JitConstants PyramidROIAlignKernelBase::GetJitConstants(const PyramidROIAlign_pa } PyramidROIAlignKernelBase::DispatchData PyramidROIAlignKernelBase::SetDefault(const PyramidROIAlign_params& params) const { - DispatchData kd; - - kd.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; - - std::vector global; - global = {1, 1, 1}; - - const auto& local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - return kd; + DispatchData dispatchData; + dispatchData.gws = {1, 1, 1}; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); + return dispatchData; } KernelsData PyramidROIAlignKernelBase::GetCommonKernelsData(const Params& params, @@ -58,7 +44,7 @@ KernelsData PyramidROIAlignKernelBase::GetCommonKernelsData(const Params& params const auto& prim_params = static_cast(params); - auto run_info = SetDefault(prim_params); + auto dispatchData = SetDefault(prim_params); KernelData k_data = KernelData::Default(params); auto cldnn_jit = GetJitConstants(prim_params); auto entry_point = GetEntryPoint(kernelName, prim_params.layerID, options); @@ -66,7 +52,7 @@ KernelsData PyramidROIAlignKernelBase::GetCommonKernelsData(const Params& params auto& kernel = k_data.kernels[0]; FillCLKernelData(kernel, - run_info, + dispatchData, params.engineInfo, kernelName, jit, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pyramid_roi_align/pyramid_roi_align_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pyramid_roi_align/pyramid_roi_align_kernel_ref.cpp index 5da891451fa419..d9446c997e495e 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pyramid_roi_align/pyramid_roi_align_kernel_ref.cpp +++ 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/pyramid_roi_align/pyramid_roi_align_kernel_ref.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -42,24 +42,16 @@ ParamsKey PyramidROIAlignKernelRef::GetSupportedKey() const { } PyramidROIAlignKernelBase::DispatchData PyramidROIAlignKernelRef::SetDefault(const PyramidROIAlign_params& params) const { - auto dispatch = PyramidROIAlignKernelBase::SetDefault(params); + auto dispatchData = PyramidROIAlignKernelBase::SetDefault(params); - std::vector global = { + dispatchData.gws = { params.output.X().v * params.output.Y().v, params.output.Feature().v, params.output.Batch().v }; - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - dispatch.gws0 = global[0]; - dispatch.gws1 = global[1]; - dispatch.gws2 = global[2]; - - dispatch.lws0 = local[0]; - dispatch.lws1 = local[1]; - dispatch.lws2 = local[2]; - - return dispatch; + return dispatchData; } KernelsData PyramidROIAlignKernelRef::GetKernelsData(const Params& params, const optional_params& options) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_base.cpp index 5ec60546c3b516..d52551c0b3df3d 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_base.cpp @@ -33,7 +33,7 @@ bool QuantizeKernelBase::Validate(const Params& p, const optional_params&) const return true; } -JitConstants 
QuantizeKernelBase::GetJitConstants(const quantize_params& params, const CommonDispatchData& runInfo) const { +JitConstants QuantizeKernelBase::GetJitConstants(const quantize_params& params, const CommonDispatchData& dispatchData) const { JitConstants jit = MakeBaseParamsJitConstants(params); if (params.packed_binary_output) { @@ -55,9 +55,9 @@ JitConstants QuantizeKernelBase::GetJitConstants(const quantize_params& params, jit.AddConstant(MakeJitConstant("LEVELS", static_cast(params.levels))); - jit.AddConstant(MakeJitConstant("LWS_0", runInfo.lws0)); - jit.AddConstant(MakeJitConstant("LWS_1", runInfo.lws1)); - jit.AddConstant(MakeJitConstant("LWS_2", runInfo.lws2)); + jit.AddConstant(MakeJitConstant("LWS_0", dispatchData.lws[0])); + jit.AddConstant(MakeJitConstant("LWS_1", dispatchData.lws[1])); + jit.AddConstant(MakeJitConstant("LWS_2", dispatchData.lws[2])); return jit; } @@ -72,15 +72,15 @@ KernelsData QuantizeKernelBase::GetKernelsData(const Params& params, const optio return {}; } - auto runInfo = SetDefault(newParams, options); + auto dispatchData = SetDefault(newParams, options); auto entry_point = GetEntryPoint(kernelName, newParams.layerID, options); - auto cldnn_jit = GetJitConstants(newParams, runInfo); + auto cldnn_jit = GetJitConstants(newParams, dispatchData); std::string jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - kernel.workGroups.global = {runInfo.gws0, runInfo.gws1, runInfo.gws2}; - kernel.workGroups.local = {runInfo.lws0, runInfo.lws1, runInfo.lws2}; + kernel.workGroups.global = dispatchData.gws; + kernel.workGroups.local = dispatchData.lws; kernel.kernelString = GetKernelString(kernelName, jit, entry_point, params.engineInfo, DEFAULT); kernel.arguments = GetArgsDesc(static_cast(newParams.inputs.size()), false, false); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_base.h 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_base.h index 480e786ab847bf..c03ef65e8531c1 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_base.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_base.h @@ -29,7 +29,7 @@ class QuantizeKernelBase : public common_kernel_base { KernelsData GetKernelsData(const Params& params, const optional_params& options) const override; protected: - virtual JitConstants GetJitConstants(const quantize_params& params, const CommonDispatchData& runInfo) const; + virtual JitConstants GetJitConstants(const quantize_params& params, const CommonDispatchData& dispatchData) const; virtual CommonDispatchData SetDefault(const quantize_params& params, const optional_params&) const = 0; }; } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_ref.cpp index 27fe85f5af4b4d..61443bd4a6a35c 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_ref.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2019 Intel Corporation +// Copyright (c) 2019-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
- -#include #include "quantize_kernel_ref.h" #include "kernel_selector_utils.h" #include @@ -41,35 +39,33 @@ ParamsKey QuantizeKernelRef::GetSupportedKey() const { } CommonDispatchData QuantizeKernelRef::SetDefault(const quantize_params& params, const optional_params&) const { - CommonDispatchData runInfo; + CommonDispatchData dispatchData; auto output = params.output; if (output.GetLayout() == DataLayout::b_fs_yx_fsv16 && !params.packed_binary_output) { - runInfo.gws0 = output.Batch().v; - runInfo.gws1 = Align(output.Feature().v, sub_group_size); - runInfo.gws2 = output.Y().v * output.X().v * output.Z().v; + dispatchData.gws[0] = output.Batch().v; + dispatchData.gws[1] = Align(output.Feature().v, sub_group_size); + dispatchData.gws[2] = output.Y().v * output.X().v * output.Z().v; - runInfo.lws0 = 1; - runInfo.lws1 = sub_group_size; - runInfo.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = sub_group_size; + dispatchData.lws[2] = 1; } else { - runInfo.gws0 = output.Batch().v; - runInfo.gws1 = params.packed_binary_output ? CeilDiv(output.Feature().v, 32) : output.Feature().v; - runInfo.gws2 = Align(output.X().v * output.Y().v * output.Z().v, 16); + dispatchData.gws[0] = output.Batch().v; + dispatchData.gws[1] = params.packed_binary_output ? 
CeilDiv(output.Feature().v, 32) : output.Feature().v; + dispatchData.gws[2] = Align(output.X().v * output.Y().v * output.Z().v, 16); - runInfo.lws0 = 1; - runInfo.lws1 = 1; - runInfo.lws2 = 16; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 16; } - runInfo.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; - - return runInfo; + return dispatchData; } -JitConstants QuantizeKernelRef::GetJitConstants(const quantize_params& params, const CommonDispatchData& runInfo) const { - JitConstants jit = Parent::GetJitConstants(params, runInfo); +JitConstants QuantizeKernelRef::GetJitConstants(const quantize_params& params, const CommonDispatchData& dispatchData) const { + JitConstants jit = Parent::GetJitConstants(params, dispatchData); if (params.output.GetLayout() == DataLayout::b_fs_yx_fsv16 && !params.packed_binary_output) { jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", sub_group_size)); } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_ref.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_ref.h index f0263b231cb6ba..5e9bfab92c1f4e 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_ref.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_ref.h @@ -26,7 +26,7 @@ class QuantizeKernelRef : public QuantizeKernelBase { QuantizeKernelRef() : QuantizeKernelBase("quantize_gpu_ref") {} virtual ~QuantizeKernelRef() {} - JitConstants GetJitConstants(const quantize_params& params, const CommonDispatchData& runInfo) const override; + JitConstants GetJitConstants(const quantize_params& params, const CommonDispatchData& dispatchData) const override; CommonDispatchData SetDefault(const quantize_params& params, const optional_params&) const override; bool Validate(const Params& p, const optional_params& o) const override; ParamsKey 
GetSupportedKey() const override; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_scale_shift_opt.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_scale_shift_opt.cpp index 8023c56722fecb..cd29dbfff05176 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_scale_shift_opt.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_scale_shift_opt.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2019 Intel Corporation +// Copyright (c) 2019-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -61,38 +61,28 @@ ParamsKey QuantizeKernelScaleShift::GetSupportedKey() const { } CommonDispatchData QuantizeKernelScaleShift::SetDefault(const quantize_params& params, const optional_params&) const { - CommonDispatchData runInfo; + CommonDispatchData dispatchData; auto output = params.output; if (output.GetLayout() == DataLayout::b_fs_yx_fsv16) { - runInfo.gws0 = output.Y().v * output.X().v; - runInfo.gws1 = Align(output.Feature().v, sub_group_size); - runInfo.gws2 = output.Batch().v; + dispatchData.gws[0] = output.Y().v * output.X().v; + dispatchData.gws[1] = Align(output.Feature().v, sub_group_size); + dispatchData.gws[2] = output.Batch().v; - runInfo.lws0 = 1; - runInfo.lws1 = sub_group_size; - runInfo.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = sub_group_size; + dispatchData.lws[2] = 1; } else { - auto global = GetTensorFriendlyWorkGroups(output); - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; - - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; + dispatchData.gws = GetTensorFriendlyWorkGroups(output); + 
dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); } - runInfo.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; - - return runInfo; + return dispatchData; } -JitConstants QuantizeKernelScaleShift::GetJitConstants(const quantize_params& params, const CommonDispatchData& runInfo) const { - JitConstants jit = Parent::GetJitConstants(params, runInfo); +JitConstants QuantizeKernelScaleShift::GetJitConstants(const quantize_params& params, const CommonDispatchData& dispatchData) const { + JitConstants jit = Parent::GetJitConstants(params, dispatchData); if (params.output.GetLayout() == DataLayout::b_fs_yx_fsv16) { jit.AddConstant(MakeJitConstant("GWS_BATCH", 2)); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_scale_shift_opt.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_scale_shift_opt.h index d88dfb32f66544..ac078157361e8a 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_scale_shift_opt.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/quantize/quantize_kernel_scale_shift_opt.h @@ -26,7 +26,7 @@ class QuantizeKernelScaleShift : public QuantizeKernelBase { QuantizeKernelScaleShift() : QuantizeKernelBase("quantize_gpu_scale_shift_opt") {} virtual ~QuantizeKernelScaleShift() {} - JitConstants GetJitConstants(const quantize_params& params, const CommonDispatchData& runInfo) const override; + JitConstants GetJitConstants(const quantize_params& params, const CommonDispatchData& dispatchData) const override; CommonDispatchData SetDefault(const quantize_params& params, const optional_params&) const override; bool Validate(const Params& p, const optional_params& o) const override; ParamsKey GetSupportedKey() const override; diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reduce/reduce_kernel_b_fs_yx_fsv16.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reduce/reduce_kernel_b_fs_yx_fsv16.cpp index 56cf27936bb660..5548d52fb3e3d3 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reduce/reduce_kernel_b_fs_yx_fsv16.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reduce/reduce_kernel_b_fs_yx_fsv16.cpp @@ -72,22 +72,15 @@ ParamsKey ReduceKernel_b_fs_yx_fsv16::GetSupportedKey() const { } CommonDispatchData ReduceKernel_b_fs_yx_fsv16::SetDefault(const reduce_params& params, const optional_params&) const { - CommonDispatchData runInfo; + CommonDispatchData dispatchData; auto in_dims = calc_in_dims(params); - std::vector global = {16, - CeilDiv(in_dims[3].v, calc_read_offset(params)) * in_dims[2].v, // X, Y - CeilDiv(in_dims[1].v, SIMD) * in_dims[0].v}; // F, B + dispatchData.gws = { 16, + CeilDiv(in_dims[3].v, calc_read_offset(params)) * in_dims[2].v, // X, Y + CeilDiv(in_dims[1].v, SIMD) * in_dims[0].v }; // F, B + dispatchData.lws = { SIMD, 1, 1 }; - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; - - runInfo.lws0 = SIMD; - runInfo.lws1 = 1; - runInfo.lws2 = 1; - - return runInfo; + return dispatchData; } JitConstants ReduceKernel_b_fs_yx_fsv16::GetJitConstants(const reduce_params& params) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reduce/reduce_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reduce/reduce_kernel_base.cpp index 526080e572169d..3db770f1943373 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reduce/reduce_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reduce/reduce_kernel_base.cpp @@ -235,7 +235,7 @@ KernelsData ReduceKernelBase::GetCommonKernelsData(const Params& 
p, } const reduce_params& params = static_cast(p); - DispatchData runInfo = SetDefault(params, options); + DispatchData dispatchData = SetDefault(params, options); KernelData kd = KernelData::Default(params); @@ -245,7 +245,7 @@ KernelsData ReduceKernelBase::GetCommonKernelsData(const Params& p, auto& kernel = kd.kernels[0]; FillCLKernelData(kernel, - runInfo, + dispatchData, params.engineInfo, kernelName, jit, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reduce/reduce_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reduce/reduce_kernel_ref.cpp index ca26a3764a4f8f..5cd1f6d3d33ab2 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reduce/reduce_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reduce/reduce_kernel_ref.cpp @@ -43,23 +43,14 @@ ParamsKey ReduceKernelRef::GetSupportedKey() const { } CommonDispatchData ReduceKernelRef::SetDefault(const reduce_params& params, const optional_params&) const { - CommonDispatchData runInfo; + CommonDispatchData dispatchData; - std::vector global = {params.output.X().v * params.output.Y().v, - params.output.Z().v * params.output.W().v, - params.output.Batch().v * params.output.Feature().v}; + dispatchData.gws = { params.output.X().v * params.output.Y().v, + params.output.Z().v * params.output.W().v, + params.output.Batch().v * params.output.Feature().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; - - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; - - return runInfo; + return dispatchData; } JitConstants ReduceKernelRef::GetJitConstants(const reduce_params& params) const { diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/region_yolo/region_yolo_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/region_yolo/region_yolo_kernel_ref.cpp index aa66932b35c29d..a253affea22de5 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/region_yolo/region_yolo_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/region_yolo/region_yolo_kernel_ref.cpp @@ -1,5 +1,5 @@ /* -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -47,35 +47,23 @@ JitConstants RegionYoloKernelRef::GetJitConstants(const region_yolo_params& ry) } RegionYoloKernelRef::DispatchData SetDefault(const region_yolo_params& params) { - RegionYoloKernelRef::DispatchData kd; - - kd.fp16UnitUsed = (params.inputs[0].GetDType() == Datatype::F16); + RegionYoloKernelRef::DispatchData dispatchData; const auto& input = params.inputs[0]; - std::vector global; if (input.GetLayout() == DataLayout::bfyx) { - global = {input.X().v * input.Y().v, 1, 1}; + dispatchData.gws = {input.X().v * input.Y().v, 1, 1}; } else { - global = {input.Feature().v * input.Batch().v, input.X().v, input.Y().v}; + dispatchData.gws = {input.Feature().v * input.Batch().v, input.X().v, input.Y().v}; } - // Determine global work sizes. 
- kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - return kd; + return dispatchData; } KernelsData RegionYoloKernelRef::GetKernelsData(const Params& params, const optional_params& options) const { assert(params.GetType() == KernelType::REGION_YOLO); const region_yolo_params& orgParams = static_cast(params); - DispatchData runInfo = SetDefault(orgParams); + DispatchData dispatchData = SetDefault(orgParams); KernelData kd = KernelData::Default(params); auto cldnn_jit = GetJitConstants(orgParams); @@ -83,7 +71,7 @@ KernelsData RegionYoloKernelRef::GetKernelsData(const Params& params, const opti auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point); kd.estimatedTime = FORCE_PRIORITY_9; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_from_winograd_2x3_kernel.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_from_winograd_2x3_kernel.cpp index e746526292fa99..22389965226505 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_from_winograd_2x3_kernel.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_from_winograd_2x3_kernel.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -45,21 +45,21 @@ JitConstants ReorderFromWinograd2x3Kernel::GetJitConstants(const reorder_params& ReorderFromWinograd2x3Kernel::DispatchData ReorderFromWinograd2x3Kernel::SetDefault( const reorder_params& params) const { - DispatchData kd; + DispatchData dispatchData; constexpr auto output_tile_width = 2; // by definition of F(2,3) const auto& input = params.inputs[0]; const auto& output = params.output; - kd.gws0 = static_cast(output.Feature().v * output.Batch().v); - kd.gws1 = static_cast(output.X().v / output_tile_width); - kd.gws2 = static_cast(output.Y().v); + dispatchData.gws[0] = static_cast(output.Feature().v * output.Batch().v); + dispatchData.gws[1] = static_cast(output.X().v / output_tile_width); + dispatchData.gws[2] = static_cast(output.Y().v); - kd.lws0 = input.Feature().v > 32 ? 32 : static_cast(input.Feature().v); - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = input.Feature().v > 32 ? 32 : static_cast(input.Feature().v); + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - return kd; + return dispatchData; } KernelsData ReorderFromWinograd2x3Kernel::GetKernelsData(const Params& params, const optional_params& options) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_base.cpp index 43491a2332da35..ded7b6a7014420 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_base.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016-2019 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -151,26 +151,16 @@ JitConstants ReorderKernelBase::GetJitConstants(const reorder_params& params) co ReorderKernelBase::DispatchData ReorderKernelBase::SetDefault(const reorder_weights_params& params) const { const auto& out = params.output; - DispatchData kd; + DispatchData dispatchData; - std::vector global(3); + dispatchData.gws = { out.G().v * out.OFM().v, out.IFM().v, out.X().v * out.Y().v * out.Z().v }; + dispatchData.lws= GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - global = {out.G().v * out.OFM().v, out.IFM().v, out.X().v * out.Y().v * out.Z().v}; - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - return kd; + return dispatchData; } ReorderKernelBase::DispatchData ReorderKernelBase::SetDefault(const reorder_params& params) const { - DispatchData kd; + DispatchData dispatchData; auto& input = params.inputs[0]; DataTensor input_tensor = input; @@ -183,36 +173,28 @@ ReorderKernelBase::DispatchData ReorderKernelBase::SetDefault(const reorder_para input_tensor = DataTensor(input_sizes, input.GetDType(), DataLayout::image_2d_rgba); } - auto global = GetTensorFriendlyWorkGroups(input_tensor); - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; + dispatchData.gws = GetTensorFriendlyWorkGroups(input_tensor); + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); if (params.inputs[0].GetLayout() == DataLayout::fs_b_yx_fsv32) { std::vector sizes = { 32, 16, 8, 4 }; for (auto& s : sizes) { - if (kd.gws2 % s == 0) { - kd.lws0 = 1; - kd.lws1 = 1; - kd.lws2 = s; + if (dispatchData.gws[2] % s == 0) { + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = s; break; } } } 
if (params.output.GetLayout() == DataLayout::bs_fs_yx_bsv16_fsv16 && params.inputs[0].Feature().v % 16 == 0) { - kd.lws0 = 1; - kd.lws1 = 16; - kd.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 16; + dispatchData.lws[2] = 1; } - return kd; + return dispatchData; } KernelsData ReorderKernelBase::GetCommonKernelsData(const reorder_weights_params& params, const optional_params& options, float estimated_time) const { @@ -223,9 +205,9 @@ KernelsData ReorderKernelBase::GetCommonKernelsData(const reorder_weights_params KernelData kd = KernelData::Default(params); reorder_weights_params& newParams = *static_cast(kd.params.get()); - DispatchData runInfo; + DispatchData dispatchData; - runInfo = SetDefault(newParams); + dispatchData = SetDefault(newParams); auto entry_point = GetEntryPoint(kernelName, newParams.layerID, options); auto cldnn_jit = GetJitConstants(newParams); @@ -233,7 +215,7 @@ KernelsData ReorderKernelBase::GetCommonKernelsData(const reorder_weights_params auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point); kernel.arguments = GetArgsDesc(1, false, false); @@ -251,9 +233,7 @@ KernelsData ReorderKernelBase::GetCommonKernelsData(const reorder_params& params KernelData kd = KernelData::Default(params); reorder_params& newParams = *static_cast(kd.params.get()); - DispatchData runInfo; - - runInfo = SetDefault(newParams); + DispatchData dispatchData = SetDefault(newParams); auto entry_point = GetEntryPoint(kernelName, newParams.layerID, options); auto cldnn_jit = GetJitConstants(newParams); @@ -261,7 +241,7 @@ KernelsData ReorderKernelBase::GetCommonKernelsData(const reorder_params& params auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point); 
kernel.arguments = GetArgsDesc(1, false, false); if (newParams.mode == MeanSubtractMode::IN_BUFFER) { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_binary.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_binary.cpp index c4a651f37246ff..10d4c1e81d213e 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_binary.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_binary.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2019 Intel Corporation +// Copyright (c) 2019-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -60,22 +60,14 @@ JitConstants ReorderKernelBinary::GetJitConstants(const reorder_params& params) } ReorderKernelBinary::DispatchData ReorderKernelBinary::SetDefault(const reorder_params& params) const { - DispatchData kd; + DispatchData dispatchData; const auto& input = params.inputs[0]; - std::vector global{input.Batch().v, CeilDiv(input.Feature().v, 32), input.Y().v * input.X().v}; - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + dispatchData.gws = { input.Batch().v, CeilDiv(input.Feature().v, 32), input.Y().v * input.X().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - return kd; + return dispatchData; } KernelsData ReorderKernelBinary::GetKernelsData(const Params& params, const optional_params& options) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_fast_b1.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_fast_b1.cpp 
index 9bdc21cd7a464f..f71a2ab970e020 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_fast_b1.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_fast_b1.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -92,21 +92,21 @@ JitConstants ReorderKernelFastBatch1::GetJitConstants(const reorder_params& para } ReorderKernelFastBatch1::DispatchData ReorderKernelFastBatch1::SetDefault(const reorder_params& params) const { - DispatchData kd; + DispatchData dispatchData; const auto& output = params.output; unsigned int gws = (unsigned int)output.LogicalSize(); - kd.gws0 = Align(gws, 32); - kd.gws1 = 1; - kd.gws2 = 1; + dispatchData.gws[0] = Align(gws, 32); + dispatchData.gws[1] = 1; + dispatchData.gws[2] = 1; - kd.lws0 = 32; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = 32; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - return kd; + return dispatchData; } KernelsData ReorderKernelFastBatch1::GetKernelsData(const Params& params, const optional_params& options) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_fs_b_yx_fsv32_to_bfyx.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_fs_b_yx_fsv32_to_bfyx.cpp index 2db66415c965b9..83c80b28097ee5 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_fs_b_yx_fsv32_to_bfyx.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_fs_b_yx_fsv32_to_bfyx.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2019 Intel Corporation +// Copyright (c) 2019-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 
(the "License"); // you may not use this file except in compliance with the License. @@ -68,19 +68,19 @@ JitConstants ReorderKernel_fs_b_yx_fsv32_to_bfyx::GetJitConstants(const reorder_ } ReorderKernelBase::DispatchData ReorderKernel_fs_b_yx_fsv32_to_bfyx::SetDefault(const reorder_params& params) const { - DispatchData kd; + DispatchData dispatchData; auto x_aligned = Align(params.output.X().v, x_block_align); - kd.gws0 = params.output.Batch().v; - kd.gws1 = Align(params.output.Feature().v, fsv); - kd.gws2 = params.output.Y().v * x_aligned / GetOptimalSize(x_aligned, optimal_x_sizes); + dispatchData.gws[0] = params.output.Batch().v; + dispatchData.gws[1] = Align(params.output.Feature().v, fsv); + dispatchData.gws[2] = params.output.Y().v * x_aligned / GetOptimalSize(x_aligned, optimal_x_sizes); - kd.lws0 = 1; - kd.lws1 = GetOptimalSize(kd.gws1, optimal_feature_sizes); - kd.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = GetOptimalSize(dispatchData.gws[1], optimal_feature_sizes); + dispatchData.lws[2] = 1; - return kd; + return dispatchData; } KernelsData ReorderKernel_fs_b_yx_fsv32_to_bfyx::GetKernelsData(const Params& params, const optional_params& options) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_to_yxfb_batched.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_to_yxfb_batched.cpp index 8e2a284d5036f5..0874f578577a13 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_to_yxfb_batched.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_to_yxfb_batched.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -67,21 +67,21 @@ JitConstants ReorderKernel_to_yxfb_batched::GetJitConstants(const reorder_params } ReorderKernelBase::DispatchData ReorderKernel_to_yxfb_batched::SetDefault(const reorder_params& params) const { - DispatchData kd; + DispatchData dispatchData; const auto& input = params.inputs[0]; unsigned int gws = (unsigned int)input.LogicalSize(); - kd.gws0 = Align(gws, 8 * input.Batch().v) / input.Batch().v; - kd.gws1 = 1; - kd.gws2 = 1; + dispatchData.gws[0] = Align(gws, 8 * input.Batch().v) / input.Batch().v; + dispatchData.gws[1] = 1; + dispatchData.gws[2] = 1; - kd.lws0 = 8; - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = 8; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - return kd; + return dispatchData; } KernelsData ReorderKernel_to_yxfb_batched::GetKernelsData(const Params& params, const optional_params& options) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_to_winograd_2x3_kernel.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_to_winograd_2x3_kernel.cpp index 26f3dae4ab9e47..906bd56c7ccda6 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_to_winograd_2x3_kernel.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_to_winograd_2x3_kernel.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -43,20 +43,20 @@ JitConstants ReorderToWinograd2x3Kernel::GetJitConstants(const reorder_params& p } ReorderToWinograd2x3Kernel::DispatchData ReorderToWinograd2x3Kernel::SetDefault(const reorder_params& params) const { - DispatchData kd; + DispatchData dispatchData; const auto& input = params.inputs[0]; const auto& output = params.output; - kd.gws0 = static_cast(input.Feature().v * input.Batch().v); - kd.gws1 = static_cast(params.winograd_nr_tiles_x); - kd.gws2 = static_cast(output.Y().v); + dispatchData.gws[0] = static_cast(input.Feature().v * input.Batch().v); + dispatchData.gws[1] = static_cast(params.winograd_nr_tiles_x); + dispatchData.gws[2] = static_cast(output.Y().v); - kd.lws0 = input.Feature().v > 32 ? 32 : static_cast(input.Feature().v); - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[0] = input.Feature().v > 32 ? 32 : static_cast(input.Feature().v); + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - return kd; + return dispatchData; } KernelsData ReorderToWinograd2x3Kernel::GetKernelsData(const Params& params, const optional_params& options) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_binary_kernel.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_binary_kernel.cpp index 3e86a5f97dd578..8012dc9f874a23 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_binary_kernel.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_binary_kernel.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2019 Intel Corporation +// Copyright (c) 2019-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -34,20 +34,12 @@ ReorderWeightsBinaryKernel::DispatchData ReorderWeightsBinaryKernel::SetDefault( const reorder_weights_params& params) const { const auto& out = params.output; - DispatchData kd; + DispatchData dispatchData; - std::vector global = {out.OFM().v, CeilDiv(out.IFM().v, 32), out.X().v * out.Y().v}; - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + dispatchData.gws = { out.OFM().v, CeilDiv(out.IFM().v, 32), out.X().v * out.Y().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - return kd; + return dispatchData; } KernelsData ReorderWeightsBinaryKernel::GetKernelsData(const Params& params, const optional_params& options) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_image_fyx_b_kernel.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_image_fyx_b_kernel.cpp index 28b6b4ea15e7b8..24a2194bd8d90e 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_image_fyx_b_kernel.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_image_fyx_b_kernel.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -33,26 +33,15 @@ ParamsKey ReorderWeightsImage_fyx_b_Kernel::GetSupportedKey() const { return k; } -ReorderWeightsImage_fyx_b_Kernel::DispatchData ReorderWeightsImage_fyx_b_Kernel::SetDefault( - const reorder_weights_params& params) const { +ReorderWeightsImage_fyx_b_Kernel::DispatchData ReorderWeightsImage_fyx_b_Kernel::SetDefault(const reorder_weights_params& params) const { const auto& out = params.output; - DispatchData kd; + DispatchData dispatchData; - std::vector global(3); + dispatchData.gws = { out.OFM().v, Align(out.X().v * out.Y().v * out.IFM().v, 4) / 4, 1 }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - global = {out.OFM().v, Align(out.X().v * out.Y().v * out.IFM().v, 4) / 4, 1}; - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - return kd; + return dispatchData; } KernelsData ReorderWeightsImage_fyx_b_Kernel::GetKernelsData(const Params& params, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_image_winograd_6x3_kernel.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_image_winograd_6x3_kernel.cpp index 467fa0726a2ef2..d9f8ba5a833247 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_image_winograd_6x3_kernel.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_image_winograd_6x3_kernel.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -35,19 +35,19 @@ ParamsKey ReorderWeightsImageWinograd6x3Kernel::GetSupportedKey() const { ReorderWeightsImageWinograd6x3Kernel::DispatchData ReorderWeightsImageWinograd6x3Kernel::SetDefault( const reorder_weights_params& params) const { - DispatchData kd; + DispatchData dispatchData; const auto& input = params.input; - kd.gws0 = 1; - kd.gws1 = 3; - kd.gws2 = static_cast(input.IFM().v * input.OFM().v); + dispatchData.gws[0] = 1; + dispatchData.gws[1] = 3; + dispatchData.gws[2] = static_cast(input.IFM().v * input.OFM().v); - kd.lws0 = 1; - kd.lws1 = 1; - kd.lws2 = 32; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 32; - return kd; + return dispatchData; } KernelsData ReorderWeightsImageWinograd6x3Kernel::GetKernelsData(const Params& params, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_opt.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_opt.cpp index 09b0d776d8cc08..32536f9da456e8 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_opt.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_opt.cpp @@ -107,7 +107,7 @@ static inline size_t GetOptimalSize(size_t val, std::vector optimal_size ReorderWeightsOpt::DispatchData ReorderWeightsOpt::SetDefault( const reorder_weights_params& params) const { - DispatchData kd; + DispatchData dispatchData; const auto& output = params.output; const auto output_layout = output.GetLayout(); @@ -123,22 +123,19 @@ ReorderWeightsOpt::DispatchData ReorderWeightsOpt::SetDefault( const auto ifm_block = (osv_first) ? ifm_block_supported ? 
GetOptimalSize(output.IFM().v, preferred_sizes) : 1 : subgroup_size; - std::vector global; if (osv_first) { - global = {output.G().v * (output.IFM().v / ifm_block), output.Z().v * output.Y().v * output.X().v, Align(output.OFM().v, ofm_block)}; + dispatchData.gws = { output.G().v * (output.IFM().v / ifm_block), + output.Z().v * output.Y().v * output.X().v, + Align(output.OFM().v, ofm_block) }; } else { - global = {output.G().v * (output.OFM().v / ofm_block), output.Z().v * output.Y().v * output.X().v, Align(output.IFM().v, ifm_block)}; + dispatchData.gws = { output.G().v * (output.OFM().v / ofm_block), + output.Z().v * output.Y().v * output.X().v, + Align(output.IFM().v, ifm_block) }; } - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; + dispatchData.lws = { 1, 1, 16 }; - kd.lws0 = 1; - kd.lws1 = 1; - kd.lws2 = 16; - - return kd; + return dispatchData; } JitConstants ReorderWeightsOpt::GetJitConstants(const reorder_weights_params& params) const { @@ -174,7 +171,7 @@ bool ReorderWeightsOpt::Validate(const Params& params, const optional_params& /* const auto& p = static_cast(params); const auto& input = p.input; const auto& output = p.output; - + if (input.GroupedLayout() != output.GroupedLayout()) { return false; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_winograd_2x3_kernel.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_winograd_2x3_kernel.cpp index 84ad96ba5f0812..cca683f0088b66 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_winograd_2x3_kernel.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_winograd_2x3_kernel.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file 
except in compliance with the License. @@ -35,19 +35,19 @@ ParamsKey ReorderWeightsWinograd2x3Kernel::GetSupportedKey() const { ReorderWeightsWinograd2x3Kernel::DispatchData ReorderWeightsWinograd2x3Kernel::SetDefault( const reorder_weights_params& params) const { - DispatchData kd; + DispatchData dispatchData; const auto& input = params.input; - kd.gws0 = 1; - kd.gws1 = 3; - kd.gws2 = static_cast(input.IFM().v * input.OFM().v); + dispatchData.gws[0] = 1; + dispatchData.gws[1] = 3; + dispatchData.gws[2] = static_cast(input.IFM().v * input.OFM().v); - kd.lws0 = 1; - kd.lws1 = 1; - kd.lws2 = 32; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 32; - return kd; + return dispatchData; } KernelsData ReorderWeightsWinograd2x3Kernel::GetKernelsData(const Params& params, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_winograd_6x3_kernel.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_winograd_6x3_kernel.cpp index 2ac9dd08306b2a..b9355d6ca8e887 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_winograd_6x3_kernel.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_weights_winograd_6x3_kernel.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -34,19 +34,19 @@ ParamsKey ReorderWeightsWinograd6x3Kernel::GetSupportedKey() const { ReorderWeightsWinograd6x3Kernel::DispatchData ReorderWeightsWinograd6x3Kernel::SetDefault( const reorder_weights_params& params) const { - DispatchData kd; + DispatchData dispatchData; const auto& input = params.input; - kd.gws0 = 1; - kd.gws1 = 3; - kd.gws2 = static_cast(input.IFM().v * input.OFM().v); + dispatchData.gws[0] = 1; + dispatchData.gws[1] = 3; + dispatchData.gws[2] = static_cast(input.IFM().v * input.OFM().v); - kd.lws0 = 1; - kd.lws1 = 1; - kd.lws2 = 32; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 32; - return kd; + return dispatchData; } KernelsData ReorderWeightsWinograd6x3Kernel::GetKernelsData(const Params& params, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorg_yolo/reorg_yolo_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorg_yolo/reorg_yolo_kernel_ref.cpp index 3e06aecf45bf49..8f55732be31e9d 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorg_yolo/reorg_yolo_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorg_yolo/reorg_yolo_kernel_ref.cpp @@ -1,5 +1,5 @@ /* -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -44,35 +44,23 @@ JitConstants ReorgYoloKernelRef::GetJitConstants(const reorg_yolo_params& ry) co return jit; } ReorgYoloKernelRef::DispatchData SetDefault(const reorg_yolo_params& params) { - ReorgYoloKernelRef::DispatchData kd; - - kd.fp16UnitUsed = (params.inputs[0].GetDType() == Datatype::F16); + ReorgYoloKernelRef::DispatchData dispatchData; const auto& input = params.inputs[0]; - std::vector global; if (input.GetLayout() == DataLayout::bfyx) { - global = {input.X().v, input.Y().v, input.Feature().v}; + dispatchData.gws = {input.X().v, input.Y().v, input.Feature().v}; } else { - global = {input.Feature().v * input.Batch().v, input.X().v, input.Y().v}; + dispatchData.gws = {input.Feature().v * input.Batch().v, input.X().v, input.Y().v}; } - // Determine global work sizes. - kd.gws0 = global[0]; - kd.gws1 = global[1]; - kd.gws2 = global[2]; - - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - return kd; + return dispatchData; } KernelsData ReorgYoloKernelRef::GetKernelsData(const Params& params, const optional_params& options) const { assert(params.GetType() == KernelType::REORG_YOLO); const reorg_yolo_params& orgParams = static_cast(params); - DispatchData runInfo = SetDefault(orgParams); + DispatchData dispatchData = SetDefault(orgParams); KernelData kd = KernelData::Default(params); auto cldnn_jit = GetJitConstants(orgParams); @@ -80,7 +68,7 @@ KernelsData ReorgYoloKernelRef::GetKernelsData(const Params& params, const optio auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point); kd.estimatedTime = FORCE_PRIORITY_9; diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/resample/resample_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/resample/resample_kernel_base.cpp index 6f933f4fd7f745..1ff3913aa7f39d 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/resample/resample_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/resample/resample_kernel_base.cpp @@ -58,40 +58,29 @@ size_t ResampleKernelBase::GetFeatureBlockSize(const resample_params& params) co } ResampleKernelBase::DispatchData ResampleKernelBase::SetDefault(const kernel_selector::resample_params &arg) const { - DispatchData runInfo; - std::vector global; - std::vector local; + DispatchData dispatchData; const auto& out = arg.output; if (arg.resampleType == ResampleType::NEAREST_NEIGHBOR) - global = {out.X().v, out.Y().v * out.Z().v, out.Feature().v * out.Batch().v}; + dispatchData.gws = { out.X().v, out.Y().v * out.Z().v, out.Feature().v * out.Batch().v }; else if (arg.resampleType == ResampleType::BILINEAR_INTERP || arg.resampleType == ResampleType::LINEAR_ONNX) - global = {Align(out.X().v, 32), out.Y().v, out.Batch().v}; + dispatchData.gws = { Align(out.X().v, 32), out.Y().v, out.Batch().v }; else if (arg.resampleType == ResampleType::CAFFE_BILINEAR_INTERP) - global = {out.X().v * out.Y().v, CeilDiv(out.Feature().v, GetFeatureBlockSize(arg)), out.Batch().v * out.Z().v}; + dispatchData.gws = { out.X().v * out.Y().v, CeilDiv(out.Feature().v, GetFeatureBlockSize(arg)), out.Batch().v * out.Z().v }; else - global = {out.X().v, out.Y().v * out.Z().v, out.Feature().v * out.Batch().v}; + dispatchData.gws = { out.X().v, out.Y().v * out.Z().v, out.Feature().v * out.Batch().v }; - local = GetOptimalLocalWorkGroupSizes(global, arg.engineInfo); + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, arg.engineInfo); if (arg.resampleType == ResampleType::BILINEAR_INTERP || 
arg.resampleType == ResampleType::LINEAR_ONNX) { - local[0] = 32; - local[1] = 1; - local[2] = 1; + dispatchData.lws[0] = 32; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; } - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; + dispatchData.efficiency = FORCE_PRIORITY_7; - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; - - runInfo.efficiency = FORCE_PRIORITY_7; - runInfo.fp16UnitUsed = out.GetDType() == Datatype::F16; - - return runInfo; + return dispatchData; } bool ResampleKernelBase::Validate(const Params& p, const optional_params& o) const { @@ -227,16 +216,16 @@ KernelsData ResampleKernelBase::GetCommonKernelsData(const Params& params, const KernelData kd = KernelData::Default(params); resample_params& newParams = *static_cast(kd.params.get()); - auto runInfo = SetDefault(newParams); + auto dispatchData = SetDefault(newParams); auto entry_point = GetEntryPoint(kernelName, newParams.layerID, options); auto cldnn_jit = GetJitConstants(newParams); std::string jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point, + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point, DEFAULT, false, false, 1, GetFusedPrimitiveInputsCount(params)); - kd.estimatedTime = runInfo.efficiency; + kd.estimatedTime = dispatchData.efficiency; return {kd}; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/resample/resample_kernel_opt.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/resample/resample_kernel_opt.cpp index 9a74a619c68d0d..da201ed3bec2cd 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/resample/resample_kernel_opt.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/resample/resample_kernel_opt.cpp @@ -52,21 +52,20 @@ ParamsKey 
ResampleKernelOpt::GetSupportedKey() const { } ResampleKernelBase::DispatchData ResampleKernelOpt::SetDefault(const kernel_selector::resample_params &arg) const { - DispatchData runInfo; + DispatchData dispatchData; const auto& out = arg.output; - runInfo.gws0 = CeilDiv(out.X().v, GetOptimalBlockSize(arg)) * out.Y().v; - runInfo.gws1 = Align(out.Feature().v, sub_group_size); - runInfo.gws2 = arg.output.Batch().v; + dispatchData.gws[0] = CeilDiv(out.X().v, GetOptimalBlockSize(arg)) * out.Y().v; + dispatchData.gws[1] = Align(out.Feature().v, sub_group_size); + dispatchData.gws[2] = arg.output.Batch().v; - runInfo.lws0 = 1; - runInfo.lws1 = sub_group_size; - runInfo.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = sub_group_size; + dispatchData.lws[2] = 1; - runInfo.efficiency = FORCE_PRIORITY_3; - runInfo.fp16UnitUsed = out.GetDType() == Datatype::F16; + dispatchData.efficiency = FORCE_PRIORITY_3; - return runInfo; + return dispatchData; } bool ResampleKernelOpt::Validate(const Params& p, const optional_params& o) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/resample/resample_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/resample/resample_kernel_ref.cpp index 90069a73514702..eb66fbacd20b06 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/resample/resample_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/resample/resample_kernel_ref.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016-2019 Intel Corporation +// Copyright (c) 2016-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -123,25 +123,14 @@ JitConstants ResampleKernelRef::GetJitConstants(const resample_params& params) c } ResampleKernelBase::DispatchData ResampleKernelRef::SetDefault(const resample_params& arg) const { - auto dispatch = Parent::SetDefault(arg); + auto dispatchData = Parent::SetDefault(arg); if (use_packing(arg)) { auto pack = packing_factor(arg); - std::vector global; - std::vector local; - - global = { arg.output.X().v, arg.output.Y().v * arg.output.Z().v, CeilDiv(arg.output.Feature().v, pack) * arg.output.Batch().v }; - local = GetOptimalLocalWorkGroupSizes(global, arg.engineInfo); - - dispatch.gws0 = global[0]; - dispatch.gws1 = global[1]; - dispatch.gws2 = global[2]; - - dispatch.lws0 = local[0]; - dispatch.lws1 = local[1]; - dispatch.lws2 = local[2]; + dispatchData.gws = { arg.output.X().v, arg.output.Y().v * arg.output.Z().v, CeilDiv(arg.output.Feature().v, pack) * arg.output.Batch().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, arg.engineInfo); } - return dispatch; + return dispatchData; } } // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reverse_sequence/reverse_sequence_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reverse_sequence/reverse_sequence_kernel_ref.cpp index f3926a75580c73..bcd95a840870c6 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reverse_sequence/reverse_sequence_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reverse_sequence/reverse_sequence_kernel_ref.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2019 Intel Corporation +// Copyright (c) 2019-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -41,23 +41,15 @@ ParamsKey ReverseSequenceKernelRef::GetSupportedKey() const { CommonDispatchData ReverseSequenceKernelRef::SetDefault(const reverse_sequence_params& params, const optional_params&) const { - CommonDispatchData runInfo; + CommonDispatchData dispatchData; - std::vector global = {params.output.Batch().v, - params.output.Feature().v, - params.output.Y().v * params.output.X().v}; + dispatchData.gws = { params.output.Batch().v, + params.output.Feature().v, + params.output.Y().v * params.output.X().v }; - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; - - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; - - return runInfo; + return dispatchData; } JitConstants ReverseSequenceKernelRef::GetJitConstants(const reverse_sequence_params& params) const { @@ -75,14 +67,14 @@ KernelsData ReverseSequenceKernelRef::GetKernelsData(const Params& params, const assert(params.GetType() == KernelType::REVERSE_SEQUENCE); - auto runInfo = SetDefault(newParams, options); + auto dispatchData = SetDefault(newParams, options); auto entry_point = GetEntryPoint(kernelName, newParams.layerID, options); auto cldnn_jit = GetJitConstants(newParams); std::string jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point, "", false, false, 2); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point, "", false, false, 2); kd.estimatedTime = DONT_USE_IF_HAVE_SOMETHING_ELSE; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/roi_pooling/roi_pooling_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/roi_pooling/roi_pooling_kernel_base.cpp index 
b084ac4bb5d57d..1dbba05d50d225 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/roi_pooling/roi_pooling_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/roi_pooling/roi_pooling_kernel_base.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2019 Intel Corporation +// Copyright (c) 2019-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,24 +18,22 @@ namespace kernel_selector { static ROIPoolingKernelBase::DispatchData SetDefault(const roi_pooling_params& params) { - ROIPoolingKernelBase::DispatchData kd; - - kd.fp16UnitUsed = (params.inputs[0].GetDType() == Datatype::F16); + ROIPoolingKernelBase::DispatchData dispatchData; // Determine global work sizes. - kd.gws0 = params.output.LogicalSize(); - kd.gws1 = 1; - kd.gws2 = 1; + dispatchData.gws[0] = params.output.LogicalSize(); + dispatchData.gws[1] = 1; + dispatchData.gws[2] = 1; // Find largest positive local work size that is divider for global work size. 
- kd.lws0 = std::min(std::max(kd.gws0, static_cast(1)), static_cast(32)); - while (kd.gws0 % kd.lws0 != 0) { - --kd.lws0; + dispatchData.lws[0] = std::min(std::max(dispatchData.gws[0], static_cast(1)), static_cast(32)); + while (dispatchData.gws[0] % dispatchData.lws[0] != 0) { + --dispatchData.lws[0]; } - kd.lws1 = 1; - kd.lws2 = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - return kd; + return dispatchData; } JitConstants ROIPoolingKernelBase::GetJitConstants(const roi_pooling_params& rp) const { @@ -59,7 +57,7 @@ KernelsData ROIPoolingKernelBase::GetCommonKernelsData(const Params& params, return {}; } - DispatchData runInfo = SetDefault(orgParams); + DispatchData dispatchData = SetDefault(orgParams); KernelData kd = KernelData::Default(params); auto cldnn_jit = GetJitConstants(orgParams); @@ -67,7 +65,7 @@ KernelsData ROIPoolingKernelBase::GetCommonKernelsData(const Params& params, auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point); kernel.arguments.push_back({ArgumentDescriptor::Types::INPUT, 1}); if (orgParams.mode == PoolType::DEFORMABLE_BILINEAR && !orgParams.no_trans) kernel.arguments.push_back({ArgumentDescriptor::Types::INPUT, 2}); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/scatter_update/scatter_update_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/scatter_update/scatter_update_kernel_ref.cpp index 352db1e9b20f42..af73c0c4979715 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/scatter_update/scatter_update_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/scatter_update/scatter_update_kernel_ref.cpp @@ -87,7 +87,7 @@ static inline std::string GetOrderString(std::vector& order) { 
std::string order_str = order[0]; for (size_t i = 1; i < order.size(); i++) order_str += ", " + order[i]; - + return order_str; } @@ -114,7 +114,7 @@ static std::string GetUpdatesIndexOrder(const scatter_update_params& params, siz std::string FYX_indices_size = "(INPUT1_FEATURE_NUM * INPUT1_SIZE_Y * INPUT1_SIZE_X)"; std::string YX_indices_size = "(INPUT1_SIZE_Y * INPUT1_SIZE_X)"; std::string X_indices_size = "(INPUT1_SIZE_X)"; - + // Shift indices of ScatterUpdate updates input related to Indices dims for (size_t i = default_order.size() - 1; i > (axis + indices_non_empty_dims - 1); i--) default_order[i] = default_order[i - indices_non_empty_dims + 1]; @@ -141,76 +141,65 @@ static std::string GetUpdatesIndexOrder(const scatter_update_params& params, siz } CommonDispatchData ScatterUpdateKernelRef::SetDefault(const scatter_update_params& params, const optional_params&, bool is_second) const { - CommonDispatchData runInfo; + CommonDispatchData dispatchData; const auto& output = params.output; - std::vector global(3); const size_t indices_size = params.inputs[1].LogicalSize(); switch (params.inputs[0].GetLayout()) { case DataLayout::bfyx: - global = {output.X().v, output.Y().v, output.Feature().v * output.Batch().v}; + dispatchData.gws = {output.X().v, output.Y().v, output.Feature().v * output.Batch().v}; if (is_second) { if (params.axis == ScatterUpdateAxis::BATCH) - global[2] = indices_size * output.Feature().v; + dispatchData.gws[2] = indices_size * output.Feature().v; else if (params.axis == ScatterUpdateAxis::FEATURE) - global[2] = indices_size * output.Batch().v; + dispatchData.gws[2] = indices_size * output.Batch().v; else if (params.axis == ScatterUpdateAxis::Y) - global[1] = indices_size; + dispatchData.gws[1] = indices_size; else - global[0] = indices_size; + dispatchData.gws[0] = indices_size; } break; case DataLayout::bfzyx: - global = {output.X().v * output.Y().v, output.Z().v, output.Feature().v * output.Batch().v}; + dispatchData.gws = {output.X().v * 
output.Y().v, output.Z().v, output.Feature().v * output.Batch().v}; if (is_second) { if (params.axis == ScatterUpdateAxis::BATCH) - global[2] = indices_size * output.Feature().v; + dispatchData.gws[2] = indices_size * output.Feature().v; else if (params.axis == ScatterUpdateAxis::FEATURE) - global[2] = indices_size * output.Batch().v; + dispatchData.gws[2] = indices_size * output.Batch().v; else if (params.axis == ScatterUpdateAxis::Z) - global[1] = indices_size; + dispatchData.gws[1] = indices_size; else if (params.axis == ScatterUpdateAxis::Y) - global[0] = indices_size * output.X().v; + dispatchData.gws[0] = indices_size * output.X().v; else - global[0] = indices_size * output.Y().v; + dispatchData.gws[0] = indices_size * output.Y().v; } break; case DataLayout::bfwzyx: - global = {output.X().v * output.Y().v, output.Z().v * output.W().v, output.Feature().v * output.Batch().v}; + dispatchData.gws = {output.X().v * output.Y().v, output.Z().v * output.W().v, output.Feature().v * output.Batch().v}; if (is_second) { if (params.axis == ScatterUpdateAxis::BATCH) - global[2] = indices_size * output.Feature().v; + dispatchData.gws[2] = indices_size * output.Feature().v; else if (params.axis == ScatterUpdateAxis::FEATURE) - global[2] = indices_size * output.Batch().v; + dispatchData.gws[2] = indices_size * output.Batch().v; else if (params.axis == ScatterUpdateAxis::Z) - global[1] = indices_size * output.W().v; + dispatchData.gws[1] = indices_size * output.W().v; else if (params.axis == ScatterUpdateAxis::W) - global[1] = indices_size * output.Z().v; + dispatchData.gws[1] = indices_size * output.Z().v; else if (params.axis == ScatterUpdateAxis::Y) - global[0] = indices_size * output.X().v; + dispatchData.gws[0] = indices_size * output.X().v; else - global[0] = indices_size * output.Y().v; + dispatchData.gws[0] = indices_size * output.Y().v; } break; default: break; } - - std::vector local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - runInfo.gws0 = 
global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; - - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; - runInfo.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - return runInfo; + return dispatchData; } static std::string GetOutputIndexOnAxis(const scatter_update_params& params, size_t axis) { @@ -270,7 +259,7 @@ KernelsData ScatterUpdateKernelRef::GetKernelsData(const Params& params, const o const scatter_update_params& orgParams = static_cast(params); const size_t indices_size = orgParams.inputs[1].LogicalSize(); int start_with_iteration = 0; - + // if dim of output along axis is equal to logical size of indices, we miss copying kernel if (orgParams.inputs[0].Extract(orgParams.inputs[0].GetLayout(), Tensor::DataChannelName(orgParams.axis), orgParams.inputs[0].GetDims()).v == indices_size) { start_with_iteration = 1; @@ -281,7 +270,7 @@ KernelsData ScatterUpdateKernelRef::GetKernelsData(const Params& params, const o auto cldnn_jit = GetJitConstants(newParams); for (int i = start_with_iteration; i < 2; i++) { - auto runInfo = SetDefault(newParams, options, (i == 1)); + auto dispatchData = SetDefault(newParams, options, (i == 1)); auto entry_point = GetEntryPoint(kernelName, newParams.layerID, options); if (i == 1){ @@ -291,11 +280,11 @@ KernelsData ScatterUpdateKernelRef::GetKernelsData(const Params& params, const o clKernelData& kernel = kd.kernels[i - start_with_iteration]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point, "", false, false, 3, GetFusedPrimitiveInputsCount(params)); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point, "", false, false, 3, GetFusedPrimitiveInputsCount(params)); } kd.estimatedTime = DONT_USE_IF_HAVE_SOMETHING_ELSE; - + return {kd}; } } // namespace kernel_selector diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/select/select_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/select/select_kernel_base.cpp index 8463a0e381e864..7807c851c1a126 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/select/select_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/select/select_kernel_base.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -98,7 +98,7 @@ JitConstants SelectKernelBase::GetJitConstants(const select_params& params) cons } SelectKernelBase::DispatchData SelectKernelBase::SetDefault(const select_params& params) const { - DispatchData kd; + DispatchData dispatchData; const auto& out = params.output; @@ -111,16 +111,12 @@ SelectKernelBase::DispatchData SelectKernelBase::SetDefault(const select_params& gws.push_back(1U); } - kd.gws0 = gws[0]; - kd.gws1 = gws[1]; - kd.gws2 = gws[2] * gws[3]; + dispatchData.gws[0] = gws[0]; + dispatchData.gws[1] = gws[1]; + dispatchData.gws[2] = gws[2] * gws[3]; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - auto local = GetOptimalLocalWorkGroupSizes({kd.gws0, kd.gws1, kd.gws2}, params.engineInfo); - kd.lws0 = local[0]; - kd.lws1 = local[1]; - kd.lws2 = local[2]; - - return kd; + return dispatchData; } KernelsData SelectKernelBase::GetCommonKernelsData(const Params& params, const optional_params& options) const { @@ -135,12 +131,12 @@ KernelsData SelectKernelBase::GetCommonKernelsData(const Params& params, const o auto cldnn_jit = GetJitConstants(newParams); std::string jit = CreateJit(kernelName, cldnn_jit, entry_point); - DispatchData runInfo = SetDefault(newParams); + DispatchData dispatchData = SetDefault(newParams); auto& 
kernel = kd.kernels[0]; - kernel.workGroups.global = {runInfo.gws0, runInfo.gws1, runInfo.gws2}; - kernel.workGroups.local = {runInfo.lws0, runInfo.lws1, runInfo.lws2}; + kernel.workGroups.global = dispatchData.gws; + kernel.workGroups.local = dispatchData.lws; kernel.kernelString = GetKernelString(kernelName, jit, entry_point, params.engineInfo, DEFAULT); kernel.arguments = GetArgsDesc((uint32_t)newParams.inputs.size(), false, false); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/shuffle_channels/shuffle_channels_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/shuffle_channels/shuffle_channels_kernel_ref.cpp index a1284985d694ca..68ce9240611d93 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/shuffle_channels/shuffle_channels_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/shuffle_channels/shuffle_channels_kernel_ref.cpp @@ -55,23 +55,14 @@ bool ShuffleChannelsKernelRef::Validate(const Params& p, const optional_params& CommonDispatchData ShuffleChannelsKernelRef::SetDefault(const shuffle_channels_params& params, const optional_params&) const { - CommonDispatchData runInfo; + CommonDispatchData dispatchData; - std::vector global = {params.output.Batch().v, - params.output.Feature().v, - params.output.Y().v * params.output.X().v}; + dispatchData.gws = { params.output.Batch().v, + params.output.Feature().v, + params.output.Y().v * params.output.X().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; - - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; - - return runInfo; + return dispatchData; } JitConstants ShuffleChannelsKernelRef::GetJitConstants(const shuffle_channels_params& params) 
const { @@ -109,14 +100,14 @@ KernelsData ShuffleChannelsKernelRef::GetKernelsData(const Params& params, const assert(params.GetType() == KernelType::SHUFFLE_CHANNELS); - auto runInfo = SetDefault(newParams, options); + auto dispatchData = SetDefault(newParams, options); auto entry_point = GetEntryPoint(kernelName, newParams.layerID, options); auto cldnn_jit = GetJitConstants(newParams); std::string jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point); kd.estimatedTime = DONT_USE_IF_HAVE_SOMETHING_ELSE; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_items_class_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_items_class_kernel_base.cpp index 25ca988612ffe2..5d9547a5c4a435 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_items_class_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_items_class_kernel_base.cpp @@ -65,8 +65,8 @@ std::vector SoftmaxItemsClassKernelBase::GetSoftmaxDimGlobalSizes(Softma } } -JitConstants SoftmaxItemsClassKernelBase::GetJitConstants(const softmax_params& params, DispatchData kd) const { - auto jit = SoftmaxKernelBase::GetJitConstants(params, kd); +JitConstants SoftmaxItemsClassKernelBase::GetJitConstants(const softmax_params& params, DispatchData dispatchData) const { + auto jit = SoftmaxKernelBase::GetJitConstants(params, dispatchData); switch (params.dim) { case SoftmaxDim::X: diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_items_class_kernel_base.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_items_class_kernel_base.h index 
2d401039cda0ee..987778c7f41104 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_items_class_kernel_base.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_items_class_kernel_base.h @@ -24,7 +24,7 @@ class SoftmaxItemsClassKernelBase : public SoftmaxKernelBase { virtual ~SoftmaxItemsClassKernelBase() {} protected: - JitConstants GetJitConstants(const softmax_params& params, DispatchData kd) const override; + JitConstants GetJitConstants(const softmax_params& params, DispatchData dispatchData) const override; static ParamsKey GetDefaultSupportedKey(); static std::vector GetSoftmaxDimGlobalSizes(SoftmaxDim dim, const DataTensor& output); }; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_base.cpp index 065520e6ed1089..77b35351c0b767 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_base.cpp @@ -16,43 +16,42 @@ namespace kernel_selector { JitConstants SoftmaxKernelBase::GetJitConstants(const softmax_params& params, - SoftmaxKernelBase::DispatchData kd) const { + SoftmaxKernelBase::DispatchData dispatchData) const { JitConstants mem_consts = MakeBaseParamsJitConstants(params); mem_consts.AddConstants({MakeJitConstant("ALONG_" + toString(params.dim), "")}); mem_consts.AddConstants({ - MakeJitConstant("ITEMS_NUM", kd.itemsNum), - MakeJitConstant("LWS", kd.lws0), - MakeJitConstant("GWS", kd.gws0), - MakeJitConstant("DATA_SETS_COUNT", kd.dataSetsCount), - MakeJitConstant("DATA_SET_SIZE", kd.dataSetSize), - MakeJitConstant("LEFTOVERS", kd.leftovers), + MakeJitConstant("ITEMS_NUM", dispatchData.itemsNum), + MakeJitConstant("LWS", dispatchData.lws[0]), + 
MakeJitConstant("GWS", dispatchData.gws[0]), + MakeJitConstant("DATA_SETS_COUNT", dispatchData.dataSetsCount), + MakeJitConstant("DATA_SET_SIZE", dispatchData.dataSetSize), + MakeJitConstant("LEFTOVERS", dispatchData.leftovers), }); return mem_consts; } -SoftmaxKernelBase::DispatchData SoftmaxKernelBase::SetDefault(const softmax_params& params, +SoftmaxKernelBase::DispatchData SoftmaxKernelBase::SetDefault(const softmax_params&, const optional_params&) const { - DispatchData runInfo; + DispatchData dispatchData; - runInfo.gws0 = 1; - runInfo.gws1 = 1; - runInfo.gws2 = 1; + dispatchData.gws[0] = 1; + dispatchData.gws[1] = 1; + dispatchData.gws[2] = 1; - runInfo.lws0 = 1; - runInfo.lws1 = 1; - runInfo.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; - runInfo.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; - runInfo.leftovers = 0; - runInfo.itemsNum = 0; - runInfo.normIndex = 0; - runInfo.dataSetsCount = 0; - runInfo.dataSetSize = 0; + dispatchData.leftovers = 0; + dispatchData.itemsNum = 0; + dispatchData.normIndex = 0; + dispatchData.dataSetsCount = 0; + dispatchData.dataSetSize = 0; - return runInfo; + return dispatchData; } bool SoftmaxKernelBase::Validate(const Params& p, const optional_params& o) const { @@ -71,15 +70,15 @@ KernelsData SoftmaxKernelBase::GetCommonKernelsData(const Params& params, const const softmax_params& orgParams = static_cast(params); KernelData kd = KernelData::Default(params); - auto runInfo = SetDefault(orgParams, options); - auto cldnn_jit = GetJitConstants(orgParams, runInfo); + auto dispatchData = SetDefault(orgParams, options); + auto cldnn_jit = GetJitConstants(orgParams, dispatchData); auto entry_point = GetEntryPoint(kernelName, orgParams.layerID, options); auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point); + FillCLKernelData(kernel, dispatchData, 
params.engineInfo, kernelName, jit, entry_point); - kd.estimatedTime = runInfo.efficiency; + kd.estimatedTime = dispatchData.efficiency; return {kd}; } @@ -118,12 +117,12 @@ SoftmaxKernelBase::DispatchData SoftmaxKernelBaseBF::SetDefault(const softmax_pa const optional_params& options) const { const auto& input = params.inputs[0]; - DispatchData kd = Parent::SetDefault(params, options); + DispatchData dispatchData = Parent::SetDefault(params, options); auto flatten_input = input.FlattenFeatureAndSpatials(); - kd.dataSetSize = flatten_input.Feature().v; - kd.dataSetsCount = input.Batch().v; + dispatchData.dataSetSize = flatten_input.Feature().v; + dispatchData.dataSetsCount = input.Batch().v; - return kd; + return dispatchData; } -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_base.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_base.h index d01e91d2ec2d0a..937d0ebdd42428 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_base.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_base.h @@ -59,7 +59,7 @@ class SoftmaxKernelBase : public common_kernel_base { protected: virtual bool Validate(const Params&, const optional_params&) const; - virtual JitConstants GetJitConstants(const softmax_params& params, DispatchData kd) const; + virtual JitConstants GetJitConstants(const softmax_params& params, DispatchData dispatchData) const; virtual DispatchData SetDefault(const softmax_params& params, const optional_params& optParams) const; KernelsData GetCommonKernelsData(const Params& params, const optional_params& optParams) const; }; @@ -74,4 +74,4 @@ class SoftmaxKernelBaseBF : public SoftmaxKernelBase { bool Validate(const Params&, const optional_params&) const override; 
DispatchData SetDefault(const softmax_params& params, const optional_params& optParams) const override; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_bf.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_bf.cpp index 8a33e175b4f197..ae5d29f87c1c18 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_bf.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_bf.cpp @@ -35,37 +35,37 @@ ParamsKey SoftmaxKernel_bf::GetSupportedKey() const { SoftmaxKernel_bf::Parent::DispatchData SoftmaxKernel_bf::SetDefault(const softmax_params& params, const optional_params& optParams) const { - auto kd = Parent::SetDefault(params, optParams); + auto dispatchData = Parent::SetDefault(params, optParams); // start with 1 thread per data set - kd.gws0 = 1; - kd.gws1 = kd.dataSetsCount; - kd.itemsNum = kd.dataSetSize; + dispatchData.gws[0] = 1; + dispatchData.gws[1] = dispatchData.dataSetsCount; + dispatchData.itemsNum = dispatchData.dataSetSize; - kd.normIndex = 0; + dispatchData.normIndex = 0; // We have two units of data per work item in current implementation. - auto local_mem_per_wi = 2 * (kd.fp16UnitUsed ? sizeof(short) : sizeof(float)); + auto local_mem_per_wi = 2 * BytesPerElement(params.inputs[0].GetDType()); // Combining device execution and local memory restrictions to compute maximum possible LWS. auto max_lws = std::min(params.engineInfo.maxWorkGroupSize, params.engineInfo.maxLocalMemSize / local_mem_per_wi); - kd.lws0 = 1; + dispatchData.lws[0] = 1; // Compute maximum possible LWS that does not exceed device capabilities and optimizes number of global memory // reads. 
- while ((kd.itemsNum > 32 || kd.lws0 < kd.itemsNum) && (2 * kd.lws0 <= max_lws)) { - kd.lws0 *= 2; - kd.itemsNum /= 2; + while ((dispatchData.itemsNum > 32 || dispatchData.lws[0] < dispatchData.itemsNum) && (2 * dispatchData.lws[0] <= max_lws)) { + dispatchData.lws[0] *= 2; + dispatchData.itemsNum /= 2; } - assert((kd.itemsNum + 1) * kd.lws0 >= kd.dataSetSize && "More than 'lws0' items per batch remains! Lws too small?"); + assert((dispatchData.itemsNum + 1) * dispatchData.lws[0] >= dispatchData.dataSetSize && "More than 'lws[0]' items per batch remains! Lws too small?"); - kd.gws0 = kd.lws0; - kd.leftovers = kd.dataSetSize % kd.lws0; + dispatchData.gws[0] = dispatchData.lws[0]; + dispatchData.leftovers = dispatchData.dataSetSize % dispatchData.lws[0]; - assert(kd.itemsNum > 0 && kd.lws0 && kd.gws0 > 0); + assert(dispatchData.itemsNum > 0 && dispatchData.lws[0] && dispatchData.gws[0] > 0); - kd.efficiency = FORCE_PRIORITY_6; - return kd; + dispatchData.efficiency = FORCE_PRIORITY_6; + return dispatchData; } KernelsData SoftmaxKernel_bf::GetKernelsData(const Params& params, const optional_params& optionalParams) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_fb.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_fb.cpp index 27d0a1dfd4549f..f31a03d9859ba2 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_fb.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_fb.cpp @@ -35,36 +35,36 @@ ParamsKey SoftmaxKernel_fb::GetSupportedKey() const { SoftmaxKernel_fb::Parent::DispatchData SoftmaxKernel_fb::SetDefault(const softmax_params& params, const optional_params& optParams) const { - auto kd = Parent::SetDefault(params, optParams); + auto dispatchData = Parent::SetDefault(params, optParams); // start with 1 thread per data set - kd.gws0 = 
kd.dataSetsCount; - kd.gws1 = 1; - kd.itemsNum = kd.dataSetSize; + dispatchData.gws[0] = dispatchData.dataSetsCount; + dispatchData.gws[1] = 1; + dispatchData.itemsNum = dispatchData.dataSetSize; - kd.normIndex = 1; + dispatchData.normIndex = 1; // We have two units of data per work item in current implementation. - auto local_mem_per_wi = 2 * (kd.fp16UnitUsed ? sizeof(short) : sizeof(float)); + auto local_mem_per_wi = 2 * BytesPerElement(params.inputs[0].GetDType()); // Combining device execution and local memory restrictions to compute maximum possible LWS. auto max_lws = static_cast( std::min(params.engineInfo.maxWorkGroupSize, params.engineInfo.maxLocalMemSize / local_mem_per_wi)); - kd.lws0 = std::min(kd.dataSetsCount, max_lws); + dispatchData.lws[0] = std::min(dispatchData.dataSetsCount, max_lws); // Compute maximum possible LWS that does not exceed device capabilities and optimizes number of global memory // reads. - while ((kd.itemsNum > 32 || kd.lws0 < kd.itemsNum) && (2 * kd.lws0 <= max_lws)) { - kd.lws0 *= 2; - kd.itemsNum /= 2; + while ((dispatchData.itemsNum > 32 || dispatchData.lws[0] < dispatchData.itemsNum) && (2 * dispatchData.lws[0] <= max_lws)) { + dispatchData.lws[0] *= 2; + dispatchData.itemsNum /= 2; } - kd.gws0 = kd.lws0; - kd.gws1 = 1; - kd.leftovers = (kd.dataSetSize * kd.dataSetsCount) % kd.lws0; + dispatchData.gws[0] = dispatchData.lws[0]; + dispatchData.gws[1] = 1; + dispatchData.leftovers = (dispatchData.dataSetSize * dispatchData.dataSetsCount) % dispatchData.lws[0]; - assert(kd.itemsNum > 0 && kd.lws0 && kd.gws0 > 0); + assert(dispatchData.itemsNum > 0 && dispatchData.lws[0] && dispatchData.gws[0] > 0); - kd.efficiency = FORCE_PRIORITY_6; - return kd; + dispatchData.efficiency = FORCE_PRIORITY_6; + return dispatchData; } bool kernel_selector::SoftmaxKernel_fb::Validate(const Params& params, const optional_params& o) const { @@ -74,8 +74,7 @@ bool kernel_selector::SoftmaxKernel_fb::Validate(const Params& params, const opt const auto& 
softmax_params = static_cast(params); - auto kd = Parent::SetDefault(softmax_params, o); - auto local_mem_per_wi = 2 * (kd.fp16UnitUsed ? sizeof(short) : sizeof(float)); + auto local_mem_per_wi = 2 * BytesPerElement(softmax_params.inputs[0].GetDType()); auto max_lws = static_cast( std::min(params.engineInfo.maxWorkGroupSize, params.engineInfo.maxLocalMemSize / local_mem_per_wi)); diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_items_class_optimized.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_items_class_optimized.cpp index 4af7fc052bc0ef..3d6cdef27d3d48 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_items_class_optimized.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_items_class_optimized.cpp @@ -24,7 +24,7 @@ ParamsKey SoftmaxKerneItemsClassOptimized::GetSupportedKey() const { return GetD SoftmaxKerneItemsClassOptimized::Parent::DispatchData SoftmaxKerneItemsClassOptimized::SetDefault( const softmax_params& params, const optional_params& optParams) const { - auto runInfo = Parent::SetDefault(params, optParams); + auto dispatchData = Parent::SetDefault(params, optParams); auto& input = params.inputs[0]; @@ -50,30 +50,27 @@ SoftmaxKerneItemsClassOptimized::Parent::DispatchData SoftmaxKerneItemsClassOpti break; } - runInfo.gws0 = global[0]; - runInfo.gws1 = - global[1] * workitems_per_classes; // we multiply it by workitems_per_classes because we split computations of - // one "full item classes output" into multiple workitems by "full item - // classes output" i mean N outputs where N is number of item classes. 
- runInfo.gws2 = global[2]; + dispatchData.gws[0] = global[0]; + dispatchData.gws[1] = global[1] * workitems_per_classes; // we multiply it by workitems_per_classes because we split computations of + // one "full item classes output" into multiple workitems by "full item + // classes output" i mean N outputs where N is number of item classes. + dispatchData.gws[2] = global[2]; - runInfo.lws0 = 1; - runInfo.lws1 = workitems_per_classes; - runInfo.lws2 = 1; + dispatchData.lws = { 1, workitems_per_classes, 1 }; - runInfo.leftovers = item_class_count % workitems_per_classes; + dispatchData.leftovers = item_class_count % workitems_per_classes; if (item_class_count >= 32) { - runInfo.efficiency = FORCE_PRIORITY_7; + dispatchData.efficiency = FORCE_PRIORITY_7; } else { - runInfo.efficiency = DONT_USE_IF_HAVE_SOMETHING_ELSE; + dispatchData.efficiency = DONT_USE_IF_HAVE_SOMETHING_ELSE; } - return runInfo; + return dispatchData; } -JitConstants SoftmaxKerneItemsClassOptimized::GetJitConstants(const softmax_params& params, DispatchData kd) const { - auto jit = SoftmaxItemsClassKernelBase::GetJitConstants(params, kd); +JitConstants SoftmaxKerneItemsClassOptimized::GetJitConstants(const softmax_params& params, DispatchData dispatchData) const { + auto jit = SoftmaxItemsClassKernelBase::GetJitConstants(params, dispatchData); jit.AddConstant(MakeJitConstant("WORKITEMS_PER_CLASSES", workitems_per_classes)); jit.AddConstant(MakeJitConstant("HAS_DRIVER_PROBLEMS", params.engineInfo.bIMADSupport)); @@ -84,4 +81,4 @@ KernelsData SoftmaxKerneItemsClassOptimized::GetKernelsData(const Params& params const optional_params& options) const { return GetCommonKernelsData(params, options); } -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_items_class_optimized.h 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_items_class_optimized.h index 2dfb35a15ceb55..354f28b01f9a1e 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_items_class_optimized.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_items_class_optimized.h @@ -27,7 +27,7 @@ class SoftmaxKerneItemsClassOptimized : public SoftmaxItemsClassKernelBase { ParamsKey GetSupportedKey() const override; protected: - JitConstants GetJitConstants(const softmax_params& params, DispatchData kd) const override; + JitConstants GetJitConstants(const softmax_params& params, DispatchData dispatchData) const override; DispatchData SetDefault(const softmax_params& params, const optional_params& optParams) const override; }; -} // namespace kernel_selector \ No newline at end of file +} // namespace kernel_selector diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_ref.cpp index 809b9e5ec981eb..8a3f7ec75034ea 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/softmax/softmax_kernel_ref.cpp @@ -20,25 +20,17 @@ ParamsKey SoftmaxKernelRef::GetSupportedKey() const { return GetDefaultSupported SoftmaxKernelRef::Parent::DispatchData SoftmaxKernelRef::SetDefault(const softmax_params& params, const optional_params& optParams) const { - auto runInfo = Parent::SetDefault(params, optParams); + auto dispatchData = Parent::SetDefault(params, optParams); - const auto global = GetSoftmaxDimGlobalSizes(params.dim, params.output); + dispatchData.gws = GetSoftmaxDimGlobalSizes(params.dim, params.output); - assert(global.size() == 3); + 
assert(dispatchData.gws.size() == 3); - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; + dispatchData.efficiency = DONT_USE_IF_HAVE_SOMETHING_ELSE; - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; - - runInfo.efficiency = DONT_USE_IF_HAVE_SOMETHING_ELSE; - - return runInfo; + return dispatchData; } KernelsData SoftmaxKernelRef::GetKernelsData(const Params& params, const optional_params& options) const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/space_to_batch/space_to_batch_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/space_to_batch/space_to_batch_kernel_base.cpp index 169ff62784c923..1b21b6664dd07e 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/space_to_batch/space_to_batch_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/space_to_batch/space_to_batch_kernel_base.cpp @@ -41,27 +41,16 @@ bool SpaceToBatchKernelBase::Validate(const Params& p, const optional_params& o) CommonDispatchData SpaceToBatchKernelBase::SetDefault(const space_to_batch_params& params, const optional_params&) const { const auto& out = params.output; - CommonDispatchData runInfo; - std::vector global; - std::vector local; - + CommonDispatchData dispatchData; if (out.GetLayout() == DataLayout::b_fs_yx_fsv16 && out.Feature().v % 16 == 0) { - global = { out.Batch().v, out.Feature().v, out.Y().v * out.X().v }; - local = {1, 16, 1}; + dispatchData.gws = { out.Batch().v, out.Feature().v, out.Y().v * out.X().v }; + dispatchData.lws = {1, 16, 1}; } else { - global = { out.Batch().v, out.Feature().v, out.W().v * out.Z().v * out.Y().v * out.X().v }; - local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); + 
dispatchData.gws = { out.Batch().v, out.Feature().v, out.W().v * out.Z().v * out.Y().v * out.X().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); } - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; - - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; - - return runInfo; + return dispatchData; } JitConstants SpaceToBatchKernelBase::GetJitConstants(const space_to_batch_params& params) const { @@ -101,14 +90,14 @@ KernelsData SpaceToBatchKernelBase::GetCommonKernelsData(const Params& params, c return {}; } - auto runInfo = SetDefault(newParams, options); + auto dispatchData = SetDefault(newParams, options); auto entry_point = GetEntryPoint(kernelName, newParams.layerID, options); auto cldnn_jit = GetJitConstants(newParams); std::string jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point, + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point, "", false, false, 1, GetFusedPrimitiveInputsCount(params)); kd.estimatedTime = estimatedTime; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/space_to_depth/space_to_depth_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/space_to_depth/space_to_depth_kernel_ref.cpp index 8a0b22841e8bbf..23337fb746973a 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/space_to_depth/space_to_depth_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/space_to_depth/space_to_depth_kernel_ref.cpp @@ -59,23 +59,14 @@ bool SpaceToDepthKernelRef::Validate(const Params& p, const optional_params& o) CommonDispatchData SpaceToDepthKernelRef::SetDefault(const space_to_depth_params& params, const optional_params&) const { - CommonDispatchData runInfo; + 
CommonDispatchData dispatchData; - std::vector global = {params.output.Batch().v, - params.output.Feature().v, - params.output.Z().v * params.output.Y().v * params.output.X().v}; + dispatchData.gws = { params.output.Batch().v, + params.output.Feature().v, + params.output.Z().v * params.output.Y().v * params.output.X().v }; + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - auto local = GetOptimalLocalWorkGroupSizes(global, params.engineInfo); - - runInfo.gws0 = global[0]; - runInfo.gws1 = global[1]; - runInfo.gws2 = global[2]; - - runInfo.lws0 = local[0]; - runInfo.lws1 = local[1]; - runInfo.lws2 = local[2]; - - return runInfo; + return dispatchData; } JitConstants SpaceToDepthKernelRef::GetJitConstants(const space_to_depth_params& params) const { @@ -111,14 +102,14 @@ KernelsData SpaceToDepthKernelRef::GetKernelsData(const Params& params, const op return {}; } - auto runInfo = SetDefault(newParams, options); + auto dispatchData = SetDefault(newParams, options); auto entry_point = GetEntryPoint(kernelName, newParams.layerID, options); auto cldnn_jit = GetJitConstants(newParams); std::string jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point, + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point, DEFAULT, false, false, 1, GetFusedPrimitiveInputsCount(params)); kd.estimatedTime = DONT_USE_IF_HAVE_SOMETHING_ELSE; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/strided_slice/strided_slice_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/strided_slice/strided_slice_kernel_ref.cpp index b5b0acfdaa9c75..5c3bbcc3104964 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/strided_slice/strided_slice_kernel_ref.cpp +++ 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/strided_slice/strided_slice_kernel_ref.cpp @@ -89,25 +89,18 @@ bool StridedSliceKernelRef::Validate(const Params& p, const optional_params& o) } CommonDispatchData StridedSliceKernelRef::SetDefault(const strided_slice_params& params, const optional_params&) const { - CommonDispatchData runInfo; + CommonDispatchData dispatchData; // If the new_axis_mask is set, then begin, end, and stride are ignored // and a new length 1 dimension is adding. Input data just copying to output // TODO: remove data copying in case where only shape size changing - std::vector gws = {params.output.Batch().v, params.output.Feature().v, - params.output.Z().v * params.output.Y().v * params.output.X().v}; + dispatchData.gws = { params.output.Batch().v, + params.output.Feature().v, + params.output.Z().v * params.output.Y().v * params.output.X().v }; - auto lws = GetOptimalLocalWorkGroupSizes(gws, params.engineInfo); + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); - runInfo.gws0 = gws[0]; - runInfo.gws1 = gws[1]; - runInfo.gws2 = gws[2]; - - runInfo.lws0 = lws[0]; - runInfo.lws1 = lws[1]; - runInfo.lws2 = lws[2]; - - return runInfo; + return dispatchData; } JitConstants StridedSliceKernelRef::GetJitConstants(const strided_slice_params& params) const { @@ -167,14 +160,14 @@ KernelsData StridedSliceKernelRef::GetKernelsData(const Params& params, const op assert(params.GetType() == KernelType::STRIDED_SLICE); - auto runInfo = SetDefault(newParams, options); + auto dispatchData = SetDefault(newParams, options); auto entry_point = GetEntryPoint(kernelName, newParams.layerID, options); auto cldnn_jit = GetJitConstants(newParams); std::string jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, 
entry_point); kd.estimatedTime = DONT_USE_IF_HAVE_SOMETHING_ELSE; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/tile/tile_kernel_ref.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/tile/tile_kernel_ref.cpp index dc95efd1f2fd04..de2a1e9659e14b 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/tile/tile_kernel_ref.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/tile/tile_kernel_ref.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -59,7 +59,7 @@ ParamsKey TileKernelRef::GetSupportedKey() const { } CommonDispatchData TileKernelRef::SetDefault(const tile_params& params, const optional_params&) const { - CommonDispatchData runInfo; + CommonDispatchData dispatchData; auto in = params.inputs[0]; @@ -77,26 +77,24 @@ CommonDispatchData TileKernelRef::SetDefault(const tile_params& params, const op } if (inner_size > 1) { - runInfo.gws0 = outer_size; - runInfo.gws1 = inner_size; - runInfo.gws2 = 1; + dispatchData.gws[0] = outer_size; + dispatchData.gws[1] = inner_size; + dispatchData.gws[2] = 1; - runInfo.lws0 = 1; - runInfo.lws1 = 1; - runInfo.lws2 = 1; + dispatchData.lws[0] = 1; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; } else { - runInfo.gws0 = Align(outer_size, 16); - runInfo.gws1 = 1; - runInfo.gws2 = 1; + dispatchData.gws[0] = Align(outer_size, 16); + dispatchData.gws[1] = 1; + dispatchData.gws[2] = 1; - runInfo.lws0 = 16; - runInfo.lws1 = 1; - runInfo.lws2 = 1; + dispatchData.lws[0] = 16; + dispatchData.lws[1] = 1; + dispatchData.lws[2] = 1; } - runInfo.fp16UnitUsed = params.inputs[0].GetDType() == Datatype::F16; - - return runInfo; + return dispatchData; } JitConstants TileKernelRef::GetJitConstants(const tile_params& params) const { 
@@ -135,14 +133,14 @@ KernelsData TileKernelRef::GetKernelsData(const Params& params, const optional_p KernelData kd = KernelData::Default(params); tile_params& newParams = *static_cast(kd.params.get()); - auto runInfo = SetDefault(newParams, options); + auto dispatchData = SetDefault(newParams, options); auto entry_point = GetEntryPoint(kernelName, newParams.layerID, options); auto cldnn_jit = GetJitConstants(newParams); std::string jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entry_point); + FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point); kd.estimatedTime = DONT_USE_IF_HAVE_SOMETHING_ELSE; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/common/common_kernel_base.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/common/common_kernel_base.cpp index a6f3bb2bbea245..4bf514bbae2d18 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/common/common_kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/common/common_kernel_base.cpp @@ -160,30 +160,6 @@ std::shared_ptr common_kernel_base::GetKernelString(const std::str return kernel_string; } -static void Check_RunInfoData(const std::string& kernelName, const kernel_selector::CommonDispatchData& runInfo) { - if (runInfo.lws0 * runInfo.lws1 * runInfo.lws2 > 256) { - std::cout << "ERROR: dispatch data for kernel: " << kernelName << " LWS cannot be greater than 256!\n" - << std::endl; - } - if (runInfo.gws0 == 0 || runInfo.gws1 == 0 || runInfo.gws2 == 0 || runInfo.lws0 == 0 || runInfo.lws1 == 0 || - runInfo.lws2 == 0) { - std::cout << "ERROR: dispatch data for kernel: " << kernelName << " dispatch data cannot contain zeros!" 
- << std::endl; - } - if (runInfo.gws0 % runInfo.lws0 != 0) { - std::cout << "ERROR: dispatch data for kernel: " << kernelName << " is incorrect: GWS0: " << runInfo.gws0 - << " LWS0: " << runInfo.lws0 << std::endl; - } - if (runInfo.gws1 % runInfo.lws1 != 0) { - std::cout << "ERROR: dispatch data for kernel: " << kernelName << " is incorrect: GWS1: " << runInfo.gws1 - << " LWS1: " << runInfo.lws1 << std::endl; - } - if (runInfo.gws2 % runInfo.lws2 != 0) { - std::cout << "ERROR: dispatch data for kernel: " << kernelName << " is incorrect: GWS2: " << runInfo.gws2 - << " LWS2: " << runInfo.lws2 << std::endl; - } -} - uint32_t common_kernel_base::GetFusedPrimitiveInputsCount(const Params ¶ms) const { auto p = dynamic_cast(params); uint32_t fused_deps_total = 0; @@ -195,7 +171,7 @@ uint32_t common_kernel_base::GetFusedPrimitiveInputsCount(const Params ¶ms) } void common_kernel_base::FillCLKernelData(clKernelData& kernel, - const CommonDispatchData& runInfo, + const CommonDispatchData& dispatchData, const EngineInfo& engine_info, const std::string& kernelMapName, const std::string& jit, @@ -205,11 +181,10 @@ void common_kernel_base::FillCLKernelData(clKernelData& kernel, bool bias, int number_of_inputs, uint32_t number_of_inputs_for_fused_prims) const { - Check_RunInfoData(kernelMapName, runInfo); - kernel.workGroups.global = {runInfo.gws0, runInfo.gws1, runInfo.gws2}; - kernel.workGroups.local = {runInfo.lws0, runInfo.lws1, runInfo.lws2}; + CheckDispatchData(kernelMapName, dispatchData); + kernel.workGroups.global = dispatchData.gws; + kernel.workGroups.local = dispatchData.lws; kernel.kernelString = GetKernelString(kernelMapName, jit, entryPoint, engine_info, exeMode); - kernel.arguments = - GetArgsDesc(number_of_inputs, weights, bias, number_of_inputs_for_fused_prims); + kernel.arguments = GetArgsDesc(number_of_inputs, weights, bias, number_of_inputs_for_fused_prims); } } // namespace kernel_selector diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/common/common_kernel_base.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/common/common_kernel_base.h index 3dc1c5ffd535e8..cb89cc1b711eba 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/common/common_kernel_base.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/common/common_kernel_base.h @@ -21,16 +21,6 @@ #include namespace kernel_selector { -struct CommonDispatchData { - // TODO: change it to std::vector - size_t gws0, gws1, gws2; - size_t lws0, lws1, lws2; - bool - fp16UnitUsed; ///< Value indicating that FP16 half precision floating point type will be used (instead of single precision). - float efficiency; - - CommonDispatchData() : gws0(0), gws1(0), gws2(0), lws0(0), lws1(0), lws2(0), fp16UnitUsed(false), efficiency(0.0f){} -}; class common_kernel_base : public KernelBase { public: @@ -58,7 +48,7 @@ class common_kernel_base : public KernelBase { uint32_t GetFusedPrimitiveInputsCount(const Params ¶ms) const; void FillCLKernelData(clKernelData& kernel, - const CommonDispatchData& runInfo, + const CommonDispatchData& dispatchData, const EngineInfo& engine_info, const std::string& kernel_map_name, const std::string& jit, diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/common/jitter.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/common/jitter.cpp index d5664401a290e6..ae8720433356e6 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/common/jitter.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/common/jitter.cpp @@ -475,14 +475,16 @@ class WeightTensorJitConstant : public TensorBaseTJitConstant; - if (l == WeightsLayout::oiyx || l == WeightsLayout::oizyx || l == WeightsLayout::goiyx || + if (l == WeightsLayout::oiyx || + l == WeightsLayout::oizyx || + l == WeightsLayout::goiyx || l == WeightsLayout::goizyx) { args macroNameArgs = {"prefix", "g", "o", "i", "z", "y", "x"}; - const auto name = 
toString(l); - this->calcFunction = FuncBody(name); - this->macroName = MacroName(name, macroNameArgs); + this->calcFunction = FuncBody(layout_name); + this->macroName = MacroName(tensor_name, layout_name, macroNameArgs); this->macroBody = R"V0G0N( \ CAT(prefix, _OFFSET) + \ (x)*CAT(prefix, _X_PITCH) + \ @@ -495,9 +497,8 @@ class WeightTensorJitConstant : public TensorBaseTJitConstantcalcFunction = FuncBody(name); - this->macroName = MacroName(name, macroNameArgs); + this->calcFunction = FuncBody(layout_name); + this->macroName = MacroName(tensor_name, layout_name, macroNameArgs); this->macroBody = R"V0G0N( \ CAT(prefix, _OFFSET) + \ (g)*CAT(prefix, _GROUPS_PITCH) + \ @@ -515,9 +516,8 @@ class WeightTensorJitConstant : public TensorBaseTJitConstantcalcFunction = FuncBody(name); - this->macroName = MacroName(name, macroNameArgs); + this->calcFunction = FuncBody(layout_name); + this->macroName = MacroName(tensor_name, layout_name, macroNameArgs); this->macroBody = R"V0G0N( \ CAT(prefix, _OFFSET) + \ (g * CAT(prefix, _GROUPS_PITCH)) + \ @@ -532,9 +532,8 @@ class WeightTensorJitConstant : public TensorBaseTJitConstantcalcFunction = FuncBody(name); - this->macroName = MacroName(name, macroNameArgs); + this->calcFunction = FuncBody(layout_name); + this->macroName = MacroName(tensor_name, layout_name, macroNameArgs); this->macroBody = R"V0G0N( \ CAT(prefix, _OFFSET) + \ (g)*CAT(prefix, _GROUPS_PITCH) + \ @@ -552,7 +551,6 @@ class WeightTensorJitConstant : public TensorBaseTJitConstantmacroName = MacroName(name, macroNameArgs); - this->calcFunction = FuncBody(name, funcArgs, body); + this->macroName = MacroName(tensor_name, layout_name, macroNameArgs); + this->calcFunction = FuncBody(layout_name, funcArgs, body); if (l == WeightsLayout::os_is_yx_osv16_isv16) - this->macroBody = FuncCall(name, {"o", "i", "0", "y", "x", Cat("_SIZE_X"), Cat("_SIZE_Y"), "1", Cat("_IFM_NUM"), Cat("_OFM_NUM"), "16", "16"}); + this->macroBody = FuncCall(layout_name, {"o", "i", "0", "y", "x", 
Cat("_SIZE_X"), Cat("_SIZE_Y"), "1", Cat("_IFM_NUM"), Cat("_OFM_NUM"), "16", "16"}); else if (l == WeightsLayout::os_is_zyx_osv32_isv16) - this->macroBody = FuncCall(name, {"o", "i", "z", "y", "x", Cat("_SIZE_X"), Cat("_SIZE_Y"), Cat("_SIZE_Z"), Cat("_IFM_NUM"), Cat("_OFM_NUM"), "32", "16"}); + this->macroBody = FuncCall(layout_name, {"o", "i", "z", "y", "x", Cat("_SIZE_X"), Cat("_SIZE_Y"), Cat("_SIZE_Z"), Cat("_IFM_NUM"), Cat("_OFM_NUM"), "32", "16"}); else if (l == WeightsLayout::os_is_zyx_osv64_isv16) - this->macroBody = FuncCall(name, {"o", "i", "z", "y", "x", Cat("_SIZE_X"), Cat("_SIZE_Y"), Cat("_SIZE_Z"), Cat("_IFM_NUM"), Cat("_OFM_NUM"), "64", "16"}); + this->macroBody = FuncCall(layout_name, {"o", "i", "z", "y", "x", Cat("_SIZE_X"), Cat("_SIZE_Y"), Cat("_SIZE_Z"), Cat("_IFM_NUM"), Cat("_OFM_NUM"), "64", "16"}); } else if (l == WeightsLayout::g_os_zyx_is_osv16_isv16 || l == WeightsLayout::g_os_zyx_is_osv16_isv32 || l == WeightsLayout::g_os_zyx_is_osv32_isv16 || l == WeightsLayout::g_os_zyx_is_osv32_isv32) { args macroNameArgs = {"prefix", "g", "o", "i", "z", "y", "x"}; args funcArgs = {"g", "o", "i", "z", "y", "x", "g_size", "o_size", "i_size", "z_size", "y_size", "x_size", "osv", "isv"}; - const auto name = toString(l); const auto body = R"V0G0N( \ uint is_size = (i_size + isv - 1) / isv; \ uint os_size = (o_size + osv - 1) / osv; \ @@ -612,8 +609,8 @@ class WeightTensorJitConstant : public TensorBaseTJitConstantmacroName = MacroName(name, macroNameArgs); - this->calcFunction = FuncBody(name, funcArgs, body); + this->macroName = MacroName(tensor_name, layout_name, macroNameArgs); + this->calcFunction = FuncBody(layout_name, funcArgs, body); std::string osv = "16", isv = "16"; if (l == WeightsLayout::g_os_zyx_is_osv16_isv16) { osv = "16"; isv = "16"; @@ -624,12 +621,11 @@ class WeightTensorJitConstant : public TensorBaseTJitConstantmacroBody = FuncCall(name, {"g", "o", "i", "z", "y", "x", Cat("_GROUPS_NUM"), Cat("_OFM_NUM"), Cat("_IFM_NUM"), Cat("_SIZE_Z"), 
- Cat("_SIZE_Y"), Cat("_SIZE_X"), osv, isv}); + this->macroBody = FuncCall(layout_name, {"g", "o", "i", "z", "y", "x", Cat("_GROUPS_NUM"), Cat("_OFM_NUM"), Cat("_IFM_NUM"), Cat("_SIZE_Z"), + Cat("_SIZE_Y"), Cat("_SIZE_X"), osv, isv}); } else if (l == WeightsLayout::os_is_yx_osv16_isv4 || l == WeightsLayout::os_is_yx_osv32_isv4) { args macroNameArgs = {"prefix", "o", "i", "y", "x"}; args funcArgs = {"o", "i", "y", "x", "i_size", "o_size", "x_size", "otd"}; - const auto name = toString(l); const auto body = R"V0G0N( \ uint out_depth_tile = o / otd; \ uint od = o - out_depth_tile * otd; \ @@ -644,12 +640,12 @@ class WeightTensorJitConstant : public TensorBaseTJitConstantmacroName = MacroName(name, macroNameArgs); - this->calcFunction = FuncBody(name, funcArgs, body); + this->macroName = MacroName(tensor_name, layout_name, macroNameArgs); + this->calcFunction = FuncBody(layout_name, funcArgs, body); if (l == WeightsLayout::os_is_yx_osv16_isv4) - this->macroBody = FuncCall(name, {"o", "i", "y", "x", Cat("_IFM_PITCH"), Cat("_OFM_PITCH"), Cat("_SIZE_X"), "16"}); + this->macroBody = FuncCall(layout_name, {"o", "i", "y", "x", Cat("_IFM_PITCH"), Cat("_OFM_PITCH"), Cat("_SIZE_X"), "16"}); else if (l == WeightsLayout::os_is_yx_osv32_isv4) - this->macroBody = FuncCall(name, {"o", "i", "y", "x", Cat("_IFM_PITCH"), Cat("_OFM_PITCH"), Cat("_SIZE_X"), "32"}); + this->macroBody = FuncCall(layout_name, {"o", "i", "y", "x", Cat("_IFM_PITCH"), Cat("_OFM_PITCH"), Cat("_SIZE_X"), "32"}); } else { // throw error? } @@ -667,12 +663,12 @@ class WeightTensorJitConstant : public TensorBaseTJitConstant args) { + static const std::string MacroName(std::string tensor_name, std::string layout_name, std::initializer_list args) { std::string args_str = ""; size_t counter = 0; for (auto& arg : args) args_str += (++counter == args.size()) ? 
(arg) : (arg + ", "); - return "GET_WEIGHTS_" + name + "_INDEX(" + args_str + ")"; + return "GET_" + tensor_name + "_" + layout_name + "_INDEX(" + args_str + ")"; } static const std::string FuncBody(std::string name, std::initializer_list args = {}, std::string body = "return 0;") { @@ -727,7 +723,9 @@ JitDefinitions WeightTensorJitConstant::GetDefinitions() const { std::string index_func_val; auto layout = _tensor.GetLayout(); - WeightIndexFuncDesc indexFuncDesc {layout}; + auto layout_str = toString(layout); + WeightIndexFuncDesc indexFuncDesc{_name, layout}; + std::string called_func_name = "GET_" + _name + "_" + layout_str + "_INDEX"; if (WeightsTensor::DoesGroupDimExist(layout)) { if (WeightsTensor::ChannelsCount(layout) <= 5) { std::vector grouped_4d_channels = { @@ -740,15 +738,14 @@ JitDefinitions WeightTensorJitConstant::GetDefinitions() const { bool is_grouped_4d_layout = is_common_nd_layout(grouped_4d_channels, layout); if (is_grouped_4d_layout) { index_macro_name = _name + "_GET_INDEX(g, o, i, y, x)"; - auto layout_str = toString(layout); if (layout == WeightsLayout::goiyx) - index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", g, o, i, 0, y, x)"; + index_func_val = called_func_name + "(" + _name + ", g, o, i, 0, y, x)"; else if (layout == WeightsLayout::g_os_is_yx_isv16_osv16) - index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", g, o, i, 0, y, x, 16)"; + index_func_val = called_func_name + "(" + _name + ", g, o, i, 0, y, x, 16)"; else if (layout == WeightsLayout::g_os_iyx_osv16) - index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", g, o, i, y, x, 16)"; + index_func_val = called_func_name + "(" + _name + ", g, o, i, y, x, 16)"; else if (layout == WeightsLayout::g_is_os_yx_isv16_osv16) - index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", g, o, i, 0, y, x, 16)"; + index_func_val = called_func_name + "(" + _name + ", g, o, i, 0, y, x, 16)"; } else { assert(0); } @@ -764,13 +761,12 @@ 
JitDefinitions WeightTensorJitConstant::GetDefinitions() const { bool is_grouped_5d_layout = is_common_nd_layout(grouped_5d_channels, layout); if (is_grouped_5d_layout) { index_macro_name = _name + "_GET_INDEX(g, o, i, z, y, x)"; - auto layout_str = toString(layout); if (layout == WeightsLayout::goizyx) - index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", g, o, i, z, y, x)"; + index_func_val = called_func_name + "(" + _name + ", g, o, i, z, y, x)"; else if (layout == WeightsLayout::g_os_is_zyx_isv16_osv16) - index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", g, o, i, z, y, x, 16)"; + index_func_val = called_func_name + "(" + _name + ", g, o, i, z, y, x, 16)"; else if (layout == WeightsLayout::g_is_os_zyx_isv16_osv16) - index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", g, o, i, z, y, x, 16)"; + index_func_val = called_func_name + "(" + _name + ", g, o, i, z, y, x, 16)"; } else { assert(0); } @@ -786,19 +782,18 @@ JitDefinitions WeightTensorJitConstant::GetDefinitions() const { bool is_common_4d_layout = is_common_nd_layout(base_4d_channels, layout); if (is_common_4d_layout) { index_macro_name = _name + "_GET_INDEX(o, i, y, x)"; - auto layout_str = toString(layout); if (layout == WeightsLayout::oiyx) - index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", 0, o, i, 0, y, x)"; + index_func_val = called_func_name + "(" + _name + ", 0, o, i, 0, y, x)"; else if (layout == WeightsLayout::os_is_yx_isv16_osv16) - index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", 0, o, i, 0, y, x, 16)"; + index_func_val = called_func_name + "(" + _name + ", 0, o, i, 0, y, x, 16)"; else if (layout == WeightsLayout::os_iyx_osv16) - index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", 0, o, i, y, x, 16)"; + index_func_val = called_func_name + "(" + _name + ", 0, o, i, y, x, 16)"; else if (layout == WeightsLayout::os_iyx_osv32 || layout == WeightsLayout::os_iyx_osv32__ai32) - 
index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", 0, o, i, y, x, 32)"; + index_func_val = called_func_name + "(" + _name + ", 0, o, i, y, x, 32)"; else if (layout == WeightsLayout::is_os_yx_isv16_osv16) - index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", 0, o, i, 0, y, x, 16)"; + index_func_val = called_func_name + "(" + _name + ", 0, o, i, 0, y, x, 16)"; else if (layout == WeightsLayout::os_is_yx_osv16_isv16) - index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", o, i, 0, y, x)"; + index_func_val = called_func_name + "(" + _name + ", o, i, 0, y, x)"; } else { assert(0); } @@ -813,15 +808,14 @@ JitDefinitions WeightTensorJitConstant::GetDefinitions() const { bool is_common_5d_layout = is_common_nd_layout(base_5d_channels, layout); if (is_common_5d_layout) { index_macro_name = _name + "_GET_INDEX(o, i, z, y, x)"; - auto layout_str = toString(layout); if (layout == WeightsLayout::oizyx) - index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", 0, o, i, z, y, x)"; + index_func_val = called_func_name + "(" + _name + ", 0, o, i, z, y, x)"; else if (layout == WeightsLayout::os_is_zyx_isv16_osv16) - index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", 0, o, i, z, y, x, 16)"; + index_func_val = called_func_name + "(" + _name + ", 0, o, i, z, y, x, 16)"; else if (layout == WeightsLayout::is_os_zyx_isv16_osv16) - index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", 0, o, i, z, y, x, 16)"; + index_func_val = called_func_name + "(" + _name + ", 0, o, i, z, y, x, 16)"; else if (layout == WeightsLayout::os_is_zyx_osv32_isv16 || layout == WeightsLayout::os_is_zyx_osv64_isv16) - index_func_val = "GET_WEIGHTS_" + layout_str + "_INDEX(" + _name + ", o, i, z, y, x)"; + index_func_val = called_func_name + "(" + _name + ", o, i, z, y, x)"; } else { assert(0); } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/kernel_base.cpp 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/kernel_base.cpp index 3d23771d0fc620..acb26f792b9f1a 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/kernel_base.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/kernel_base.cpp @@ -15,10 +15,51 @@ #include "kernel_base.h" +#include + namespace kernel_selector { const primitive_db KernelBase::db; thread_local size_t KernelBase::counter = 0; +std::string toString(const kernel_selector::CommonDispatchData& dispatchData) { + auto gws = dispatchData.gws; + auto lws = dispatchData.lws; + std::stringstream os; + os << "GWS(" << gws.size() << "): "; + for (auto e : gws) { + os << e << " "; + } + os << "LWS(" << lws.size() << "): "; + for (auto e : lws) { + os << e << " "; + } + return os.str(); +} + +void KernelBase::CheckDispatchData(const std::string& kernelName, const kernel_selector::CommonDispatchData& dispatchData) { + if (dispatchData.gws.size() != 3 || dispatchData.lws.size() != 3) + throw std::runtime_error("ERROR: Invalid dispatch data for kernel: " + kernelName + ": " + + ": LWS and GWS size is expected to be equal to 3. Actual: " + + toString(dispatchData)); + + if (dispatchData.lws[0] * dispatchData.lws[1] * dispatchData.lws[2] > 256) { + throw std::runtime_error("ERROR: Invalid dispatch data for kernel: " + kernelName + + ": LWS cannot be greater than 256. Actual: " + + toString(dispatchData)); + } + for (size_t i = 0; i < dispatchData.gws.size(); i++) { + if (dispatchData.gws[i] == 0 || dispatchData.lws[i] == 0) + throw std::runtime_error("ERROR: Invalid dispatch data for kernel: " + kernelName + + ": Dispatch data cannot contain zeros. Actual: " + + toString(dispatchData)); + + if (dispatchData.gws[i] % dispatchData.lws[i] != 0) + throw std::runtime_error("ERROR: Invalid dispatch data for kernel: " + kernelName + + ": GWS must be divisible by corresponding LWS. 
Actual: " + + toString(dispatchData)); + } +} + static bool IsTypeUsedIn(Datatype type, const base_params& params) { return params.output.GetDType() == type || std::any_of(params.inputs.begin(), params.inputs.end(), [=](const DataTensor& input) -> bool { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/kernel_base.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/kernel_base.h index 3b2aa558ba3043..a795b5f64286ca 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/kernel_base.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/kernel_base.h @@ -26,6 +26,20 @@ namespace kernel_selector { using primitive_db = kernel_selector::gpu::cache::primitive_db; +struct CommonDispatchData { + std::vector gws; + std::vector lws; + float efficiency; + + CommonDispatchData() : gws({0, 0, 0}), lws({0, 0, 0}), efficiency(0.0f) {} +}; + +std::string toString(const kernel_selector::CommonDispatchData& dispatchData); + +static inline std::ostream &operator<<(std::ostream &os, CommonDispatchData disptchData) { + return os << toString(disptchData); +} + class KernelBase { public: using FusedOpType = KernelType; @@ -56,6 +70,7 @@ class KernelBase { static const primitive_db db; const std::string kernelName; + static void CheckDispatchData(const std::string& kernelName, const kernel_selector::CommonDispatchData& dispatchData); static size_t UniqeID() { return counter++; } // TODO: use interlocked virtual Datatype GetUnitType(const base_params& params) const; From 347e92cc821dcbd1be2a6a71aa27961c77583edf Mon Sep 17 00:00:00 2001 From: Anna Likholat Date: Mon, 19 Oct 2020 19:38:55 +0300 Subject: [PATCH 30/35] [JAVA] Fixed IECore constructor (#2685) --- inference-engine/ie_bridges/java/cpp/ie_core.cpp | 2 +- inference-engine/ie_bridges/java/cpp/openvino_java.hpp | 2 +- .../ie_bridges/java/org/intel/openvino/IECore.java | 4 ++-- inference-engine/ie_bridges/java/samples/README.md | 1 + 4 files changed, 5 insertions(+), 4 deletions(-) diff 
--git a/inference-engine/ie_bridges/java/cpp/ie_core.cpp b/inference-engine/ie_bridges/java/cpp/ie_core.cpp index 896e82bdd4642f..692985414eb983 100644 --- a/inference-engine/ie_bridges/java/cpp/ie_core.cpp +++ b/inference-engine/ie_bridges/java/cpp/ie_core.cpp @@ -25,7 +25,7 @@ JNIEXPORT jlong JNICALL Java_org_intel_openvino_IECore_GetCore(JNIEnv *env, jobj return 0; } -JNIEXPORT jlong JNICALL Java_org_intel_openvino_IECore_GetCore_1(JNIEnv *env, jobject obj, jstring xmlConfigFile) +JNIEXPORT jlong JNICALL Java_org_intel_openvino_IECore_GetCore1(JNIEnv *env, jobject obj, jstring xmlConfigFile) { static const char method_name[] = "GetCore_1"; try diff --git a/inference-engine/ie_bridges/java/cpp/openvino_java.hpp b/inference-engine/ie_bridges/java/cpp/openvino_java.hpp index 781b4b77e1eb6d..c3555aead2eed5 100644 --- a/inference-engine/ie_bridges/java/cpp/openvino_java.hpp +++ b/inference-engine/ie_bridges/java/cpp/openvino_java.hpp @@ -21,7 +21,7 @@ JNIEXPORT void JNICALL Java_org_intel_openvino_IECore_SetConfig(JNIEnv *, jobjec JNIEXPORT void JNICALL Java_org_intel_openvino_IECore_SetConfig1(JNIEnv *, jobject, jlong, jobject); JNIEXPORT jlong JNICALL Java_org_intel_openvino_IECore_GetConfig(JNIEnv *, jobject, jlong, jstring, jstring); JNIEXPORT jlong JNICALL Java_org_intel_openvino_IECore_GetCore(JNIEnv *, jobject); -JNIEXPORT jlong JNICALL Java_org_intel_openvino_IECore_GetCore_1(JNIEnv *, jobject, jstring); +JNIEXPORT jlong JNICALL Java_org_intel_openvino_IECore_GetCore1(JNIEnv *, jobject, jstring); JNIEXPORT void JNICALL Java_org_intel_openvino_IECore_delete(JNIEnv *, jobject, jlong); // diff --git a/inference-engine/ie_bridges/java/org/intel/openvino/IECore.java b/inference-engine/ie_bridges/java/org/intel/openvino/IECore.java index 7530458bb67a9b..43f34faee7423c 100644 --- a/inference-engine/ie_bridges/java/org/intel/openvino/IECore.java +++ b/inference-engine/ie_bridges/java/org/intel/openvino/IECore.java @@ -10,7 +10,7 @@ public IECore() { } public 
IECore(String xmlConfigFile) { - super(GetCore_1(xmlConfigFile)); + super(GetCore1(xmlConfigFile)); } public CNNNetwork ReadNetwork(final String modelPath, final String weightPath) { @@ -92,7 +92,7 @@ private static native long LoadNetwork1( private static native long GetCore(); - private static native long GetCore_1(String xmlConfigFile); + private static native long GetCore1(String xmlConfigFile); @Override protected native void delete(long nativeObj); diff --git a/inference-engine/ie_bridges/java/samples/README.md b/inference-engine/ie_bridges/java/samples/README.md index 6cdb661c243897..1cbf7cf1d2e58a 100644 --- a/inference-engine/ie_bridges/java/samples/README.md +++ b/inference-engine/ie_bridges/java/samples/README.md @@ -8,6 +8,7 @@ Upon start-up, the application reads command-line parameters and loads a network ## Build Create an environment variable with Inference Engine installation path: +```bash export IE_PATH=/path/to/openvino/bin/intel64/Release/lib ``` From c2394508c158e46b9d2bff61eda5f94d0e4178d9 Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Tue, 20 Oct 2020 09:57:55 +0300 Subject: [PATCH 31/35] Implement LookupTableInsert shape inference (#2348) * Implement LookupTableInsertV2 shape inference It is needed if other nodes not beeing pruned in the graph have a conditional dependence on LookupTableInsertV2 node. 
Signed-off-by: Roman Kazantsev * Fix after core-review #1 Signed-off-by: Roman Kazantsev * Fix the code after review #2 Signed-off-by: Roman Kazantsev * Fix after code review #3 --- model-optimizer/automation/package_BOM.txt | 2 + .../front/tf/LookupTableInsert_ext.py | 38 ++++++++++ .../extensions/ops/LookupTableInsert.py | 58 +++++++++++++++ .../extensions/ops/LookupTableInsert_test.py | 72 +++++++++++++++++++ 4 files changed, 170 insertions(+) create mode 100644 model-optimizer/extensions/front/tf/LookupTableInsert_ext.py create mode 100644 model-optimizer/extensions/ops/LookupTableInsert.py create mode 100644 model-optimizer/extensions/ops/LookupTableInsert_test.py diff --git a/model-optimizer/automation/package_BOM.txt b/model-optimizer/automation/package_BOM.txt index 60dcace71c8152..b3a599a17cea90 100644 --- a/model-optimizer/automation/package_BOM.txt +++ b/model-optimizer/automation/package_BOM.txt @@ -390,6 +390,7 @@ extensions/front/tf/identity_ext.py extensions/front/tf/identityN_to_identity.py extensions/front/tf/InterpolateTransposes.py extensions/front/tf/IteratorGetNext_ext.py +extensions/front/tf/LookupTableInsert_ext.py extensions/front/tf/LoopCond_ext.py extensions/front/tf/lrn_ext.py extensions/front/tf/mask_rcnn_support.json @@ -630,6 +631,7 @@ extensions/ops/identity.py extensions/ops/instance_normalization.py extensions/ops/interp.py extensions/ops/interpolate.py +extensions/ops/LookupTableInsert.py extensions/ops/LSTM.py extensions/ops/lstm_cell.py extensions/ops/lstm_sequence.py diff --git a/model-optimizer/extensions/front/tf/LookupTableInsert_ext.py b/model-optimizer/extensions/front/tf/LookupTableInsert_ext.py new file mode 100644 index 00000000000000..609291f8d67dd6 --- /dev/null +++ b/model-optimizer/extensions/front/tf/LookupTableInsert_ext.py @@ -0,0 +1,38 @@ +""" + Copyright (C) 2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the 
License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +""" + +from extensions.ops.LookupTableInsert import LookupTableInsert +from mo.front.extractor import FrontExtractorOp + + +class LookupTableInsertFrontExtractor(FrontExtractorOp): + op = 'LookupTableInsert' + enabled = True + + @classmethod + def extract(cls, node): + LookupTableInsert.update_node_stat(node, {}) + return cls.enabled + + +class LookupTableInsertV2FrontExtractor(FrontExtractorOp): + op = 'LookupTableInsertV2' + enabled = True + + @classmethod + def extract(cls, node): + LookupTableInsert.update_node_stat(node, {}) + return cls.enabled diff --git a/model-optimizer/extensions/ops/LookupTableInsert.py b/model-optimizer/extensions/ops/LookupTableInsert.py new file mode 100644 index 00000000000000..a225003a7b1477 --- /dev/null +++ b/model-optimizer/extensions/ops/LookupTableInsert.py @@ -0,0 +1,58 @@ +""" + Copyright (C) 2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+""" + +import numpy as np + +from mo.front.common.partial_infer.utils import int64_array +from mo.graph.graph import Node, Graph +from mo.ops.op import Op + + +class LookupTableInsert(Op): + ''' + This operation has only output control flow edges and no output data edges in some models. + And for these cases implementation of the shape inference is needed since the shape inference is executed + before control flow edges resolving. This operation has non-tensor output so the output shape is empty. + ''' + enabled = False + op = 'LookupTableInsert' + + def __init__(self, graph: Graph, attrs: dict): + mandatory_props = { + 'type': None, + 'op': self.op, + 'infer': self.infer, + 'in_ports_count': 3, + 'out_ports_count': 1, + } + super().__init__(graph, mandatory_props, attrs) + + @staticmethod + def infer(node: Node): + node_name = node.soft_get('name', node.id) + connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()] + assert len(connected_in_ports) == 3, \ + "Incorrect number of inputs for {} node".format(node_name) + + # check shapes of input tensors + keys_shape = node.in_port(1).data.get_shape() + values_shape = node.in_port(2).data.get_shape() + assert np.array_equal(keys_shape, values_shape), \ + 'Shapes of tensors with keys and values must be equal for {} node'.format(node_name) + + # set output shape that must be empty + # since output is not a tensor + node.out_port(0).data.set_shape(int64_array([])) diff --git a/model-optimizer/extensions/ops/LookupTableInsert_test.py b/model-optimizer/extensions/ops/LookupTableInsert_test.py new file mode 100644 index 00000000000000..bf822e3b974e2f --- /dev/null +++ b/model-optimizer/extensions/ops/LookupTableInsert_test.py @@ -0,0 +1,72 @@ +""" + Copyright (C) 2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +""" + +import unittest + +import numpy as np + +from extensions.ops.LookupTableInsert import LookupTableInsert +from mo.front.common.partial_infer.utils import int64_array +from mo.graph.graph import Node +from mo.utils.unittest.graph import build_graph + +nodes_attributes = {'table': {'kind': 'op'}, + 'table_data': {'shape': None, 'value': None, 'kind': 'data'}, + 'keys': {'kind': 'op'}, + 'keys_data': {'shape': None, 'value': None, 'kind': 'data'}, + 'values': {'kind': 'op'}, + 'values_data': {'shape': None, 'value': None, 'kind': 'data'}, + 'lookuptableinsert_node': {'op': 'LookupTableInsert', 'kind': 'op'}, + 'output': {'shape': None, 'value': None, 'kind': 'data'}} + +# graph 1 +edges1 = [('table', 'table_data'), + ('keys', 'keys_data'), + ('values', 'values_data'), + ('table_data', 'lookuptableinsert_node', {'in': 0}), + ('keys_data', 'lookuptableinsert_node', {'in': 1}), + ('values_data', 'lookuptableinsert_node', {'in': 2}), + ('lookuptableinsert_node', 'output')] + +# valid test case +inputs1 = {'table_data': {}, + 'keys_data': {'shape': int64_array([4])}, + 'values_data': {'shape': int64_array([4])}} + +# invalid test case +inputs2 = {'table_data': {}, + 'keys_data': {'shape': int64_array([5, 2])}, + 'values_data': {'shape': int64_array([4])}} + +class TestLookupTableInsert(unittest.TestCase): + def test_infer1(self): + graph = build_graph(nodes_attributes, edges1, inputs1) + lookuptableinsert_node = Node(graph, 'lookuptableinsert_node') + LookupTableInsert.infer(lookuptableinsert_node) + + # prepare reference results + ref_output_shape = int64_array([]) 
+ + # get the result + res_output_shape = graph.node['output']['shape'] + + self.assertTrue(np.array_equal(ref_output_shape, res_output_shape), + 'shapes do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) + + def test_infer_invalid1(self): + graph = build_graph(nodes_attributes, edges1, inputs2) + lookuptableinsert_node = Node(graph, 'lookuptableinsert_node') + self.assertRaises(AssertionError, LookupTableInsert.infer, lookuptableinsert_node) From 8002b16eb2f0188a8fdc9be57157680a8b6b4c4f Mon Sep 17 00:00:00 2001 From: Mateusz Tabaka Date: Tue, 20 Oct 2020 11:19:03 +0200 Subject: [PATCH 32/35] [ONNX] Add type conversion for Pow op inputs (#2589) Co-authored-by: mitruska --- .../include/onnx_import/op/pow.hpp | 10 +-- ngraph/frontend/onnx_import/src/op/pow.cpp | 67 +++++++++++++++++++ ngraph/python/tests/__init__.py | 4 -- ngraph/python/tests/test_onnx/test_backend.py | 20 +++--- .../models/onnx/pow_float32_float32.prototxt | 60 +++++++++++++++++ .../models/onnx/pow_float32_int32.prototxt | 59 ++++++++++++++++ .../models/onnx/pow_int32_float32.prototxt | 60 +++++++++++++++++ ngraph/test/onnx/onnx_import.in.cpp | 42 ++++++++++++ 8 files changed, 297 insertions(+), 25 deletions(-) create mode 100644 ngraph/frontend/onnx_import/src/op/pow.cpp create mode 100644 ngraph/test/models/onnx/pow_float32_float32.prototxt create mode 100644 ngraph/test/models/onnx/pow_float32_int32.prototxt create mode 100644 ngraph/test/models/onnx/pow_int32_float32.prototxt diff --git a/ngraph/frontend/onnx_import/include/onnx_import/op/pow.hpp b/ngraph/frontend/onnx_import/include/onnx_import/op/pow.hpp index 94e95e647ca088..bf2ebe817ec938 100644 --- a/ngraph/frontend/onnx_import/include/onnx_import/op/pow.hpp +++ b/ngraph/frontend/onnx_import/include/onnx_import/op/pow.hpp @@ -16,11 +16,7 @@ #pragma once -#include - -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" -#include "onnx_import/default_opset.hpp" namespace ngraph { @@ -30,11 +26,7 @@ 
namespace ngraph { namespace set_1 { - inline OutputVector pow(const Node& node) - { - return {std::make_shared(node.get_ng_inputs().at(0), - node.get_ng_inputs().at(1))}; - } + OutputVector pow(const Node& node); } // namespace set_1 diff --git a/ngraph/frontend/onnx_import/src/op/pow.cpp b/ngraph/frontend/onnx_import/src/op/pow.cpp new file mode 100644 index 00000000000000..16c3906733765f --- /dev/null +++ b/ngraph/frontend/onnx_import/src/op/pow.cpp @@ -0,0 +1,67 @@ +//***************************************************************************** +// Copyright 2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +#include + +#include "ngraph/node.hpp" +#include "onnx_import/default_opset.hpp" +#include "onnx_import/op/pow.hpp" + +namespace ngraph +{ + namespace onnx_import + { + namespace op + { + namespace set_1 + { + OutputVector pow(const Node& node) + { + auto inputs = node.get_ng_inputs(); + NGRAPH_CHECK(inputs.size() == 2, + "Power operation requires 2 inputs. 
Got: ", + inputs.size()); + + auto base = inputs[0]; + auto exponent = inputs[1]; + auto base_type = inputs[0].get_element_type(); + auto exponent_type = inputs[1].get_element_type(); + if (exponent_type != base_type) + { + if (exponent_type.is_integral() || + (base_type.is_real() && + base_type.bitwidth() >= exponent_type.bitwidth())) + { + exponent = + std::make_shared(exponent, base_type); + } + else + { + base = std::make_shared(base, exponent_type); + auto power = std::make_shared(base, exponent); + return {std::make_shared(power, base_type)}; + } + } + return {std::make_shared(base, exponent)}; + } + + } // namespace set_1 + + } // namespace op + + } // namespace onnx_import + +} // namespace ngraph diff --git a/ngraph/python/tests/__init__.py b/ngraph/python/tests/__init__.py index d92646156decad..c2f10c84ca6e1a 100644 --- a/ngraph/python/tests/__init__.py +++ b/ngraph/python/tests/__init__.py @@ -154,10 +154,6 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True): xfail_issue_38717 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:" "GreaterOrEqual") xfail_issue_38719 = xfail_test(reason="nGraph does not support the following ONNX operations: GatherND") -xfail_issue_38721 = xfail_test(reason="RuntimeError: While validating ONNX node '': " - "While validating node 'v1::Power Power_" - "(x[0]:f32{3}, y[0]:i64{3}) -> (dynamic?)' with friendly_name " - "'Power_': Argument element types are inconsistent.") xfail_issue_38722 = xfail_test(reason="RuntimeError: While validating ONNX nodes MatMulInteger" "and QLinearMatMul" "Input0 scale and input0 zero point shape must be same and 1") diff --git a/ngraph/python/tests/test_onnx/test_backend.py b/ngraph/python/tests/test_onnx/test_backend.py index db0fb74c5f948e..3fdc27a2b6aa38 100644 --- a/ngraph/python/tests/test_onnx/test_backend.py +++ b/ngraph/python/tests/test_onnx/test_backend.py @@ -68,7 +68,6 @@ xfail_issue_33589, xfail_issue_38719, 
xfail_issue_33535, - xfail_issue_38721, xfail_issue_38722, xfail_issue_38723, xfail_issue_38724, @@ -189,7 +188,11 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None "OnnxBackendPyTorchConvertedModelTest.test_Embedding_sparse_cpu", "OnnxBackendNodeModelTest.test_constantofshape_int_shape_zero_cpu", "OnnxBackendNodeModelTest.test_max_int64_cpu", + "OnnxBackendNodeModelTest.test_pow_types_float32_int64_cpu", + "OnnxBackendNodeModelTest.test_pow_types_float_cpu", + "OnnxBackendNodeModelTest.test_pow_types_int64_float32_cpu", "OnnxBackendNodeModelTest.test_pow_types_int64_int64_cpu", + "OnnxBackendNodeModelTest.test_pow_types_int_cpu", "OnnxBackendNodeModelTest.test_min_int64_cpu", "OnnxBackendNodeModelTest.test_gather_negative_indices_cpu", "OnnxBackendNodeModelTest.test_scatternd_cpu"), @@ -248,7 +251,8 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None "OnnxBackendNodeModelTest.test_min_uint32_cpu"), (xfail_issue_36478, "OnnxBackendNodeModelTest.test_max_uint64_cpu", - "OnnxBackendNodeModelTest.test_min_uint64_cpu"), + "OnnxBackendNodeModelTest.test_min_uint64_cpu", + "OnnxBackendNodeModelTest.test_pow_types_float32_uint64_cpu"), (xfail_issue_36437, "OnnxBackendNodeModelTest.test_argmax_default_axis_example_cpu", "OnnxBackendNodeModelTest.test_argmax_default_axis_random_cpu", @@ -273,7 +277,8 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None "OnnxBackendNodeModelTest.test_argmin_negative_axis_keepdims_random_select_last_index_cpu", "OnnxBackendNodeModelTest.test_argmin_negative_axis_keepdims_example_select_last_index_cpu", "OnnxBackendNodeModelTest.test_argmin_keepdims_example_select_last_index_cpu", - "OnnxBackendNodeModelTest.test_argmin_keepdims_random_select_last_index_cpu"), + "OnnxBackendNodeModelTest.test_argmin_keepdims_random_select_last_index_cpu", + "OnnxBackendNodeModelTest.test_pow_types_float32_uint32_cpu"), (xfail_issue_38088, "OnnxBackendPyTorchConvertedModelTest.test_GLU_cpu"), (xfail_issue_38089, @@ 
-598,15 +603,6 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None "OnnxBackendNodeModelTest.test_dynamicquantizelinear_min_adjusted_cpu", "OnnxBackendNodeModelTest.test_dynamicquantizelinear_cpu", "OnnxBackendNodeModelTest.test_dynamicquantizelinear_max_adjusted_cpu"), - (xfail_issue_38721, - "OnnxBackendNodeModelTest.test_pow_types_int_cpu", - "OnnxBackendNodeModelTest.test_pow_types_int64_float32_cpu", - "OnnxBackendNodeModelTest.test_pow_types_int32_float32_cpu", - "OnnxBackendNodeModelTest.test_pow_types_float_cpu", - "OnnxBackendNodeModelTest.test_pow_types_float32_uint64_cpu", - "OnnxBackendNodeModelTest.test_pow_types_float32_uint32_cpu", - "OnnxBackendNodeModelTest.test_pow_types_float32_int64_cpu", - "OnnxBackendNodeModelTest.test_pow_types_float32_int32_cpu"), (xfail_issue_38722, "OnnxBackendNodeModelTest.test_matmulinteger_cpu", "OnnxBackendNodeModelTest.test_qlinearmatmul_2D_cpu", diff --git a/ngraph/test/models/onnx/pow_float32_float32.prototxt b/ngraph/test/models/onnx/pow_float32_float32.prototxt new file mode 100644 index 00000000000000..4fb1c23b15a53e --- /dev/null +++ b/ngraph/test/models/onnx/pow_float32_float32.prototxt @@ -0,0 +1,60 @@ +ir_version: 7 +producer_name: "onnx-importer-test" +graph { + node { + input: "X" + input: "N" + output: "Y" + op_type: "Pow" + } + name: "test-model-lstm" + input { + name: "X" + type { + tensor_type { + elem_type: 1 + shape { + dim { + dim_value: 1 + } + dim { + dim_value: 4 + } + } + } + } + } + input { + name: "N" + type { + tensor_type { + elem_type: 1 + shape { + dim { + dim_value: 1 + } + } + } + } + } + output { + name: "Y" + type { + tensor_type { + elem_type: 1 + shape { + dim { + dim_value: 1 + } + dim { + dim_value: 4 + } + } + } + } + } +} +opset_import { + domain: "" + version: 12 +} diff --git a/ngraph/test/models/onnx/pow_float32_int32.prototxt b/ngraph/test/models/onnx/pow_float32_int32.prototxt new file mode 100644 index 00000000000000..8100a447a07f70 --- /dev/null +++ 
b/ngraph/test/models/onnx/pow_float32_int32.prototxt @@ -0,0 +1,59 @@ +producer_name: "onnx-importer-test" +graph { + node { + input: "X" + input: "N" + output: "Y" + op_type: "Pow" + } + name: "test-model-lstm" + input { + name: "X" + type { + tensor_type { + elem_type: 1 + shape { + dim { + dim_value: 1 + } + dim { + dim_value: 4 + } + } + } + } + } + input { + name: "N" + type { + tensor_type { + elem_type: 6 + shape { + dim { + dim_value: 1 + } + } + } + } + } + output { + name: "Y" + type { + tensor_type { + elem_type: 1 + shape { + dim { + dim_value: 1 + } + dim { + dim_value: 4 + } + } + } + } + } +} +opset_import { + domain: "" + version: 12 +} diff --git a/ngraph/test/models/onnx/pow_int32_float32.prototxt b/ngraph/test/models/onnx/pow_int32_float32.prototxt new file mode 100644 index 00000000000000..901164cf8e69aa --- /dev/null +++ b/ngraph/test/models/onnx/pow_int32_float32.prototxt @@ -0,0 +1,60 @@ +ir_version: 7 +producer_name: "onnx-importer-test" +graph { + node { + input: "X" + input: "N" + output: "Y" + op_type: "Pow" + } + name: "test-model-lstm" + input { + name: "X" + type { + tensor_type { + elem_type: 6 + shape { + dim { + dim_value: 1 + } + dim { + dim_value: 4 + } + } + } + } + } + input { + name: "N" + type { + tensor_type { + elem_type: 1 + shape { + dim { + dim_value: 1 + } + } + } + } + } + output { + name: "Y" + type { + tensor_type { + elem_type: 6 + shape { + dim { + dim_value: 1 + } + dim { + dim_value: 4 + } + } + } + } + } +} +opset_import { + domain: "" + version: 12 +} diff --git a/ngraph/test/onnx/onnx_import.in.cpp b/ngraph/test/onnx/onnx_import.in.cpp index e2544235eee931..e98ef1a1d0f833 100644 --- a/ngraph/test/onnx/onnx_import.in.cpp +++ b/ngraph/test/onnx/onnx_import.in.cpp @@ -2300,6 +2300,48 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_pad_constant) test_case.run(); } +NGRAPH_TEST(${BACKEND_NAME}, onnx_model_pow_float32_float32) +{ + const auto function = onnx_import::import_onnx_model( + 
file_util::path_join(SERIALIZED_ZOO, "onnx/pow_float32_float32.prototxt")); + auto test_case = test::TestCase(function); + + test_case.add_input({1.f, 2.f, 3.f, 4.f}); // base + test_case.add_input({3.5f}); // exponent + + test_case.add_expected_output(Shape{1, 4}, {1.f, 11.313708f, 46.765373f, 128.f}); + + test_case.run(); +} + +NGRAPH_TEST(${BACKEND_NAME}, onnx_model_pow_float32_int32) +{ + const auto function = onnx_import::import_onnx_model( + file_util::path_join(SERIALIZED_ZOO, "onnx/pow_float32_int32.prototxt")); + auto test_case = test::TestCase(function); + + test_case.add_input({1.f, 2.f, 3.f, 4.f}); // base + test_case.add_input({3}); // exponent + + test_case.add_expected_output(Shape{1, 4}, {1.f, 8.f, 27.f, 64.f}); + + test_case.run(); +} + +NGRAPH_TEST(${BACKEND_NAME}, onnx_model_pow_int32_float32) +{ + const auto function = onnx_import::import_onnx_model( + file_util::path_join(SERIALIZED_ZOO, "onnx/pow_int32_float32.prototxt")); + auto test_case = test::TestCase(function); + + test_case.add_input({1, 2, 3, 4}); // base + test_case.add_input({3.5f}); // exponent + + test_case.add_expected_output(Shape{1, 4}, {1, 11, 46, 128}); + + test_case.run(); +} + NGRAPH_TEST(${BACKEND_NAME}, onnx_model_reciprocal) { const auto function = onnx_import::import_onnx_model( From 83670dd5cb33ca87019b606dd5b06b2cd138abd7 Mon Sep 17 00:00:00 2001 From: Mateusz Tabaka Date: Tue, 20 Oct 2020 11:36:46 +0200 Subject: [PATCH 33/35] Remove deprecated Any op from nGraph (#2719) --- ngraph/core/include/ngraph/op/any.hpp | 61 ---- .../core/include/ngraph/op/op_version_tbl.hpp | 1 - ngraph/core/include/ngraph/ops.hpp | 1 - .../include/ngraph/runtime/reference/any.hpp | 55 ---- .../runtime/reference/logical_reduction.hpp | 19 +- ngraph/core/src/op/any.cpp | 48 --- .../constant_folding_logical_reduction.cpp | 17 +- ngraph/test/CMakeLists.txt | 2 - ngraph/test/backend/any.in.cpp | 280 ------------------ ngraph/test/constant_folding.cpp | 28 -- ngraph/test/op_is.cpp | 9 - 
.../runtime/interpreter/int_executable.hpp | 11 - ngraph/test/runtime/opset0_tbl.hpp | 1 - ngraph/test/type_prop/any.cpp | 161 ---------- 14 files changed, 20 insertions(+), 674 deletions(-) delete mode 100644 ngraph/core/include/ngraph/op/any.hpp delete mode 100644 ngraph/core/reference/include/ngraph/runtime/reference/any.hpp delete mode 100644 ngraph/core/src/op/any.cpp delete mode 100644 ngraph/test/backend/any.in.cpp delete mode 100644 ngraph/test/type_prop/any.cpp diff --git a/ngraph/core/include/ngraph/op/any.hpp b/ngraph/core/include/ngraph/op/any.hpp deleted file mode 100644 index d1bb405d9119b9..00000000000000 --- a/ngraph/core/include/ngraph/op/any.hpp +++ /dev/null @@ -1,61 +0,0 @@ -//***************************************************************************** -// Copyright 2017-2020 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** - -#pragma once - -#include "ngraph/op/util/logical_reduction.hpp" - -namespace ngraph -{ - namespace op - { - namespace v0 - { - /// \brief Logical "any" reduction operation. - class NGRAPH_DEPRECATED( - "This operation is deprecated and will be removed soon. 
Please don't use it.") - NGRAPH_API Any : public util::LogicalReduction - { - NGRAPH_SUPPRESS_DEPRECATED_START - public: - static constexpr NodeTypeInfo type_info{"Any", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - /// \brief Constructs an "any" reduction operation. - Any() = default; - /// \brief Constructs an "any" reduction operation. - /// - /// \param arg The tensor to be reduced. - /// \param reduction_axes The axis positions (0-based) to be eliminated. - Any(const Output& arg, const AxisSet& reduction_axes); - /// \brief Constructs an "any" reduction operation. - /// - /// \param arg The tensor to be reduced. - /// \param reduction_axes The axis positions (0-based) to be eliminated. - Any(const Output& arg, const Output& reduction_axes); - - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - bool visit_attributes(AttributeVisitor& visitor) override { return true; } - /// \return The default value for Any. 
- virtual std::shared_ptr get_default_value() const override; - NGRAPH_SUPPRESS_DEPRECATED_END - }; - } - NGRAPH_SUPPRESS_DEPRECATED_START - using v0::Any; - NGRAPH_SUPPRESS_DEPRECATED_END - } -} diff --git a/ngraph/core/include/ngraph/op/op_version_tbl.hpp b/ngraph/core/include/ngraph/op/op_version_tbl.hpp index decf321fcaddaa..e970533bcc570e 100644 --- a/ngraph/core/include/ngraph/op/op_version_tbl.hpp +++ b/ngraph/core/include/ngraph/op/op_version_tbl.hpp @@ -33,7 +33,6 @@ NGRAPH_OP(Acos, ngraph::op::v0, 0) NGRAPH_OP(Acosh, ngraph::op::v3, 3) NGRAPH_OP(Add, ngraph::op::v0, 0) NGRAPH_OP(Add, ngraph::op::v1, 1) -NGRAPH_OP(Any, ngraph::op::v0, 0) NGRAPH_OP(Asin, ngraph::op::v0, 0) NGRAPH_OP(Asinh, ngraph::op::v3, 3) NGRAPH_OP(Atan, ngraph::op::v0, 0) diff --git a/ngraph/core/include/ngraph/ops.hpp b/ngraph/core/include/ngraph/ops.hpp index 761e3268ed294d..6f2809322ba317 100644 --- a/ngraph/core/include/ngraph/ops.hpp +++ b/ngraph/core/include/ngraph/ops.hpp @@ -23,7 +23,6 @@ #include "ngraph/op/acosh.hpp" #include "ngraph/op/add.hpp" #include "ngraph/op/and.hpp" -#include "ngraph/op/any.hpp" #include "ngraph/op/asin.hpp" #include "ngraph/op/asinh.hpp" #include "ngraph/op/assign.hpp" diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/any.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/any.hpp deleted file mode 100644 index 89b05b0ca54ea2..00000000000000 --- a/ngraph/core/reference/include/ngraph/runtime/reference/any.hpp +++ /dev/null @@ -1,55 +0,0 @@ -//***************************************************************************** -// Copyright 2017-2020 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** - -#pragma once - -#include - -#include "ngraph/coordinate_transform.hpp" -#include "ngraph/shape_util.hpp" - -namespace ngraph -{ - namespace runtime - { - namespace reference - { - static inline void any(const char* arg, - char* out, - const Shape& in_shape, - const AxisSet& reduction_axes, - bool keep_dims) - { - CoordinateTransform output_transform(reduce(in_shape, reduction_axes, keep_dims)); - - for (const Coordinate& output_coord : output_transform) - { - out[output_transform.index(output_coord)] = 0; - } - - CoordinateTransform input_transform(in_shape); - - for (const Coordinate& input_coord : input_transform) - { - Coordinate output_coord = reduce(input_coord, reduction_axes, keep_dims); - out[output_transform.index(output_coord)] = - out[output_transform.index(output_coord)] || - arg[input_transform.index(input_coord)]; - } - } - } - } -} diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/logical_reduction.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/logical_reduction.hpp index 2c063297802631..8b98434d06454b 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/logical_reduction.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/logical_reduction.hpp @@ -19,7 +19,6 @@ #include #include "ngraph/coordinate_transform.hpp" -#include "ngraph/runtime/reference/any.hpp" #include "ngraph/shape_util.hpp" namespace ngraph @@ -59,7 +58,23 @@ namespace ngraph const AxisSet& reduction_axes, bool keep_dims) { - 
runtime::reference::any(arg, out, input_shape, reduction_axes, keep_dims); + CoordinateTransform output_transform( + reduce(input_shape, reduction_axes, keep_dims)); + + for (const Coordinate& output_coord : output_transform) + { + out[output_transform.index(output_coord)] = 0; + } + + CoordinateTransform input_transform(input_shape); + + for (const Coordinate& input_coord : input_transform) + { + Coordinate output_coord = reduce(input_coord, reduction_axes, keep_dims); + out[output_transform.index(output_coord)] = + out[output_transform.index(output_coord)] || + arg[input_transform.index(input_coord)]; + } } } } diff --git a/ngraph/core/src/op/any.cpp b/ngraph/core/src/op/any.cpp deleted file mode 100644 index 2b70221ac6cbac..00000000000000 --- a/ngraph/core/src/op/any.cpp +++ /dev/null @@ -1,48 +0,0 @@ -//***************************************************************************** -// Copyright 2017-2020 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-//***************************************************************************** - -#include "ngraph/op/any.hpp" -#include "ngraph/graph_util.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START - -using namespace std; -using namespace ngraph; - -constexpr NodeTypeInfo op::Any::type_info; - -op::Any::Any(const Output& arg, const AxisSet& reduction_axes) - : LogicalReduction(arg, reduction_axes) -{ - constructor_validate_and_infer_types(); -} - -op::Any::Any(const Output& arg, const Output& reduction_axes) - : LogicalReduction(arg, reduction_axes) -{ - constructor_validate_and_infer_types(); -} - -shared_ptr op::Any::clone_with_new_inputs(const OutputVector& new_args) const -{ - check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1)); -} - -shared_ptr op::Any::get_default_value() const -{ - return ngraph::make_constant_from_string("0", get_element_type(), get_shape()); -} diff --git a/ngraph/core/src/pass/constant_folding_logical_reduction.cpp b/ngraph/core/src/pass/constant_folding_logical_reduction.cpp index 69dc2d50cd340c..0ee8024a982b46 100644 --- a/ngraph/core/src/pass/constant_folding_logical_reduction.cpp +++ b/ngraph/core/src/pass/constant_folding_logical_reduction.cpp @@ -16,10 +16,8 @@ #include "constant_folding.hpp" #include "ngraph/log.hpp" -#include "ngraph/op/any.hpp" #include "ngraph/op/reduce_logical_and.hpp" #include "ngraph/op/reduce_logical_or.hpp" -#include "ngraph/runtime/reference/any.hpp" #include "ngraph/runtime/reference/logical_reduction.hpp" NGRAPH_SUPPRESS_DEPRECATED_START @@ -33,15 +31,7 @@ static shared_ptr fold_constant_logical_reduction(shared_ptrget_shape()) * sizeof(char)); char* data_ptr = buffer.get_ptr(); - if (auto any = as_type_ptr<::ngraph::op::Any>(reduction_node)) - { - runtime::reference::any(constant->get_data_ptr(), - data_ptr, - reduction_node->get_input_shape(0), - any->get_reduction_axes(), - false); - } - else if (auto reduce_and = as_type_ptr<::ngraph::op::v1::ReduceLogicalAnd>(reduction_node)) 
+ if (auto reduce_and = as_type_ptr<::ngraph::op::v1::ReduceLogicalAnd>(reduction_node)) { const auto reduction_axes = reduce_and->get_reduction_axes(); const auto input_shape = reduce_and->get_input_shape(0); @@ -78,9 +68,8 @@ void pass::ConstantFolding::construct_constant_logical_reduction() auto constant_axes_label = make_shared(element::i64, Shape{2}, pattern::has_class()); auto is_supported_reduction = [](std::shared_ptr n) { - return (pattern::has_class<::ngraph::op::Any>()(n) || - pattern::has_class<::ngraph::op::v1::ReduceLogicalAnd>()(n) || - pattern::has_class<::ngraph::op::v1::ReduceLogicalOr>()(n)); + return pattern::has_class<::ngraph::op::v1::ReduceLogicalAnd>()(n) || + pattern::has_class<::ngraph::op::v1::ReduceLogicalOr>()(n); }; auto reduction = std::make_shared(element::i32, diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index 58dd0300758e6e..e39adcb2b9f543 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -96,7 +96,6 @@ set(SRC shape.cpp specialize_function.cpp tensor.cpp - type_prop/any.cpp type_prop/assign.cpp type_prop/avg_pool.cpp type_prop/batch_norm.cpp @@ -260,7 +259,6 @@ set(MULTI_TEST_SRC backend/acosh.in.cpp backend/add.in.cpp backend/aliased_output.in.cpp - backend/any.in.cpp backend/api.in.cpp backend/asin.in.cpp backend/asinh.in.cpp diff --git a/ngraph/test/backend/any.in.cpp b/ngraph/test/backend/any.in.cpp deleted file mode 100644 index 7c1fd8aeb8db0d..00000000000000 --- a/ngraph/test/backend/any.in.cpp +++ /dev/null @@ -1,280 +0,0 @@ -//***************************************************************************** -// Copyright 2017-2020 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** - -#include -#include -#include -#include -#include - -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "ngraph/runtime/tensor.hpp" -#include "runtime/backend.hpp" -#include "util/all_close.hpp" -#include "util/all_close_f.hpp" -#include "util/ndarray.hpp" -#include "util/random.hpp" -#include "util/test_control.hpp" -#include "util/test_tools.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START - -using namespace std; -using namespace ngraph; - -static string s_manifest = "${MANIFEST}"; - -// Trivial case with no reduced axes. 
-NGRAPH_TEST(${BACKEND_NAME}, any_trivial) -{ - Shape shape{2, 2}; - auto A = make_shared(element::boolean, shape); - auto f = make_shared(make_shared(A, AxisSet{}), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape); - copy_data(a, vector{0, 1, 1, 0}); - auto result = backend->create_tensor(element::boolean, shape); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{0, 1, 1, 0}), read_vector(result)); -} - -NGRAPH_TEST(${BACKEND_NAME}, any_2x2_to_scalar_true) -{ - Shape shape{2, 2}; - auto A = make_shared(element::boolean, shape); - auto f = make_shared(make_shared(A, AxisSet{0, 1}), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape); - copy_data(a, vector{0, 1, 1, 0}); - auto result = backend->create_tensor(element::boolean, Shape{}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{1}), read_vector(result)); -} - -NGRAPH_TEST(${BACKEND_NAME}, any_2x2_to_scalar_false) -{ - Shape shape{2, 2}; - auto A = make_shared(element::boolean, shape); - auto f = make_shared(make_shared(A, AxisSet{0, 1}), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape); - copy_data(a, vector{0, 0, 0, 0}); - auto result = backend->create_tensor(element::boolean, Shape{}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{0}), read_vector(result)); -} - -NGRAPH_TEST(${BACKEND_NAME}, any_2x0_to_scalar) -{ - Shape shape{2, 0}; - auto A = make_shared(element::boolean, shape); - auto f = make_shared(make_shared(A, AxisSet{0, 1}), 
ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape); - auto result = backend->create_tensor(element::boolean, Shape{}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{0}), read_vector(result)); -} - -NGRAPH_TEST(${BACKEND_NAME}, any_2x3_eliminate_col_dim) -{ - Shape shape{2, 3}; - auto A = make_shared(element::boolean, shape); - auto f = make_shared(make_shared(A, AxisSet{1}), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape); - copy_data(a, test::NDArray({{0, 1, 0}, {0, 0, 0}}).get_vector()); - auto result = backend->create_tensor(element::boolean, Shape{2}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{1, 0}), read_vector(result)); -} - -NGRAPH_TEST(${BACKEND_NAME}, any_2x3_eliminate_row_dim) -{ - Shape shape{2, 3}; - auto A = make_shared(element::boolean, shape); - auto f = make_shared(make_shared(A, AxisSet{0}), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape); - copy_data(a, test::NDArray({{0, 1, 0}, {0, 0, 1}}).get_vector()); - auto result = backend->create_tensor(element::boolean, Shape{3}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{0, 1, 1}), read_vector(result)); -} - -NGRAPH_TEST(${BACKEND_NAME}, any_2x2x3_eliminate_dim_0) -{ - Shape shape{2, 2, 3}; - auto A = make_shared(element::boolean, shape); - auto f = make_shared(make_shared(A, AxisSet{0}), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - 
auto a = backend->create_tensor(element::boolean, shape); - copy_data( - a, test::NDArray({{{0, 1, 0}, {0, 0, 1}}, {{1, 0, 1}, {0, 0, 0}}}).get_vector()); - auto result = backend->create_tensor(element::boolean, Shape{2, 3}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{1, 1, 1, 0, 0, 1}), read_vector(result)); -} - -NGRAPH_TEST(${BACKEND_NAME}, any_2x2x3_eliminate_dim_1) -{ - Shape shape{2, 2, 3}; - auto A = make_shared(element::boolean, shape); - auto f = make_shared(make_shared(A, AxisSet{1}), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape); - copy_data( - a, test::NDArray({{{0, 1, 0}, {0, 0, 1}}, {{1, 0, 1}, {0, 0, 0}}}).get_vector()); - auto result = backend->create_tensor(element::boolean, Shape{2, 3}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{0, 1, 1, 1, 0, 1}), read_vector(result)); -} - -NGRAPH_TEST(${BACKEND_NAME}, any_2x2x3_eliminate_dim_2) -{ - Shape shape{2, 2, 3}; - auto A = make_shared(element::boolean, shape); - auto f = make_shared(make_shared(A, AxisSet{2}), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape); - copy_data( - a, test::NDArray({{{0, 1, 0}, {0, 0, 1}}, {{1, 0, 1}, {0, 0, 0}}}).get_vector()); - auto result = backend->create_tensor(element::boolean, Shape{2, 2}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{1, 1, 1, 0}), read_vector(result)); -} - -NGRAPH_TEST(${BACKEND_NAME}, any_2x2x3_eliminate_dims_0_1) -{ - Shape shape{2, 2, 3}; - auto A = make_shared(element::boolean, shape); - auto f = make_shared(make_shared(A, AxisSet{0, 1}), ParameterVector{A}); - - auto backend = 
runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape); - copy_data( - a, test::NDArray({{{0, 1, 0}, {0, 0, 1}}, {{1, 0, 1}, {0, 0, 0}}}).get_vector()); - auto result = backend->create_tensor(element::boolean, Shape{3}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{1, 1, 1}), read_vector(result)); -} - -NGRAPH_TEST(${BACKEND_NAME}, any_2x2x3_eliminate_dims_0_2) -{ - Shape shape{2, 2, 3}; - auto A = make_shared(element::boolean, shape); - auto f = make_shared(make_shared(A, AxisSet{0, 2}), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape); - copy_data( - a, test::NDArray({{{0, 1, 0}, {0, 0, 1}}, {{1, 0, 1}, {0, 0, 0}}}).get_vector()); - auto result = backend->create_tensor(element::boolean, Shape{2}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{1, 1}), read_vector(result)); -} - -NGRAPH_TEST(${BACKEND_NAME}, any_2x2x3_eliminate_dims_1_2) -{ - Shape shape{2, 2, 3}; - auto A = make_shared(element::boolean, shape); - auto f = make_shared(make_shared(A, AxisSet{1, 2}), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape); - copy_data( - a, test::NDArray({{{0, 1, 0}, {0, 0, 1}}, {{1, 0, 1}, {0, 0, 0}}}).get_vector()); - auto result = backend->create_tensor(element::boolean, Shape{2}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{1, 1}), read_vector(result)); -} - -NGRAPH_TEST(${BACKEND_NAME}, any_2x2x3_eliminate_dims_0_1_2) -{ - Shape shape{2, 2, 3}; - auto A = make_shared(element::boolean, shape); - auto f = make_shared(make_shared(A, 
AxisSet{0, 1, 2}), ParameterVector{A}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape); - copy_data( - a, test::NDArray({{{0, 1, 0}, {0, 0, 1}}, {{1, 0, 1}, {0, 0, 0}}}).get_vector()); - auto result = backend->create_tensor(element::boolean, Shape{}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{1}), read_vector(result)); -} diff --git a/ngraph/test/constant_folding.cpp b/ngraph/test/constant_folding.cpp index 0d32c2cf404a81..614bf7438c124d 100644 --- a/ngraph/test/constant_folding.cpp +++ b/ngraph/test/constant_folding.cpp @@ -1444,34 +1444,6 @@ TEST(constant_folding, const_reduce_logical_and__keepdims_3d) ASSERT_EQ(values_expected, values_out); } -TEST(constant_folding, const_any) -{ - Shape input_shape{3, 3}; - - vector values_in{1, 0, 0, 1, 0, 1, 0, 0, 0}; - auto constant = op::Constant::create(element::boolean, input_shape, values_in); - auto convert = make_shared(constant, AxisSet{1}); - convert->set_friendly_name("test"); - auto f = make_shared(convert, ParameterVector{}); - - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); - - ASSERT_EQ(count_ops_of_type(f), 0); - ASSERT_EQ(count_ops_of_type(f), 1); - - auto new_const = - as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); - ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); - auto values_out = new_const->get_vector(); - - vector values_expected{1, 1, 0}; - - ASSERT_EQ(values_expected, values_out); -} - TEST(constant_folding, const_reduce_logical_or__no_keepdims) { const Shape input_shape{3, 3}; diff --git a/ngraph/test/op_is.cpp b/ngraph/test/op_is.cpp index 3b18906206f65d..a4504f20d03407 100644 --- a/ngraph/test/op_is.cpp +++ b/ngraph/test/op_is.cpp @@ -56,15 +56,6 @@ namespace EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); } - 
void op_is_Any() - { - op::Any node; - EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); - EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); - EXPECT_FALSE(op::is_binary_elementwise_logical(&node)); - } - void op_is_Asin() { op::Asin node; diff --git a/ngraph/test/runtime/interpreter/int_executable.hpp b/ngraph/test/runtime/interpreter/int_executable.hpp index d78518810df042..cc54b84f3ef2aa 100644 --- a/ngraph/test/runtime/interpreter/int_executable.hpp +++ b/ngraph/test/runtime/interpreter/int_executable.hpp @@ -30,7 +30,6 @@ #include "ngraph/runtime/aligned_buffer.hpp" #include "ngraph/runtime/reference/abs.hpp" #include "ngraph/runtime/reference/acos.hpp" -#include "ngraph/runtime/reference/any.hpp" #include "ngraph/runtime/reference/asin.hpp" #include "ngraph/runtime/reference/atan.hpp" #include "ngraph/runtime/reference/atan2.hpp" @@ -208,16 +207,6 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ args[0]->get_data_ptr(), out[0]->get_data_ptr(), element_count); break; } - case OP_TYPEID::Any: - { - const op::Any* any = static_cast(&node); - reference::any(args[0]->get_data_ptr(), - out[0]->get_data_ptr(), - node.get_input_shape(0), - any->get_reduction_axes(), - false); - break; - } case OP_TYPEID::Asin: { size_t element_count = shape_size(node.get_output_shape(0)); diff --git a/ngraph/test/runtime/opset0_tbl.hpp b/ngraph/test/runtime/opset0_tbl.hpp index a0eac8c3e6599f..2d918225cd4fb9 100644 --- a/ngraph/test/runtime/opset0_tbl.hpp +++ b/ngraph/test/runtime/opset0_tbl.hpp @@ -53,7 +53,6 @@ NGRAPH_OP(Abs, ngraph::op) NGRAPH_OP(Acos, ngraph::op) NGRAPH_OP(Add, ngraph::op) -NGRAPH_OP(Any, ngraph::op) NGRAPH_OP(Asin, ngraph::op) NGRAPH_OP(Atan, ngraph::op) NGRAPH_OP(AvgPool, ngraph::op::v0) diff --git a/ngraph/test/type_prop/any.cpp b/ngraph/test/type_prop/any.cpp deleted file mode 100644 index 785548a39919a1..00000000000000 --- 
a/ngraph/test/type_prop/any.cpp +++ /dev/null @@ -1,161 +0,0 @@ -//***************************************************************************** -// Copyright 2017-2020 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** - -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "util/type_prop.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START - -using namespace std; -using namespace ngraph; - -TEST(type_prop, any_deduce) -{ - auto param_0 = make_shared(element::boolean, Shape{2, 4}); - - auto r0 = make_shared(param_0, AxisSet{0}); - ASSERT_EQ(r0->get_element_type(), element::boolean); - ASSERT_EQ(r0->get_shape(), (Shape{4})); - - auto r1 = make_shared(param_0, AxisSet{1}); - ASSERT_EQ(r1->get_element_type(), element::boolean); - ASSERT_EQ(r1->get_shape(), (Shape{2})); - - auto r01 = make_shared(param_0, AxisSet{0, 1}); - ASSERT_EQ(r01->get_element_type(), element::boolean); - ASSERT_EQ(r01->get_shape(), (Shape{})); - - auto r_none = make_shared(param_0, AxisSet{}); - ASSERT_EQ(r_none->get_element_type(), element::boolean); - ASSERT_EQ(r_none->get_shape(), (Shape{2, 4})); -} - -TEST(type_prop, any_deduce_et_dynamic) -{ - auto param_0 = make_shared(element::dynamic, Shape{2, 4}); - - auto r0 = make_shared(param_0, AxisSet{0}); - ASSERT_EQ(r0->get_element_type(), element::boolean); - ASSERT_EQ(r0->get_shape(), (Shape{4})); - - auto r1 = make_shared(param_0, 
AxisSet{1}); - ASSERT_EQ(r1->get_element_type(), element::boolean); - ASSERT_EQ(r1->get_shape(), (Shape{2})); - - auto r01 = make_shared(param_0, AxisSet{0, 1}); - ASSERT_EQ(r01->get_element_type(), element::boolean); - ASSERT_EQ(r01->get_shape(), (Shape{})); - - auto r_none = make_shared(param_0, AxisSet{}); - ASSERT_EQ(r_none->get_element_type(), element::boolean); - ASSERT_EQ(r_none->get_shape(), (Shape{2, 4})); -} - -TEST(type_prop, any_et_non_boolean) -{ - auto param_0 = make_shared(element::i32, Shape{2, 4}); - - try - { - auto r = make_shared(param_0, AxisSet{0, 1}); - // Should have thrown, so fail if it didn't - FAIL() << "Did not detect invalid element type for Any"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING(error.what(), std::string("Input element type must be boolean")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, any_axis_oob) -{ - auto param_0 = make_shared(element::boolean, Shape{2, 4}); - - try - { - auto r = make_shared(param_0, AxisSet{0, 2, 1}); - // Should have thrown, so fail if it didn't - FAIL() << "Did not detect out-of-bound axis for Any"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis (2) is out of bounds")); - } - catch (...) 
- { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} - -TEST(type_prop, any_partial_rank_dynamic) -{ - auto param = make_shared(element::boolean, PartialShape::dynamic()); - auto axes = AxisSet{2385, 0, 4404}; // arbitrary - auto any = make_shared(param, axes); - - EXPECT_EQ(any->get_output_element_type(0), element::boolean); - EXPECT_TRUE(any->get_output_partial_shape(0).is_dynamic()); -} - -TEST(type_prop, any_partial_rank_static_dynamic_ok_result_static) -{ - auto param = make_shared(element::boolean, - PartialShape{1, 2, Dimension::dynamic(), 4, 5}); - auto axes = AxisSet{2, 3}; - auto any = make_shared(param, axes); - - EXPECT_EQ(any->get_output_element_type(0), element::boolean); - EXPECT_EQ(any->get_shape(), (Shape{1, 2, 5})); -} - -TEST(type_prop, any_partial_rank_static_dynamic_ok_result_dynamic) -{ - auto param = make_shared( - element::boolean, PartialShape{1, 2, Dimension::dynamic(), 4, Dimension::dynamic()}); - auto axes = AxisSet{2, 3}; - auto any = make_shared(param, axes); - - EXPECT_EQ(any->get_output_element_type(0), element::boolean); - EXPECT_TRUE( - any->get_output_partial_shape(0).same_scheme(PartialShape{1, 2, Dimension::dynamic()})); -} - -TEST(type_prop, any_partial_rank_static_dynamic_axes_oob) -{ - auto param = make_shared( - element::boolean, PartialShape{1, 2, Dimension::dynamic(), 4, Dimension::dynamic()}); - auto axes = AxisSet{2, 5, 1}; - - try - { - auto any = make_shared(param, axes); - // Should have thrown, so fail if it didn't - FAIL() << "Did not detect out-of-bound axis for Any (rank-static dynamic input)"; - } - catch (const NodeValidationFailure& error) - { - EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis (5) is out of bounds")); - } - catch (...) 
- { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} From a405546054b031b043cfc5fc3fcef0d81fc5a1a5 Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Tue, 20 Oct 2020 13:40:06 +0300 Subject: [PATCH 34/35] Add LogSoftmax-5 to MO and ngraph (#2409) Co-authored-by: Evgeny Lazarev --- .../log_softmax_decomposition.hpp | 26 ++ .../common_optimizations.cpp | 2 + .../log_softmax_decomposition.cpp | 44 +++ .../ngraph_reader/log_softmax_tests.cpp | 166 +++++++- .../log_softmax_decomposition_test.cpp | 52 +++ model-optimizer/automation/package_BOM.txt | 3 +- .../extensions/front/LogSoftmax.py | 92 ----- .../extensions/front/LogSoftmax_test.py | 86 ---- .../front/kaldi/logsoftmax_component_ext.py | 2 +- .../front/onnx/flattenONNX_to_reshape.py | 4 - .../onnx/logsoftmaxONNX_to_logsoftmax.py | 2 +- .../extensions/front/onnx/softmax_ext.py | 3 +- .../extensions/front/tf/log_softmax_ext.py | 32 ++ .../extensions/front/tf/softmax_ext.py | 16 +- model-optimizer/mo/ops/log_softmax.py | 67 ++++ model-optimizer/mo/ops/softmax.py | 32 -- .../ngraph/runtime/reference/log_softmax.hpp | 62 +++ ngraph/test/CMakeLists.txt | 1 + ngraph/test/backend/log_softmax.in.cpp | 368 ++++++++++++++++++ ngraph/test/runtime/ie/ie_executable.cpp | 4 + ngraph/test/runtime/ie/unit_test.manifest | 7 + .../runtime/interpreter/int_executable.hpp | 15 + .../runtime/interpreter/opset_int_tbl.hpp | 1 + 23 files changed, 847 insertions(+), 240 deletions(-) create mode 100644 inference-engine/src/transformations/include/transformations/op_conversions/log_softmax_decomposition.hpp create mode 100644 inference-engine/src/transformations/src/transformations/op_conversions/log_softmax_decomposition.cpp create mode 100644 inference-engine/tests/functional/inference_engine/transformations/log_softmax_decomposition_test.cpp delete mode 100644 model-optimizer/extensions/front/LogSoftmax.py delete mode 100644 model-optimizer/extensions/front/LogSoftmax_test.py create mode 100644 
model-optimizer/extensions/front/tf/log_softmax_ext.py create mode 100644 model-optimizer/mo/ops/log_softmax.py create mode 100644 ngraph/core/reference/include/ngraph/runtime/reference/log_softmax.hpp create mode 100644 ngraph/test/backend/log_softmax.in.cpp diff --git a/inference-engine/src/transformations/include/transformations/op_conversions/log_softmax_decomposition.hpp b/inference-engine/src/transformations/include/transformations/op_conversions/log_softmax_decomposition.hpp new file mode 100644 index 00000000000000..acbcf40f2d0110 --- /dev/null +++ b/inference-engine/src/transformations/include/transformations/op_conversions/log_softmax_decomposition.hpp @@ -0,0 +1,26 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +namespace ngraph { +namespace pass { + + class TRANSFORMATIONS_API LogSoftmaxDecomposition; + +} // namespace pass +} // namespace ngraph + +/** + * @ingroup ie_transformation_common_api + * @brief LogSoftmaxDecomposition transformation into sub-graph x - log(reduce_sum(exp(x), axis)). 
+ */ +class ngraph::pass::LogSoftmaxDecomposition : public ngraph::pass::MatcherPass { +public: + NGRAPH_RTTI_DECLARATION; + LogSoftmaxDecomposition(); +}; diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp index f4e5df8600afae..059faa72337fec 100644 --- a/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp +++ b/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp @@ -41,6 +41,7 @@ #include "transformations/op_conversions/reduce_l1_decomposition.hpp" #include "transformations/op_conversions/reduce_l2_decomposition.hpp" #include "transformations/op_conversions/hswish_decomposition.hpp" +#include "transformations/op_conversions/log_softmax_decomposition.hpp" #include #include @@ -78,6 +79,7 @@ bool ngraph::pass::CommonOptimizations::run_on_function(std::shared_ptradd_matcher(); decomp->add_matcher(); decomp->add_matcher(); + decomp->add_matcher(); decomp->add_matcher(); decomp->add_matcher(); decomp->add_matcher(); diff --git a/inference-engine/src/transformations/src/transformations/op_conversions/log_softmax_decomposition.cpp b/inference-engine/src/transformations/src/transformations/op_conversions/log_softmax_decomposition.cpp new file mode 100644 index 00000000000000..12c4d2535bdf69 --- /dev/null +++ b/inference-engine/src/transformations/src/transformations/op_conversions/log_softmax_decomposition.cpp @@ -0,0 +1,44 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "transformations/op_conversions/log_softmax_decomposition.hpp" + +#include + +#include +#include +#include + +NGRAPH_RTTI_DEFINITION(ngraph::pass::LogSoftmaxDecomposition, "LogSoftmaxDecomposition", 0); + +ngraph::pass::LogSoftmaxDecomposition::LogSoftmaxDecomposition() { + // 
Decomposes LogSoftmax(x, axis) op into sub-graph x - log(reduce_sum(exp(x), axis)) + auto log_softmax = ngraph::pattern::wrap_type(); + + ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { + auto& pattern_to_output = m.get_pattern_value_map(); + auto log_softmax_node = std::dynamic_pointer_cast(pattern_to_output.at(log_softmax).get_node_shared_ptr()); + + if (m_transformation_callback(log_softmax_node)) { + return false; + } + + auto axis1 = ngraph::opset5::Constant::create(element::Type_t::i64, ngraph::Shape{1}, { log_softmax_node->get_axis() }); + auto axis2 = ngraph::opset5::Constant::create(element::Type_t::i64, ngraph::Shape{1}, { log_softmax_node->get_axis() }); + auto max = std::make_shared(log_softmax_node->input_value(0), axis1, true); + auto sub = std::make_shared(log_softmax_node->input_value(0), max); + auto exp = std::make_shared(sub); + auto sum = std::make_shared(exp, axis2, true); + auto log = std::make_shared(sum); + auto sub_end = std::make_shared(sub, log); + + sub_end->set_friendly_name(m.get_match_root()->get_friendly_name()); + ngraph::copy_runtime_info(log_softmax_node, { axis1, axis2, max, sub, exp, sum, log, sub_end }); + ngraph::replace_node(m.get_match_root(), sub_end); + return true; + }; + + auto m = std::make_shared(log_softmax, "LogSoftmaxDecomposition"); + register_matcher(m, callback); +} diff --git a/inference-engine/tests/functional/inference_engine/ngraph_reader/log_softmax_tests.cpp b/inference-engine/tests/functional/inference_engine/ngraph_reader/log_softmax_tests.cpp index f3a0a01d074b5d..7b452f4f1bd28b 100644 --- a/inference-engine/tests/functional/inference_engine/ngraph_reader/log_softmax_tests.cpp +++ b/inference-engine/tests/functional/inference_engine/ngraph_reader/log_softmax_tests.cpp @@ -17,7 +17,7 @@ TEST_F(NGraphReaderTests, ReadLogSoftmaxNetwork) { - + @@ -47,7 +47,7 @@ TEST_F(NGraphReaderTests, ReadLogSoftmaxNetwork) { )V0G0N"; - std::string modelV5 = R"V0G0N( + std::string model_ref = 
R"V0G0N( @@ -58,16 +58,153 @@ TEST_F(NGraphReaderTests, ReadLogSoftmaxNetwork) { - - + + + + 1 + + + + + + + + + + + 1 + 1000 + + + 1 + + + + + 1 + 1 + + + + + + + + 1 + 1 + + + + + 1 + 1 + + + + + + + + 1 + 1000 + + + 1 + 1 + + + + + 1 + 1000 + + + + + + + 1 + 1000 + + + + + 1 + 1000 + + + + + + + 1 + + + + + + + + + + 1 + 1000 + 1 + + + + + 1 + 1 + + + + + + + 1 + 1 + + + + + 1 + 1 + + + + + + + + 1 + 1 + + + + + 1 + 1 + + + + + + + + 1 1000 + + 1 + 1 + - + 1 1000 @@ -75,10 +212,25 @@ TEST_F(NGraphReaderTests, ReadLogSoftmaxNetwork) { - + + + + + + + + + + + + )V0G0N"; - compareIRs(model, modelV5, 0); + compareIRs(model, model_ref, 16, [](Blob::Ptr& weights) { + auto* data = reinterpret_cast(weights->buffer().as()); + data[0] = 1; + data[1] = 1; + }); } diff --git a/inference-engine/tests/functional/inference_engine/transformations/log_softmax_decomposition_test.cpp b/inference-engine/tests/functional/inference_engine/transformations/log_softmax_decomposition_test.cpp new file mode 100644 index 00000000000000..b6e5884fd20490 --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/transformations/log_softmax_decomposition_test.cpp @@ -0,0 +1,52 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "common_test_utils/ngraph_test_utils.hpp" + +using namespace testing; + +TEST(TransformationTests, LogSoftmaxDecomposition) { + std::shared_ptr f(nullptr), f_ref(nullptr); + { + auto data = std::make_shared(ngraph::element::f32, ngraph::Shape{3, 2}); + auto log_softmax = std::make_shared(data, 1); + + f = std::make_shared(ngraph::NodeVector{log_softmax}, ngraph::ParameterVector{data}); + + ngraph::pass::Manager manager; + manager.register_pass(); + manager.register_pass(); + manager.run_passes(f); + ASSERT_NO_THROW(check_rt_info(f)); + } + + { + auto input0 = std::make_shared(ngraph::element::f64, ngraph::Shape{3, 
2}); + auto axis1_const = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto max = std::make_shared(input0, axis1_const, true); + auto sub = std::make_shared(input0, max); + auto exp = std::make_shared(sub); + auto axis2_const = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto sum = std::make_shared(exp, axis2_const, true); + auto log = std::make_shared(sum); + auto sub_end = std::make_shared(sub, log); + + f_ref = std::make_shared(ngraph::NodeVector{sub_end}, ngraph::ParameterVector{input0}); + } + + auto res = compare_functions(f, f_ref); + ASSERT_TRUE(res.first) << res.second; +} diff --git a/model-optimizer/automation/package_BOM.txt b/model-optimizer/automation/package_BOM.txt index b3a599a17cea90..4990bd90e6e92a 100644 --- a/model-optimizer/automation/package_BOM.txt +++ b/model-optimizer/automation/package_BOM.txt @@ -150,7 +150,6 @@ extensions/front/kaldi/tanh_component_ext.py extensions/front/kaldi/tdnn_component_replacer.py extensions/front/LayerNorm.py extensions/front/Log1p.py -extensions/front/LogSoftmax.py extensions/front/MatMul_normalizer.py extensions/front/Mish_fusion.py extensions/front/MoveEmbeddedInputsToInputs.py @@ -390,6 +389,7 @@ extensions/front/tf/identity_ext.py extensions/front/tf/identityN_to_identity.py extensions/front/tf/InterpolateTransposes.py extensions/front/tf/IteratorGetNext_ext.py +extensions/front/tf/log_softmax_ext.py extensions/front/tf/LookupTableInsert_ext.py extensions/front/tf/LoopCond_ext.py extensions/front/tf/lrn_ext.py @@ -905,6 +905,7 @@ mo/ops/expand_dims.py mo/ops/fill.py mo/ops/flatten.py mo/ops/group_norm.py +mo/ops/log_softmax.py mo/ops/lrn.py mo/ops/lstmnonlinearity.py mo/ops/memory.py diff --git a/model-optimizer/extensions/front/LogSoftmax.py b/model-optimizer/extensions/front/LogSoftmax.py deleted file mode 100644 index 4a7e2de28fa3aa..00000000000000 --- a/model-optimizer/extensions/front/LogSoftmax.py +++ /dev/null @@ -1,92 +0,0 @@ 
-""" - Copyright (C) 2018-2020 Intel Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -""" -from extensions.ops.ReduceOps import ReduceMax, ReduceSum -from extensions.ops.activation_ops import Exp, Log -from extensions.ops.elementwise import Sub -from mo.front.common.partial_infer.utils import int64_array -from mo.front.common.replacement import FrontReplacementOp -from mo.front.tf.graph_utils import create_op_with_const_inputs -from mo.graph.graph import Graph, Node, rename_nodes - - -class LogSoftmaxFrontReplacer(FrontReplacementOp): - """ - Replace LogSoftmax operation with ReduceMax + Sub + Exp + ReduceSum + Log + Sub. - - More precisely, this transformation implements the following formulas of the calculation of LogSoftmax: - - shifted_data = input_data - ReduceMax(input_data, axis), (1) - output = shifted_data - Log(ReduceSum(Exp(shifted_data), axis)). - - These formulas is used to calculate LogSoftmax in implementation of TensorFlow (see - https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/softmax_op_functor.h), - Kaldi (see https://github.com/kaldi-asr/kaldi/blob/master/src/cudamatrix/cu-kernels.cu), - MxNet (see https://github.com/apache/incubator-mxnet/blob/master/src/operator/nn/softmax-inl.h). 
- - ONNX implements LogSoftmax according to formulas - - flatten_data = Flatten(input_data, axis), (1') - shifted_data = flatten_data - ReduceMax(flatten_data, 1), - z = shifted_data - Log(ReduceSum(Exp(shifted_data), 1)), - output = Reshape(z, input_data.shape) - - (see https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/core/codegen/mti/math/logsoftmax.cc, - https://github.com/microsoft/onnxruntime-tvm/blob/master/topi/include/topi/nn/softmax.h) - - Formally speaking, the formula (1) is equivalent to the formula - output = Log(SoftMax(input_data, axis)) (2) - - But LogSoftMax is calculated according to formula (1) for better numeric stability. - """ - op = "LogSoftmax" - enabled = True - - def replace_op(self, graph: Graph, node: Node): - node_name = node.soft_get('name', node.id) - assert node.has_valid('axis'), 'The node "{}" does not have mandatory attribute "axis"'.format(node_name) - - # Creating of ReduceMax -> Sub -> Exp block - first_sub_node = Sub(graph, {'name': node_name + '/Sub_/first_'}).create_node() - reduce_max_node = create_op_with_const_inputs(graph, - ReduceMax, - {1: int64_array([node.axis])}, - op_attrs={'name': node_name + '/ReduceMax_', 'keep_dims': True}) - reduce_max_node.out_port(0).connect(first_sub_node.in_port(1)) - - # Creating of Exp -> ReduceSum -> Log block - exp_node = Exp(graph, {'name': node_name + '/Exp_'}).create_node() - reduce_sum_node = create_op_with_const_inputs(graph, - ReduceSum, - {1: int64_array([node.axis])}, - op_attrs={'name': node_name + '/ReduceSum_', 'keep_dims': True}) - log_node = Log(graph, {'name': node_name + '/Log_'}).create_node() - - first_sub_node.out_port(0).connect(exp_node.in_port(0)) - exp_node.out_port(0).connect(reduce_sum_node.in_port(0)) - reduce_sum_node.out_port(0).connect(log_node.in_port(0)) - - # Creating of the last Sub node - second_sub_node = Sub(graph, {}).create_node() - rename_nodes([(node, node_name + '/delete'), (second_sub_node, node_name)]) - 
log_node.out_port(0).connect(second_sub_node.in_port(1)) - first_sub_node.out_port(0).connect(second_sub_node.in_port(0)) - - # Correcting of input edges - source = node.in_port(0).get_source() - first_sub_node.in_port(0).connect(source) - reduce_max_node.in_port(0).connect(source) - - return [second_sub_node.id] diff --git a/model-optimizer/extensions/front/LogSoftmax_test.py b/model-optimizer/extensions/front/LogSoftmax_test.py deleted file mode 100644 index 5c5050cb3a834d..00000000000000 --- a/model-optimizer/extensions/front/LogSoftmax_test.py +++ /dev/null @@ -1,86 +0,0 @@ -""" - Copyright (C) 2018-2020 Intel Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-""" - -import unittest - -from generator import generator, generate - -from extensions.front.LogSoftmax import LogSoftmaxFrontReplacer -from mo.front.common.partial_infer.utils import int64_array -from mo.utils.ir_engine.compare_graphs import compare_graphs -from mo.utils.unittest.graph import build_graph - -graph_node_attributes = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'logsoftmax': {'type': None, 'kind': 'op', 'op': 'LogSoftmax', 'axis': -1}, - 'output': {'kind': 'op', 'type': 'Result', 'op': 'Result'}, -} - - -graph_edges = [ - ('placeholder', 'logsoftmax'), - ('logsoftmax', 'output'), -] - - -graph_ref_node_attributes = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'exp': {'type': 'Exp', 'kind': 'op', 'op': 'Exp'}, - 'reduce_sum': {'type': 'ReduceSum', 'kind': 'op', 'op': 'ReduceSum', 'keep_dims': True}, - 'reduce_max': {'type': 'ReduceMax', 'kind': 'op', 'op': 'ReduceMax', 'keep_dims': True}, - 'log': {'type': 'Log', 'kind': 'op', 'op': 'Log'}, - 'second_sub': {'type': 'Subtract', 'kind': 'op', 'op': 'Sub'}, - 'reduce_sum_axis': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': None, 'shape': int64_array([1])}, - 'reduce_max_axis': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': None, 'shape': int64_array([1])}, - 'first_sub': {'type': 'Subtract', 'kind': 'op', 'op': 'Sub'}, - 'output': {'kind': 'op', 'type': 'Result', 'op': 'Result'}, -} - - -graph_ref_edges = [ - ('placeholder', 'reduce_max', {'in': 0, 'out': 0}), - ('placeholder', 'first_sub', {'in': 0, 'out': 0}), - ('reduce_max', 'first_sub', {'in': 1}), - ('reduce_max_axis', 'reduce_max', {'in': 1}), - ('first_sub', 'exp', {'in': 0, 'out': 0}), - ('first_sub', 'second_sub', {'in': 0, 'out': 0}), - ('exp', 'reduce_sum', {'in': 0}), - ('reduce_sum_axis', 'reduce_sum', {'in': 1}), - ('reduce_sum', 'log'), - ('log', 'second_sub', {'in': 1}), - ('second_sub', 'output'), -] - - -@generator -class 
LogSoftmaxReplacerTest(unittest.TestCase): - @generate(*[(-1, 'NCHW'), (-1, 'NHWC'), (0, 'NHWC'), - (0, 'NCHW'), (2, 'NCHW'), (2, 'NHWC'), - (-2, 'NHWC'), (-2, 'NCHW')]) - def test_logsoftmax_replacer(self, axis, layout): - graph = build_graph(nodes_attrs=graph_node_attributes, edges=graph_edges) - graph_ref = build_graph(nodes_attrs=graph_ref_node_attributes, - edges=graph_ref_edges, - update_attributes={ - 'reduce_max_axis': {'value': int64_array([axis])}, - 'reduce_sum_axis': {'value': int64_array([axis])}, - }) - graph.graph['layout'] = layout - graph.stage = 'front' - LogSoftmaxFrontReplacer().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph_ref, 'output') - self.assertTrue(flag, resp) - diff --git a/model-optimizer/extensions/front/kaldi/logsoftmax_component_ext.py b/model-optimizer/extensions/front/kaldi/logsoftmax_component_ext.py index 8d4ddc6ff4331a..3f60ae944dafeb 100644 --- a/model-optimizer/extensions/front/kaldi/logsoftmax_component_ext.py +++ b/model-optimizer/extensions/front/kaldi/logsoftmax_component_ext.py @@ -14,7 +14,7 @@ limitations under the License. 
""" -from mo.ops.softmax import LogSoftmax +from mo.ops.log_softmax import LogSoftmax from mo.front.extractor import FrontExtractorOp diff --git a/model-optimizer/extensions/front/onnx/flattenONNX_to_reshape.py b/model-optimizer/extensions/front/onnx/flattenONNX_to_reshape.py index bffd69d9a79950..02c2d1f8b489d4 100644 --- a/model-optimizer/extensions/front/onnx/flattenONNX_to_reshape.py +++ b/model-optimizer/extensions/front/onnx/flattenONNX_to_reshape.py @@ -36,10 +36,6 @@ class FlattenONNXToReshape(FrontReplacementSubgraph): """ enabled = True - def run_before(self): - from extensions.front.LogSoftmax import LogSoftmaxFrontReplacer - return [LogSoftmaxFrontReplacer] - def pattern(self): return dict(nodes=[('flatten', dict(op='FlattenONNX'))], edges=[]) diff --git a/model-optimizer/extensions/front/onnx/logsoftmaxONNX_to_logsoftmax.py b/model-optimizer/extensions/front/onnx/logsoftmaxONNX_to_logsoftmax.py index 24387861eecd2a..8a630460b20867 100644 --- a/model-optimizer/extensions/front/onnx/logsoftmaxONNX_to_logsoftmax.py +++ b/model-optimizer/extensions/front/onnx/logsoftmaxONNX_to_logsoftmax.py @@ -18,7 +18,7 @@ from mo.ops.flatten import FlattenONNX from mo.ops.reshape import Reshape from mo.ops.shape import Shape -from mo.ops.softmax import LogSoftmax +from mo.ops.log_softmax import LogSoftmax class LogSoftmaxONNXFrontReplacer(FrontReplacementOp): diff --git a/model-optimizer/extensions/front/onnx/softmax_ext.py b/model-optimizer/extensions/front/onnx/softmax_ext.py index 0a3c524582762f..59d92d233a6f30 100644 --- a/model-optimizer/extensions/front/onnx/softmax_ext.py +++ b/model-optimizer/extensions/front/onnx/softmax_ext.py @@ -16,7 +16,8 @@ from mo.front.extractor import FrontExtractorOp from mo.front.onnx.extractors.utils import onnx_attr -from mo.ops.softmax import LogSoftmaxONNX, SoftmaxONNX +from mo.ops.softmax import SoftmaxONNX +from mo.ops.log_softmax import LogSoftmaxONNX class SoftmaxExtractor(FrontExtractorOp): diff --git 
a/model-optimizer/extensions/front/tf/log_softmax_ext.py b/model-optimizer/extensions/front/tf/log_softmax_ext.py new file mode 100644 index 00000000000000..64c6e839b139aa --- /dev/null +++ b/model-optimizer/extensions/front/tf/log_softmax_ext.py @@ -0,0 +1,32 @@ +""" + Copyright (C) 2018-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +""" + +from mo.front.extractor import FrontExtractorOp +from mo.ops.log_softmax import LogSoftmax + + +class LogSoftmaxExtractor(FrontExtractorOp): + op = 'LogSoftmax' + enabled = True + + @classmethod + def extract(cls, node): + # the default value for the TF LogSoftmax is -1 + axis = -1 + if 'axis' in node.pb.attr: + axis = node.pb.attr['axis'].i + LogSoftmax.update_node_stat(node, {'axis': axis}) + return cls.enabled diff --git a/model-optimizer/extensions/front/tf/softmax_ext.py b/model-optimizer/extensions/front/tf/softmax_ext.py index 94c2b0ff4affef..fc4461abce77ed 100644 --- a/model-optimizer/extensions/front/tf/softmax_ext.py +++ b/model-optimizer/extensions/front/tf/softmax_ext.py @@ -15,7 +15,7 @@ """ from mo.front.extractor import FrontExtractorOp -from mo.ops.softmax import LogSoftmax, Softmax +from mo.ops.softmax import Softmax class SoftmaxExtractor(FrontExtractorOp): @@ -30,17 +30,3 @@ def extract(cls, node): axis = node.pb.attr['axis'].i Softmax.update_node_stat(node, {'axis': axis}) return cls.enabled - - -class LogSoftmaxExtractor(FrontExtractorOp): - op = 'LogSoftmax' - enabled = True - - @classmethod - def 
extract(cls, node): - # the default value for the TF LogSoftmax is -1 - axis = -1 - if 'axis' in node.pb.attr: - axis = node.pb.attr['axis'].i - LogSoftmax.update_node_stat(node, {'axis': axis}) - return cls.enabled diff --git a/model-optimizer/mo/ops/log_softmax.py b/model-optimizer/mo/ops/log_softmax.py new file mode 100644 index 00000000000000..fe6d6e9055d92f --- /dev/null +++ b/model-optimizer/mo/ops/log_softmax.py @@ -0,0 +1,67 @@ +""" + Copyright (C) 2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+""" + + +from mo.front.common.partial_infer.elemental import copy_shape_infer +from mo.graph.graph import Graph, Node +from mo.ops.op import Op, PermuteAttrs + + +class LogSoftmax(Op): + op = 'LogSoftmax' + enabled = False + + def __init__(self, graph: Graph, attrs: dict): + super().__init__(graph, { + 'type': self.op, + 'op': self.op, + 'version': 'opset5', + 'infer': self.infer, + 'axis': 1, + 'in_ports_count': 1, + 'out_ports_count': 1, + }, attrs) + + def supported_attrs(self): + return ['axis'] + + @staticmethod + def infer(node: Node): + assert len([port for port in node.in_ports().values() if not port.disconnected()]) == 1,\ + 'LogSoftmax node with id {} have more than one port connected'.format(node.id) + if node.axis < 0: + node.axis = len(node.in_port(0).data.get_shape()) + node.axis + assert 0 <= node.axis < len(node.in_port(0).data.get_shape()),\ + 'LogSoftmax node with id {} has wrong axis attribute'.format(node.id) + copy_shape_infer(node) + PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')]) + + +class LogSoftmaxONNX(Op): + op = 'LogSoftmaxONNX' + enabled = False + + def __init__(self, graph: Graph, attrs: dict): + super().__init__(graph, { + 'infer': None, + 'kind': 'op', + 'axis': 1, + 'type': None, # the operation will be replaced with a + # Reshape(LogSoftmax(FlattenONNX(x, axis), 1), x.shape) sub-graph + 'op': self.op, + 'in_ports_count': 1, + 'out_ports_count': 1, + }, attrs) diff --git a/model-optimizer/mo/ops/softmax.py b/model-optimizer/mo/ops/softmax.py index 8a6a2463db5de4..333a38061d9705 100644 --- a/model-optimizer/mo/ops/softmax.py +++ b/model-optimizer/mo/ops/softmax.py @@ -59,35 +59,3 @@ def __init__(self, graph: Graph, attrs: dict): 'in_ports_count': 1, 'out_ports_count': 1, }, attrs) - - -class LogSoftmax(Op): - op = 'LogSoftmax' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'infer': None, - 'kind': 'op', - 'axis': 1, - 'type': None, # the operation will be 
replaced with a x - Log(ReduceSum(Exp(x), axis)) sub-graph - 'op': __class__.op, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - -class LogSoftmaxONNX(Op): - op = 'LogSoftmaxONNX' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'infer': None, - 'kind': 'op', - 'axis': 1, - 'type': None, # the operation will be replaced with a - # Reshape(LogSoftmax(FlattenONNX(x, axis), 1), x.shape) sub-graph - 'op': __class__.op, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/log_softmax.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/log_softmax.hpp new file mode 100644 index 00000000000000..6e1caba0c33991 --- /dev/null +++ b/ngraph/core/reference/include/ngraph/runtime/reference/log_softmax.hpp @@ -0,0 +1,62 @@ +//***************************************************************************** +// Copyright 2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** + +#pragma once + +#include +#include "ngraph/coordinate_transform.hpp" +#include "ngraph/runtime/reference/max.hpp" +#include "ngraph/runtime/reference/sum.hpp" +#include "ngraph/shape_util.hpp" + +namespace ngraph +{ + namespace runtime + { + namespace reference + { + template + void log_softmax(const T* arg, T* out, const Shape& shape, const AxisSet& axes) + { + auto temp_shape = reduce(shape, axes, true); + auto temp_elements = shape_size(temp_shape); + auto temp_max = std::vector(temp_elements, 0); + auto temp_sum = std::vector(temp_elements, 0); + + max(arg, temp_max.data(), shape, axes, true); + + CoordinateTransform transform(shape); + CoordinateTransform temp_transform(temp_shape); + for (const Coordinate& coord : transform) + { + Coordinate temp_coord = reduce(coord, axes, true); + out[transform.index(coord)] = std::exp( + arg[transform.index(coord)] - temp_max[temp_transform.index(temp_coord)]); + } + + sum(out, temp_sum.data(), shape, axes, true); + + for (const Coordinate& coord : transform) + { + Coordinate temp_coord = reduce(coord, axes, true); + out[transform.index(coord)] = + (arg[transform.index(coord)] - temp_max[temp_transform.index(temp_coord)]) - + std::log(temp_sum[temp_transform.index(temp_coord)]); + } + } + } // namespace reference + } // namespace runtime +} // namespace ngraph diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index e39adcb2b9f543..706690a2397b4a 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -294,6 +294,7 @@ set(MULTI_TEST_SRC backend/group_convolution.in.cpp backend/interpolate.in.cpp backend/log.in.cpp + backend/log_softmax.in.cpp backend/logical_or.in.cpp backend/logical_xor.in.cpp backend/lrn.in.cpp diff --git a/ngraph/test/backend/log_softmax.in.cpp b/ngraph/test/backend/log_softmax.in.cpp new file mode 100644 index 00000000000000..1304e8156325b5 --- /dev/null +++ 
b/ngraph/test/backend/log_softmax.in.cpp @@ -0,0 +1,368 @@ +//***************************************************************************** +// Copyright 2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +// clang-format off +#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS +#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS +#endif + +#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS +#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS +#endif +// clang-format on + +#include "gtest/gtest.h" +#include "runtime/backend.hpp" +#include "ngraph/runtime/tensor.hpp" +#include "ngraph/ngraph.hpp" +#include "util/all_close.hpp" +#include "util/all_close_f.hpp" +#include "util/known_element_types.hpp" +#include "util/ndarray.hpp" +#include "util/test_control.hpp" +#include "util/test_tools.hpp" + +NGRAPH_SUPPRESS_DEPRECATED_START + +using namespace std; +using namespace ngraph; + +static string s_manifest = "${MANIFEST}"; + +NGRAPH_TEST(${BACKEND_NAME}, log_softmax_1d_single_value) +{ + Shape shape{1}; + auto A = make_shared(element::f32, shape); + + auto backend = runtime::Backend::create("${BACKEND_NAME}"); + + auto a = backend->create_tensor(element::f32, shape); + copy_data(a, vector{1}); + auto result = backend->create_tensor(element::f32, shape); + + std::vector expected_result{0}; + + auto f = make_shared(make_shared(A, 
0), ParameterVector{A}); + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); +} + +NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis0) +{ + Shape shape{2, 4}; + auto A = make_shared(element::f32, shape); + + auto backend = runtime::Backend::create("${BACKEND_NAME}"); + + auto a = backend->create_tensor(element::f32, shape); + copy_data(a, vector{0, 1, 2, 3, 10000, 10001, 10002, 10003}); + auto result = backend->create_tensor(element::f32, shape); + + std::vector expected_result{-10000., -10000., -10000., -10000., 0., 0., 0., 0.}; + + auto f = make_shared(make_shared(A, 0), ParameterVector{A}); + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); +} + +NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis1) +{ + Shape shape{2, 4}; + auto A = make_shared(element::f32, shape); + + auto backend = runtime::Backend::create("${BACKEND_NAME}"); + + auto a = backend->create_tensor(element::f32, shape); + copy_data(a, vector{0, 1, 2, 3, 10000, 10001, 10002, 10003}); + auto result = backend->create_tensor(element::f32, shape); + + std::vector expected_result{-3.4401896, + -2.4401896, + -1.4401897, + -0.4401897, + -3.4401896, + -2.4401896, + -1.4401897, + -0.4401897}; + + auto f = make_shared(make_shared(A, 1), ParameterVector{A}); + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); +} + +NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis_neg1) +{ + Shape shape{2, 4}; + auto A = make_shared(element::f32, shape); + + auto backend = runtime::Backend::create("${BACKEND_NAME}"); + + auto a = backend->create_tensor(element::f32, shape); + copy_data(a, vector{0, 1, 2, 3, 10000, 10001, 10002, 10003}); + auto result = backend->create_tensor(element::f32, shape); + + std::vector 
expected_result{-3.4401896, + -2.4401896, + -1.4401897, + -0.4401897, + -3.4401896, + -2.4401896, + -1.4401897, + -0.4401897}; + + auto f = make_shared(make_shared(A, -1), ParameterVector{A}); + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); +} + +NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis_neg2) +{ + Shape shape{2, 4}; + auto A = make_shared(element::f32, shape); + + auto backend = runtime::Backend::create("${BACKEND_NAME}"); + + auto a = backend->create_tensor(element::f32, shape); + copy_data(a, vector{0, 1, 2, 3, 10000, 10001, 10002, 10003}); + auto result = backend->create_tensor(element::f32, shape); + + std::vector expected_result{-10000., -10000., -10000., -10000., 0., 0., 0., 0.}; + + auto f = make_shared(make_shared(A, -2), ParameterVector{A}); + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); +} + +NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_0) +{ + Shape shape{3, 2, 3}; + auto A = make_shared(element::f32, shape); + + auto backend = runtime::Backend::create("${BACKEND_NAME}"); + + auto a = backend->create_tensor(element::f32, shape); + copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}); + auto result = backend->create_tensor(element::f32, shape); + + std::vector expected_result{-12.0024818, + -12.0024818, + -12.0024818, + -12.0024818, + -12.0024818, + -12.0024818, + -6.00248181, + -6.00248181, + -6.00248181, + -6.00248181, + -6.00248181, + -6.00248181, + -2.48181414e-03, + -2.48181414e-03, + -2.48181414e-03, + -2.48181414e-03, + -2.48181414e-03, + -2.48181414e-03}; + + auto f = make_shared(make_shared(A, 0), ParameterVector{A}); + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); +} + 
+NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_1) +{ + Shape shape{3, 2, 3}; + auto A = make_shared(element::f32, shape); + + auto backend = runtime::Backend::create("${BACKEND_NAME}"); + + auto a = backend->create_tensor(element::f32, shape); + copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}); + auto result = backend->create_tensor(element::f32, shape); + + std::vector expected_result{-3.04858735, + -3.04858735, + -3.04858735, + -0.04858735, + -0.04858735, + -0.04858735, + -3.04858735, + -3.04858735, + -3.04858735, + -0.04858735, + -0.04858735, + -0.04858735, + -3.04858735, + -3.04858735, + -3.04858735, + -0.04858735, + -0.04858735, + -0.04858735}; + + auto f = make_shared(make_shared(A, 1), ParameterVector{A}); + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); +} + +NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_2) +{ + Shape shape{3, 2, 3}; + auto A = make_shared(element::f32, shape); + + auto backend = runtime::Backend::create("${BACKEND_NAME}"); + + auto a = backend->create_tensor(element::f32, shape); + copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}); + auto result = backend->create_tensor(element::f32, shape); + + std::vector expected_result{-2.40760596, + -1.40760596, + -0.40760596, + -2.40760596, + -1.40760596, + -0.40760596, + -2.40760596, + -1.40760596, + -0.40760596, + -2.40760596, + -1.40760596, + -0.40760596, + -2.40760596, + -1.40760596, + -0.40760596, + -2.40760596, + -1.40760596, + -0.40760596}; + + auto f = make_shared(make_shared(A, 2), ParameterVector{A}); + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); +} + +NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_neg1) +{ + Shape shape{3, 2, 3}; + auto A = make_shared(element::f32, shape); + + auto backend = 
runtime::Backend::create("${BACKEND_NAME}"); + + auto a = backend->create_tensor(element::f32, shape); + copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}); + auto result = backend->create_tensor(element::f32, shape); + + std::vector expected_result{-2.40760596, + -1.40760596, + -0.40760596, + -2.40760596, + -1.40760596, + -0.40760596, + -2.40760596, + -1.40760596, + -0.40760596, + -2.40760596, + -1.40760596, + -0.40760596, + -2.40760596, + -1.40760596, + -0.40760596, + -2.40760596, + -1.40760596, + -0.40760596}; + + auto f = make_shared(make_shared(A, -1), ParameterVector{A}); + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); +} + +NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_neg2) +{ + Shape shape{3, 2, 3}; + auto A = make_shared(element::f32, shape); + + auto backend = runtime::Backend::create("${BACKEND_NAME}"); + + auto a = backend->create_tensor(element::f32, shape); + copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}); + auto result = backend->create_tensor(element::f32, shape); + + std::vector expected_result{-3.04858735, + -3.04858735, + -3.04858735, + -0.04858735, + -0.04858735, + -0.04858735, + -3.04858735, + -3.04858735, + -3.04858735, + -0.04858735, + -0.04858735, + -0.04858735, + -3.04858735, + -3.04858735, + -3.04858735, + -0.04858735, + -0.04858735, + -0.04858735}; + + auto f = make_shared(make_shared(A, -2), ParameterVector{A}); + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); +} + +NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_neg3) +{ + Shape shape{3, 2, 3}; + auto A = make_shared(element::f32, shape); + + auto backend = runtime::Backend::create("${BACKEND_NAME}"); + + auto a = backend->create_tensor(element::f32, shape); + copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, 
-2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}); + auto result = backend->create_tensor(element::f32, shape); + + std::vector expected_result{-12.0024818, + -12.0024818, + -12.0024818, + -12.0024818, + -12.0024818, + -12.0024818, + -6.00248181, + -6.00248181, + -6.00248181, + -6.00248181, + -6.00248181, + -6.00248181, + -2.48181414e-03, + -2.48181414e-03, + -2.48181414e-03, + -2.48181414e-03, + -2.48181414e-03, + -2.48181414e-03}; + + auto f = make_shared(make_shared(A, -3), ParameterVector{A}); + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close(expected_result, read_vector(result))); +} diff --git a/ngraph/test/runtime/ie/ie_executable.cpp b/ngraph/test/runtime/ie/ie_executable.cpp index d3f959cd7a3863..eba5a300e346e9 100644 --- a/ngraph/test/runtime/ie/ie_executable.cpp +++ b/ngraph/test/runtime/ie/ie_executable.cpp @@ -85,6 +85,10 @@ namespace ie_ops.insert(opset2.begin(), opset2.end()); auto& opset3 = get_opset3().get_type_info_set(); ie_ops.insert(opset3.begin(), opset3.end()); + auto& opset4 = get_opset4().get_type_info_set(); + ie_ops.insert(opset4.begin(), opset4.end()); + auto& opset5 = get_opset5().get_type_info_set(); + ie_ops.insert(opset5.begin(), opset5.end()); return ie_ops; } } diff --git a/ngraph/test/runtime/ie/unit_test.manifest b/ngraph/test/runtime/ie/unit_test.manifest index abee8acb08fee2..2b8e949435fa6b 100644 --- a/ngraph/test/runtime/ie/unit_test.manifest +++ b/ngraph/test/runtime/ie/unit_test.manifest @@ -1130,6 +1130,13 @@ IE_CPU.onnx_resize11_scales_nearest_asymmetric_floor_dynamic_sizes # Input data precision not supported. Expected float. 
ctc_greedy_decoder_f16 +# Wrong output when axis 0 +IE_CPU.log_softmax_1d_single_value +IE_CPU.log_softmax_2d_axis0 +IE_CPU.log_softmax_2d_axis_neg2 +IE_CPU.log_softmax_3d_axis_0 +IE_CPU.log_softmax_3d_axis_neg3 + #------------------------------------------------------------------------------- # # Inference Engine GPU plugin excludes diff --git a/ngraph/test/runtime/interpreter/int_executable.hpp b/ngraph/test/runtime/interpreter/int_executable.hpp index cc54b84f3ef2aa..bd5db5ed66fd9b 100644 --- a/ngraph/test/runtime/interpreter/int_executable.hpp +++ b/ngraph/test/runtime/interpreter/int_executable.hpp @@ -62,6 +62,7 @@ #include "ngraph/runtime/reference/gather_tree.hpp" #include "ngraph/runtime/reference/gru_cell.hpp" #include "ngraph/runtime/reference/log.hpp" +#include "ngraph/runtime/reference/log_softmax.hpp" #include "ngraph/runtime/reference/lrn.hpp" #include "ngraph/runtime/reference/lstm_cell.hpp" #include "ngraph/runtime/reference/matmul.hpp" @@ -874,6 +875,20 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ args[0]->get_data_ptr(), out[0]->get_data_ptr(), element_count); break; } + case OP_TYPEID::LogSoftmax_v5: + { + const op::v5::LogSoftmax* log_softmax = static_cast(&node); + int64_t i_axis = log_softmax->get_axis(); + if (i_axis < 0) + { + i_axis += args[0]->get_partial_shape().rank().get_length(); + } + reference::log_softmax(args[0]->get_data_ptr(), + out[0]->get_data_ptr(), + node.get_output_shape(0), + AxisSet{(size_t)i_axis}); + break; + } case OP_TYPEID::LRN: { const op::LRN* lrn = static_cast(&node); diff --git a/ngraph/test/runtime/interpreter/opset_int_tbl.hpp b/ngraph/test/runtime/interpreter/opset_int_tbl.hpp index 4cfe6693f17e4b..61fa35ddec2e26 100644 --- a/ngraph/test/runtime/interpreter/opset_int_tbl.hpp +++ b/ngraph/test/runtime/interpreter/opset_int_tbl.hpp @@ -57,4 +57,5 @@ NGRAPH_OP(GatherND, op::v5) NGRAPH_OP(LSTMSequence, op::v5) NGRAPH_OP(GRUSequence, op::v5) NGRAPH_OP(RNNSequence, op::v5) 
+NGRAPH_OP(LogSoftmax, op::v5) #undef ID_SUFFIX From a2e49469b5da50f5584e5aafefa169e3d1f7b65d Mon Sep 17 00:00:00 2001 From: Irina Efode Date: Tue, 20 Oct 2020 14:31:59 +0300 Subject: [PATCH 35/35] Cleanup single_layer_tests (#2716) --- .../single_layer_tests/equal.cpp | 30 ---------- .../single_layer_tests/greater.cpp | 30 ---------- .../include/single_layer_tests/equal.hpp | 35 ------------ .../include/single_layer_tests/greater.hpp | 35 ------------ .../shared/src/single_layer_tests/equal.cpp | 56 ------------------- .../shared/src/single_layer_tests/greater.cpp | 56 ------------------- .../ngraph_functions/utils/ngraph_helpers.hpp | 3 +- .../tests/ngraph_functions/src/eltwise.cpp | 2 + 8 files changed, 4 insertions(+), 243 deletions(-) delete mode 100644 inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/equal.cpp delete mode 100644 inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/greater.cpp delete mode 100644 inference-engine/tests/functional/plugin/shared/include/single_layer_tests/equal.hpp delete mode 100644 inference-engine/tests/functional/plugin/shared/include/single_layer_tests/greater.hpp delete mode 100644 inference-engine/tests/functional/plugin/shared/src/single_layer_tests/equal.cpp delete mode 100644 inference-engine/tests/functional/plugin/shared/src/single_layer_tests/greater.cpp diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/equal.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/equal.cpp deleted file mode 100644 index ffd20a27934c83..00000000000000 --- a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/equal.cpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "single_layer_tests/equal.hpp" - -#include "common_test_utils/test_constants.hpp" - 
-#include - -using namespace LayerTestsDefinitions; - -namespace { - -std::vector> inShapes = { - {{200}, {200}}, - {{1000}, {1}}, - {{1, 256, 512}, {1, 256, 512}}, - {{1}, {1, 256, 512}}, -}; - -INSTANTIATE_TEST_CASE_P(smoke_equalS32, EqualLayerTest, - ::testing::Combine( - ::testing::ValuesIn(inShapes), - ::testing::Values(InferenceEngine::Precision::I32), - ::testing::Values(InferenceEngine::Precision::I32), - ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)), - EqualLayerTest::getTestCaseName); - -} // namespace diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/greater.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/greater.cpp deleted file mode 100644 index 14d693df09b5e8..00000000000000 --- a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/greater.cpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "single_layer_tests/greater.hpp" - -#include "common_test_utils/test_constants.hpp" - -#include - -using namespace LayerTestsDefinitions; - -namespace { - -std::vector> inShapes = { - {{200}, {200}}, - {{1000}, {1}}, - {{1, 256, 512}, {1, 256, 512}}, - {{1}, {1, 256, 512}}, -}; - -INSTANTIATE_TEST_CASE_P(smoke_greaterS32, GreaterLayerTest, - ::testing::Combine( - ::testing::ValuesIn(inShapes), - ::testing::Values(InferenceEngine::Precision::FP16), - ::testing::Values(InferenceEngine::Precision::I32), - ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)), - GreaterLayerTest::getTestCaseName); - -} // namespace diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/equal.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/equal.hpp deleted file mode 100644 index 8985f168a9c152..00000000000000 --- a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/equal.hpp +++ 
/dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "functional_test_utils/layer_test_utils.hpp" - -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" - -#include -#include -#include -#include -#include - -namespace LayerTestsDefinitions { - -using EqualTestParam = typename std::tuple< - std::vector, // Input shapes - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - LayerTestsUtils::TargetDevice>; // Config - -class EqualLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/greater.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/greater.hpp deleted file mode 100644 index 32b3b9b06ff5e0..00000000000000 --- a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/greater.hpp +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "functional_test_utils/layer_test_utils.hpp" - -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" - -#include -#include -#include -#include -#include - -namespace LayerTestsDefinitions { - -using GreaterTestParam = typename std::tuple< - std::vector, // Input shapes - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - LayerTestsUtils::TargetDevice>; // Config - -class GreaterLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const 
testing::TestParamInfo& obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/equal.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/equal.cpp deleted file mode 100644 index e92bb684986120..00000000000000 --- a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/equal.cpp +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (C) 2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "single_layer_tests/equal.hpp" - -#include "functional_test_utils/blob_utils.hpp" -#include "functional_test_utils/layer_test_utils.hpp" -#include "common_test_utils/common_utils.hpp" - -#include -#include -#include -#include -#include - - -namespace LayerTestsDefinitions { - -std::string EqualLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision inPrecision; - InferenceEngine::Precision outPrecision; - std::vector inputShapes; - std::string targetDevice; - - std::tie(inputShapes, inPrecision, outPrecision, targetDevice) = obj.param; - - std::ostringstream result; - result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_"; - result << "inPrc=" << inPrecision.name() << "_"; - result << "outPrc=" << outPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - - return result.str(); -} - -void EqualLayerTest::SetUp() { - std::vector inputShapes; - InferenceEngine::Precision inputPrecision = InferenceEngine::Precision::UNSPECIFIED; - - std::tie(inputShapes, inputPrecision, outPrc, targetDevice) = this->GetParam(); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); - auto paramsVector = ngraph::builder::makeParams(ngPrc, {inputShapes}); - IE_ASSERT(paramsVector.size() == 2); - - auto equalOp = std::make_shared(paramsVector[0], paramsVector[1]); - ngraph::ResultVector results{std::make_shared(equalOp)}; - - function = 
std::make_shared(results, paramsVector, "Equal"); -} - -TEST_P(EqualLayerTest, CompareWithRefs) { - Run(); -} -} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/greater.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/greater.cpp deleted file mode 100644 index 8687c71561ccc0..00000000000000 --- a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/greater.cpp +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (C) 2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "single_layer_tests/greater.hpp" - -#include "functional_test_utils/blob_utils.hpp" -#include "functional_test_utils/layer_test_utils.hpp" -#include "common_test_utils/common_utils.hpp" - -#include -#include -#include -#include -#include - - -namespace LayerTestsDefinitions { - -std::string GreaterLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision inPrecision; - InferenceEngine::Precision outPrecision; - std::vector inputShapes; - std::string targetDevice; - - std::tie(inputShapes, inPrecision, outPrecision, targetDevice) = obj.param; - - std::ostringstream result; - result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_"; - result << "inPrc=" << inPrecision.name() << "_"; - result << "outPrc=" << outPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - - return result.str(); -} - -void GreaterLayerTest::SetUp() { - std::vector inputShapes; - InferenceEngine::Precision inputPrecision = InferenceEngine::Precision::UNSPECIFIED; - - std::tie(inputShapes, inputPrecision, outPrc, targetDevice) = this->GetParam(); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); - auto paramsVector = ngraph::builder::makeParams(ngPrc, {inputShapes}); - IE_ASSERT(paramsVector.size() == 2); - - auto equalOp = std::make_shared(paramsVector[0], paramsVector[1]); - ngraph::ResultVector 
results{std::make_shared(equalOp)}; - - function = std::make_shared(results, paramsVector, "Greater"); -} - -TEST_P(GreaterLayerTest, CompareWithRefs) { - Run(); -} -} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp b/inference-engine/tests/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp index 9a32f229c4b7cf..2114f3fd4b1a1e 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp +++ b/inference-engine/tests/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp @@ -122,7 +122,8 @@ enum EltwiseTypes { DIVIDE, SQUARED_DIFF, POWER, - FLOOR_MOD + FLOOR_MOD, + MOD }; enum ComparisonTypes { diff --git a/inference-engine/tests/ngraph_functions/src/eltwise.cpp b/inference-engine/tests/ngraph_functions/src/eltwise.cpp index 9a2a52e5eb993c..1ed66932bee902 100644 --- a/inference-engine/tests/ngraph_functions/src/eltwise.cpp +++ b/inference-engine/tests/ngraph_functions/src/eltwise.cpp @@ -29,6 +29,8 @@ std::shared_ptr makeEltwise(const ngraph::Output &in0, return std::make_shared(in0, in1); case ngraph::helpers::EltwiseTypes::FLOOR_MOD: return std::make_shared(in0, in1); + case ngraph::helpers::EltwiseTypes::MOD: + return std::make_shared(in0, in1); default: { throw std::runtime_error("Incorrect type of Eltwise operation"); }