diff --git a/.gitignore b/.gitignore index 5b7ba4ac..a8928b0c 100644 --- a/.gitignore +++ b/.gitignore @@ -105,3 +105,16 @@ ENV/ # result files for demo static/results + +# model files +models/ + +# binary images +*.jpg +*.png + +# output directory +outputs/ + +# training dataset directory +data/ diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..5efafb3b --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "lanms/pybind11"] + path = lanms/pybind11 + url = https://github.com/pybind/pybind11/ diff --git a/EAST_colab.ipynb b/EAST_colab.ipynb new file mode 100644 index 00000000..d4f92851 --- /dev/null +++ b/EAST_colab.ipynb @@ -0,0 +1,5386 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "EAST_colab.ipynb", + "provenance": [], + "collapsed_sections": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + } + }, + "cells": [ + { + "cell_type": "code", + "metadata": { + "id": "NYxrBWUbMAMc", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "11ac6bc0-189e-4e94-d0e7-764249480e22" + }, + "source": [ + "!git clone -l -s --recursive git://github.com/burak-yildizoz/EAST.git EAST\n", + "!ls" + ], + "execution_count": 1, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Cloning into 'EAST'...\n", + "warning: --local is ignored\n", + "remote: Enumerating objects: 56, done.\u001b[K\n", + "remote: Counting objects: 100% (56/56), done.\u001b[K\n", + "remote: Compressing objects: 100% (40/40), done.\u001b[K\n", + "remote: Total 330 (delta 26), reused 39 (delta 15), pack-reused 274\u001b[K\n", + "Receiving objects: 100% (330/330), 2.03 MiB | 28.43 MiB/s, done.\n", + "Resolving deltas: 100% (155/155), done.\n", + "Submodule 'lanms/pybind11' (https://github.com/pybind/pybind11/) registered for path 'lanms/pybind11'\n", + "Cloning into '/content/EAST/lanms/pybind11'...\n", + "remote: Enumerating objects: 57, done. \n", + "remote: Counting objects: 100% (57/57), done. \n", + "remote: Compressing objects: 100% (40/40), done. \n", + "remote: Total 14631 (delta 19), reused 25 (delta 12), pack-reused 14574 \n", + "Receiving objects: 100% (14631/14631), 6.00 MiB | 25.28 MiB/s, done.\n", + "Resolving deltas: 100% (9892/9892), done.\n", + "Submodule path 'lanms/pybind11': checked out '8de7772cc72daca8e947b79b83fea46214931604'\n", + "EAST sample_data\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "by9zOZ4WUoxm", + "outputId": "2e663652-6aa0-400f-e0de-9e5b1e417dc2" + }, + "source": [ + "%cd EAST\n", + "!./ubuntu.sh\n", + "# fix from tensorflow.contrib import slim\n", + "!pip3 install tensorflow==1.15" + ], + "execution_count": 2, + "outputs": [ + { + "output_type": "stream", + "text": [ + "/content/EAST\n", + "tkinter already installed\n", + "unzip already installed\n", + "gdown already installed\n", + "Downloading east_icdar2015_resnet_v1_50_rbox\n", + "Downloading...\n", + "From: https://drive.google.com/uc?id=0B3APw5BZJ67ETHNPaU9xUkVoV0U\n", + "To: /content/EAST/models/east_icdar2015_resnet_v1_50_rbox.zip\n", + "363MB [00:02, 170MB/s]\n", + "Archive: east_icdar2015_resnet_v1_50_rbox.zip\n", + " inflating: east_icdar2015_resnet_v1_50_rbox/checkpoint \n", + " inflating: east_icdar2015_resnet_v1_50_rbox/model.ckpt-49491.data-00000-of-00001 \n", + " inflating: east_icdar2015_resnet_v1_50_rbox/model.ckpt-49491.index \n", + " inflating: east_icdar2015_resnet_v1_50_rbox/model.ckpt-49491.meta \n", + "Deleting east_icdar2015_resnet_v1_50_rbox.zip\n", + "Downloading resnet_v1_50.ckpt\n", + "--2021-03-17 16:02:05-- http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz\n", + "Resolving download.tensorflow.org (download.tensorflow.org)... 74.125.31.128, 2607:f8b0:400c:c02::80\n", + "Connecting to download.tensorflow.org (download.tensorflow.org)|74.125.31.128|:80... connected.\n", + "HTTP request sent, awaiting response... 
200 OK\n", + "Length: 95073259 (91M) [application/x-tar]\n", + "Saving to: ‘resnet_v1_50_2016_08_28.tar.gz’\n", + "\n", + "resnet_v1_50_2016_0 100%[===================>] 90.67M 145MB/s in 0.6s \n", + "\n", + "2021-03-17 16:02:06 (145 MB/s) - ‘resnet_v1_50_2016_08_28.tar.gz’ saved [95073259/95073259]\n", + "\n", + "resnet_v1_50.ckpt\n", + "Deleting resnet_v1_50_2016_08_28.tar.gz\n", + "-- The C compiler identification is GNU 7.5.0\n", + "-- The CXX compiler identification is GNU 7.5.0\n", + "-- Check for working C compiler: /usr/bin/cc\n", + "-- Check for working C compiler: /usr/bin/cc -- works\n", + "-- Detecting C compiler ABI info\n", + "-- Detecting C compiler ABI info - done\n", + "-- Detecting C compile features\n", + "-- Detecting C compile features - done\n", + "-- Check for working CXX compiler: /usr/bin/c++\n", + "-- Check for working CXX compiler: /usr/bin/c++ -- works\n", + "-- Detecting CXX compiler ABI info\n", + "-- Detecting CXX compiler ABI info - done\n", + "-- Detecting CXX compile features\n", + "-- Detecting CXX compile features - done\n", + "-- pybind11 v2.6.2 \n", + "-- Found PythonInterp: /usr/bin/python3.7 (found version \"3.7.10\") \n", + "-- Found PythonLibs: /usr/lib/x86_64-linux-gnu/libpython3.7m.so\n", + "-- Performing Test HAS_FLTO\n", + "-- Performing Test HAS_FLTO - Success\n", + "-- Configuring done\n", + "-- Generating done\n", + "-- Build files have been written to: /content/EAST/lanms/build\n", + "\u001b[35m\u001b[1mScanning dependencies of target lanms_library\u001b[0m\n", + "[ 9%] \u001b[32mBuilding CXX object CMakeFiles/lanms_library.dir/lanms.cpp.o\u001b[0m\n", + "[ 18%] \u001b[32mBuilding CXX object CMakeFiles/lanms_library.dir/include/clipper/clipper.cpp.o\u001b[0m\n", + "\u001b[01m\u001b[K/content/EAST/lanms/include/clipper/clipper.cpp:378:2:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kextra ‘\u001b[01m\u001b[K;\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wpedantic\u001b[m\u001b[K]\n", + " }\u001b[01;35m\u001b[K;\u001b[m\u001b[K\n", + " \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/content/EAST/lanms/include/clipper/clipper.cpp:\u001b[m\u001b[K In member function ‘\u001b[01m\u001b[Kvoid ClipperLib::Clipper::FixupFirstLefts3(ClipperLib::OutRec*, ClipperLib::OutRec*)\u001b[m\u001b[K’:\n", + "\u001b[01m\u001b[K/content/EAST/lanms/include/clipper/clipper.cpp:3665:13:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kunused variable ‘\u001b[01m\u001b[KfirstLeft\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wunused-variable\u001b[m\u001b[K]\n", + " OutRec* \u001b[01;35m\u001b[KfirstLeft\u001b[m\u001b[K = ParseFirstLeft(outRec->FirstLeft);\n", + " \u001b[01;35m\u001b[K^~~~~~~~~\u001b[m\u001b[K\n", + "[ 27%] \u001b[32m\u001b[1mLinking CXX shared library liblanms_library.so\u001b[0m\n", + "[ 27%] Built target lanms_library\n", + "\u001b[35m\u001b[1mScanning dependencies of target adaptor\u001b[0m\n", + "[ 36%] \u001b[32mBuilding CXX object CMakeFiles/adaptor.dir/adaptor.cpp.o\u001b[0m\n", + "\u001b[01m\u001b[K/content/EAST/lanms/adaptor.cpp:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[KPyObject* pybind11_init()\u001b[m\u001b[K’:\n", + "\u001b[01m\u001b[K/content/EAST/lanms/adaptor.cpp:54:31:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K‘\u001b[01m\u001b[Kpybind11::module_::module_(const char*, const char*)\u001b[m\u001b[K’ is deprecated: Use PYBIND11_MODULE or module_::create_extension_module instead [\u001b[01;35m\u001b[K-Wdeprecated-declarations\u001b[m\u001b[K]\n", + " py::module m(\"adaptor\", 
\"NMS\"\u001b[01;35m\u001b[K)\u001b[m\u001b[K;\n", + " \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n", + "In file included from \u001b[01m\u001b[K/content/EAST/lanms/adaptor.cpp:1:0\u001b[m\u001b[K:\n", + "\u001b[01m\u001b[K/content/EAST/lanms/pybind11/include/pybind11/pybind11.h:947:14:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kdeclared here\n", + " explicit \u001b[01;36m\u001b[Kmodule_\u001b[m\u001b[K(const char *name, const char *doc = nullptr) {\n", + " \u001b[01;36m\u001b[K^~~~~~~\u001b[m\u001b[K\n", + "[ 45%] \u001b[32mBuilding CXX object CMakeFiles/adaptor.dir/lanms.cpp.o\u001b[0m\n", + "[ 54%] \u001b[32mBuilding CXX object CMakeFiles/adaptor.dir/include/clipper/clipper.cpp.o\u001b[0m\n", + "\u001b[01m\u001b[K/content/EAST/lanms/include/clipper/clipper.cpp:378:2:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kextra ‘\u001b[01m\u001b[K;\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wpedantic\u001b[m\u001b[K]\n", + " }\u001b[01;35m\u001b[K;\u001b[m\u001b[K\n", + " \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/content/EAST/lanms/include/clipper/clipper.cpp:\u001b[m\u001b[K In member function ‘\u001b[01m\u001b[Kvoid ClipperLib::Clipper::FixupFirstLefts3(ClipperLib::OutRec*, ClipperLib::OutRec*)\u001b[m\u001b[K’:\n", + "\u001b[01m\u001b[K/content/EAST/lanms/include/clipper/clipper.cpp:3665:13:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kunused variable ‘\u001b[01m\u001b[KfirstLeft\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wunused-variable\u001b[m\u001b[K]\n", + " OutRec* \u001b[01;35m\u001b[KfirstLeft\u001b[m\u001b[K = ParseFirstLeft(outRec->FirstLeft);\n", + " \u001b[01;35m\u001b[K^~~~~~~~~\u001b[m\u001b[K\n", + "[ 63%] \u001b[32m\u001b[1mLinking CXX shared module adaptor.cpython-37m-x86_64-linux-gnu.so\u001b[0m\n", + "[ 63%] Built target adaptor\n", + "\u001b[35m\u001b[1mScanning dependencies of target main\u001b[0m\n", + "[ 72%] \u001b[32mBuilding CXX object CMakeFiles/main.dir/main.cpp.o\u001b[0m\n", + "[ 81%] \u001b[32mBuilding CXX object CMakeFiles/main.dir/lanms.cpp.o\u001b[0m\n", + "[ 90%] \u001b[32mBuilding CXX object CMakeFiles/main.dir/include/clipper/clipper.cpp.o\u001b[0m\n", + "\u001b[01m\u001b[K/content/EAST/lanms/include/clipper/clipper.cpp:378:2:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kextra ‘\u001b[01m\u001b[K;\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wpedantic\u001b[m\u001b[K]\n", + " }\u001b[01;35m\u001b[K;\u001b[m\u001b[K\n", + " \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n", + "\u001b[01m\u001b[K/content/EAST/lanms/include/clipper/clipper.cpp:\u001b[m\u001b[K In member function ‘\u001b[01m\u001b[Kvoid ClipperLib::Clipper::FixupFirstLefts3(ClipperLib::OutRec*, ClipperLib::OutRec*)\u001b[m\u001b[K’:\n", + "\u001b[01m\u001b[K/content/EAST/lanms/include/clipper/clipper.cpp:3665:13:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kunused variable ‘\u001b[01m\u001b[KfirstLeft\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wunused-variable\u001b[m\u001b[K]\n", + " OutRec* \u001b[01;35m\u001b[KfirstLeft\u001b[m\u001b[K = ParseFirstLeft(outRec->FirstLeft);\n", + " \u001b[01;35m\u001b[K^~~~~~~~~\u001b[m\u001b[K\n", + "[100%] \u001b[32m\u001b[1mLinking CXX executable main\u001b[0m\n", + "[100%] Built target main\n", + "Requirement already satisfied: Shapely in /usr/local/lib/python3.7/dist-packages (from -r test_requirements.txt (line 1)) (1.7.1)\n", + "Requirement already satisfied: Flask in /usr/local/lib/python3.7/dist-packages (from -r test_requirements.txt (line 2)) (1.1.2)\n", + "Requirement 
already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (from -r test_requirements.txt (line 3)) (3.2.2)\n", + "Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from -r test_requirements.txt (line 4)) (1.4.1)\n", + "Collecting plumbum\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/6c/fc/6cdaf59a001c707333869b054daf1e0df02978d261f20f8b082afcf189c3/plumbum-1.7.0-py2.py3-none-any.whl (116kB)\n", + "\u001b[K |████████████████████████████████| 122kB 4.4MB/s \n", + "\u001b[?25hRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from -r test_requirements.txt (line 6)) (1.19.5)\n", + "Requirement already satisfied: Pillow in /usr/local/lib/python3.7/dist-packages (from -r test_requirements.txt (line 7)) (7.0.0)\n", + "Requirement already satisfied: Werkzeug>=0.15 in /usr/local/lib/python3.7/dist-packages (from Flask->-r test_requirements.txt (line 2)) (1.0.1)\n", + "Requirement already satisfied: click>=5.1 in /usr/local/lib/python3.7/dist-packages (from Flask->-r test_requirements.txt (line 2)) (7.1.2)\n", + "Requirement already satisfied: Jinja2>=2.10.1 in /usr/local/lib/python3.7/dist-packages (from Flask->-r test_requirements.txt (line 2)) (2.11.3)\n", + "Requirement already satisfied: itsdangerous>=0.24 in /usr/local/lib/python3.7/dist-packages (from Flask->-r test_requirements.txt (line 2)) (1.1.0)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->-r test_requirements.txt (line 3)) (1.3.1)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib->-r test_requirements.txt (line 3)) (0.10.0)\n", + "Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->-r test_requirements.txt (line 3)) (2.4.7)\n", + "Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->-r test_requirements.txt (line 3)) (2.8.1)\n", + "Requirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.7/dist-packages (from Jinja2>=2.10.1->Flask->-r test_requirements.txt (line 2)) (1.1.1)\n", + "Requirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from cycler>=0.10->matplotlib->-r test_requirements.txt (line 3)) (1.15.0)\n", + "Installing collected packages: plumbum\n", + "Successfully installed plumbum-1.7.0\n", + "DONE\n", + "Collecting tensorflow==1.15\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/92/2b/e3af15221da9ff323521565fa3324b0d7c7c5b1d7a8ca66984c8d59cb0ce/tensorflow-1.15.0-cp37-cp37m-manylinux2010_x86_64.whl (412.3MB)\n", + "\u001b[K |████████████████████████████████| 412.3MB 30kB/s \n", + "\u001b[?25hRequirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15) (1.15.0)\n", + "Requirement already satisfied: numpy<2.0,>=1.16.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15) (1.19.5)\n", + "Collecting gast==0.2.2\n", + " Downloading https://files.pythonhosted.org/packages/4e/35/11749bf99b2d4e3cceb4d55ca22590b0d7c2c62b9de38ac4a4a7f4687421/gast-0.2.2.tar.gz\n", + "Collecting tensorflow-estimator==1.15.1\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/de/62/2ee9cd74c9fa2fa450877847ba560b260f5d0fb70ee0595203082dafcc9d/tensorflow_estimator-1.15.1-py2.py3-none-any.whl (503kB)\n", + "\u001b[K |████████████████████████████████| 512kB 
44.6MB/s \n", + "\u001b[?25hRequirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15) (1.32.0)\n", + "Requirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15) (0.8.1)\n", + "Collecting tensorboard<1.16.0,>=1.15.0\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/1e/e9/d3d747a97f7188f48aa5eda486907f3b345cd409f0a0850468ba867db246/tensorboard-1.15.0-py3-none-any.whl (3.8MB)\n", + "\u001b[K |████████████████████████████████| 3.8MB 47.1MB/s \n", + "\u001b[?25hRequirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15) (0.10.0)\n", + "Requirement already satisfied: google-pasta>=0.1.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15) (0.2.0)\n", + "Requirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15) (1.1.2)\n", + "Requirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15) (3.12.4)\n", + "Requirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15) (0.36.2)\n", + "Requirement already satisfied: wrapt>=1.11.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15) (1.12.1)\n", + "Collecting keras-applications>=1.0.8\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/71/e3/19762fdfc62877ae9102edf6342d71b28fbfd9dea3d2f96a882ce099b03f/Keras_Applications-1.0.8-py3-none-any.whl (50kB)\n", + "\u001b[K |████████████████████████████████| 51kB 7.1MB/s \n", + "\u001b[?25hRequirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15) (3.3.0)\n", + "Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15) (1.1.0)\n", + "Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow==1.15) (54.0.0)\n", + "Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow==1.15) (3.3.4)\n", + "Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow==1.15) (1.0.1)\n", + "Requirement already satisfied: h5py in /usr/local/lib/python3.7/dist-packages (from keras-applications>=1.0.8->tensorflow==1.15) (2.10.0)\n", + "Requirement already satisfied: importlib-metadata; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard<1.16.0,>=1.15.0->tensorflow==1.15) (3.7.2)\n", + "Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata; python_version < \"3.8\"->markdown>=2.6.8->tensorboard<1.16.0,>=1.15.0->tensorflow==1.15) (3.4.1)\n", + "Requirement already satisfied: typing-extensions>=3.6.4; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from importlib-metadata; python_version < \"3.8\"->markdown>=2.6.8->tensorboard<1.16.0,>=1.15.0->tensorflow==1.15) (3.7.4.3)\n", + "Building wheels for collected packages: gast\n", + " Building wheel for gast (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", + " Created wheel for gast: filename=gast-0.2.2-cp37-none-any.whl size=7540 sha256=0fa10686831077f9794200cdb177a8a2bc4079cfcf48381ce2c5f88f7c6ee9fe\n", + " Stored in directory: /root/.cache/pip/wheels/5c/2e/7e/a1d4d4fcebe6c381f378ce7743a3ced3699feb89bcfbdadadd\n", + "Successfully built gast\n", + "\u001b[31mERROR: tensorflow-probability 0.12.1 has requirement gast>=0.3.2, but you'll have gast 0.2.2 which is incompatible.\u001b[0m\n", + "Installing collected packages: gast, tensorflow-estimator, tensorboard, keras-applications, tensorflow\n", + " Found existing installation: gast 0.3.3\n", + " Uninstalling gast-0.3.3:\n", + " Successfully uninstalled gast-0.3.3\n", + " Found existing installation: tensorflow-estimator 2.4.0\n", + " Uninstalling tensorflow-estimator-2.4.0:\n", + " Successfully uninstalled tensorflow-estimator-2.4.0\n", + " Found existing installation: tensorboard 2.4.1\n", + " Uninstalling tensorboard-2.4.1:\n", + " Successfully uninstalled tensorboard-2.4.1\n", + " Found existing installation: tensorflow 2.4.1\n", + " Uninstalling tensorflow-2.4.1:\n", + " Successfully uninstalled tensorflow-2.4.1\n", + "Successfully installed gast-0.2.2 keras-applications-1.0.8 tensorboard-1.15.0 tensorflow-1.15.0 tensorflow-estimator-1.15.1\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Z7Z6OLQ0SGyK", + "outputId": "b2ad5896-ecc2-4849-c690-723036e96111" + }, + "source": [ + "!cp -r models/east_icdar2015_resnet_v1_50_rbox models/east_resnet_v1_50_rbox\n", + "!python3 multigpu_train.py --checkpoint_path models/east_resnet_v1_50_rbox --restore --batch_size_per_gpu 1 --num_readers 1 --max_steps 1 --save_checkpoint_steps 1 --save_summary_steps 1" + ], + "execution_count": 3, + "outputs": [ + { + "output_type": "stream", + "text": [ + "\u001b[1;30;43mStreaming output truncated to the last 5000 lines.\u001b[0m\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "ApplyAdam: CPU \n", + "IsVariableInitialized: CPU \n", + "Assign: CPU \n", + "RefSwitch: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/Initializer/zeros (Const) \n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/Adam/Initializer/zeros (Const) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/Adam_1/Initializer/zeros (Const) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/Adam_1/Assign (Assign) /device:GPU:0\n", + " 
resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_122 (IsVariableInitialized) /device:GPU:0\n", + " cond_122/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_122/Switch_1 (Switch) \n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_122/read/Switch_resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_122/read_resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_122/Merge_resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_122 (AssignSub) /device:GPU:0\n", + " save/Assign_671 (Assign) /device:GPU:0\n", + " save/Assign_672 (Assign) /device:GPU:0\n", + " save/Assign_673 (Assign) /device:GPU:0\n", + " save/Assign_674 (Assign) /device:GPU:0\n", + " save_1/Assign_117 (Assign) /device:GPU:0\n", + "\n", + "[... the same colocation warning repeats for the remaining resnet_v1_50/block3 variables (conv2/conv3 weights, BatchNorm gammas, betas, moving means and variances) ...]\n", + "\n", + "2021-03-17 16:04:16.720658: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "RefSwitch: CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "IsVariableInitialized: CPU \n", + "ApplyAdam: CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/Initializer/zeros/shape_as_tensor (Const) \n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/Initializer/zeros/Const (Const) \n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/Initializer/zeros (Fill) \n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/Adam/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/Adam/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/Adam/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/Adam_1/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/Adam_1/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/Adam_1/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_128 (IsVariableInitialized) /device:GPU:0\n", + " cond_128/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_128/Switch_1 (Switch) \n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " 
resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_128/read/Switch_resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_128/read_resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_128/Merge_resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_128 (AssignSub) /device:GPU:0\n", + " save/Assign_699 (Assign) /device:GPU:0\n", + " save/Assign_700 (Assign) /device:GPU:0\n", + " save/Assign_701 (Assign) /device:GPU:0\n", + " save/Assign_702 (Assign) /device:GPU:0\n", + " save_1/Assign_123 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:16.723612: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Mul: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/moving_mean/Initializer/zeros/shape_as_tensor (Const) \n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/moving_mean/Initializer/zeros/Const (Const) \n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/moving_mean/Initializer/zeros (Fill) \n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/moving_mean (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/moving_mean/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/moving_mean/read (Identity) /device:GPU:0\n", + " model_0/resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg/sub/x (Const) /device:GPU:0\n", + " model_0/resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg/sub (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg/sub_1 (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg/mul (Mul) /device:GPU:0\n", + " model_0/resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg (AssignSub) /device:GPU:0\n", + " save/Assign_707 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:16.724217: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Mul: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/moving_variance/Initializer/ones/shape_as_tensor (Const) \n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/moving_variance/Initializer/ones/Const (Const) \n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/moving_variance/Initializer/ones (Fill) \n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/moving_variance (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/moving_variance/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/moving_variance/read (Identity) /device:GPU:0\n", + " model_0/resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg_1/sub/x (Const) /device:GPU:0\n", + " model_0/resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg_1/sub (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg_1/sub_1 (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg_1/mul (Mul) /device:GPU:0\n", + " model_0/resnet_v1_50/block3/unit_6/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg_1 (AssignSub) /device:GPU:0\n", + " save/Assign_708 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:16.724838: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "IsVariableInitialized: CPU \n", + "ApplyAdam: CPU \n", + "TruncatedNormal: CPU XLA_CPU \n", + "Add: CPU XLA_CPU \n", + "RefSwitch: CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/Initializer/truncated_normal/shape (Const) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/Initializer/truncated_normal/mean (Const) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/Initializer/truncated_normal/stddev (Const) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/Initializer/truncated_normal/TruncatedNormal (TruncatedNormal) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/Initializer/truncated_normal/mul (Mul) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/Initializer/truncated_normal (Add) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/Adam/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/Adam/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/Adam/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/Adam_1/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/Adam_1/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/Adam_1/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_129 (IsVariableInitialized) /device:GPU:0\n", + " cond_129/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_129/Switch_1 (Switch) \n", + " 
resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_129/read/Switch_resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_129/read_resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_129/Merge_resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/weights/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_129 (AssignSub) /device:GPU:0\n", + " save/Assign_765 (Assign) /device:GPU:0\n", + " save/Assign_766 (Assign) /device:GPU:0\n", + " save/Assign_767 (Assign) /device:GPU:0\n", + " save/Assign_768 (Assign) /device:GPU:0\n", + " save_1/Assign_137 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:16.725472: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "RefSwitch: CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "IsVariableInitialized: CPU \n", + "ApplyAdam: CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/Initializer/ones/shape_as_tensor (Const) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/Initializer/ones/Const (Const) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/Initializer/ones (Fill) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/Adam/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/Adam/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/Adam/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/Adam_1/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/Adam_1/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/Adam_1/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_130 (IsVariableInitialized) /device:GPU:0\n", + " cond_130/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_130/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " 
resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_130/read/Switch_resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_130/read_resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_130/Merge_resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_130 (AssignSub) /device:GPU:0\n", + " save/Assign_759 (Assign) /device:GPU:0\n", + " save/Assign_760 (Assign) /device:GPU:0\n", + " save/Assign_761 (Assign) /device:GPU:0\n", + " save/Assign_762 (Assign) /device:GPU:0\n", + " save_1/Assign_136 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:16.824788: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "RefSwitch: CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "IsVariableInitialized: CPU \n", + "ApplyAdam: CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/Initializer/zeros/shape_as_tensor (Const) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/Initializer/zeros/Const (Const) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/Initializer/zeros (Fill) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/Adam/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/Adam/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/Adam/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/Adam_1/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/Adam_1/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/Adam_1/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_131 (IsVariableInitialized) /device:GPU:0\n", + " cond_131/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_131/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/ExponentialMovingAverage/cond/Switch (Switch) 
/device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_131/read/Switch_resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_131/read_resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_131/Merge_resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_131 (AssignSub) /device:GPU:0\n", + " save/Assign_755 (Assign) /device:GPU:0\n", + " save/Assign_756 (Assign) /device:GPU:0\n", + " save/Assign_757 (Assign) /device:GPU:0\n", + " save/Assign_758 (Assign) /device:GPU:0\n", + " save_1/Assign_135 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:16.825628: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Mul: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/moving_mean/Initializer/zeros/shape_as_tensor (Const) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/moving_mean/Initializer/zeros/Const (Const) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/moving_mean/Initializer/zeros (Fill) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/moving_mean (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/moving_mean/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/moving_mean/read (Identity) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/AssignMovingAvg/sub/x (Const) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/AssignMovingAvg/sub (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/AssignMovingAvg/sub_1 (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/AssignMovingAvg/mul (Mul) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/AssignMovingAvg (AssignSub) /device:GPU:0\n", + " save/Assign_763 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:16.826200: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Mul: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/moving_variance/Initializer/ones/shape_as_tensor (Const) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/moving_variance/Initializer/ones/Const (Const) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/moving_variance/Initializer/ones (Fill) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/moving_variance (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/moving_variance/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/moving_variance/read (Identity) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/AssignMovingAvg_1/sub/x (Const) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/AssignMovingAvg_1/sub (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/AssignMovingAvg_1/sub_1 (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/AssignMovingAvg_1/mul (Mul) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/AssignMovingAvg_1 (AssignSub) /device:GPU:0\n", + " save/Assign_764 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:16.826680: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "IsVariableInitialized: CPU \n", + "ApplyAdam: CPU \n", + "TruncatedNormal: CPU XLA_CPU \n", + "Add: CPU XLA_CPU \n", + "RefSwitch: CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/Initializer/truncated_normal/shape (Const) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/Initializer/truncated_normal/mean (Const) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/Initializer/truncated_normal/stddev (Const) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/Initializer/truncated_normal/TruncatedNormal (TruncatedNormal) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/Initializer/truncated_normal/mul (Mul) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/Initializer/truncated_normal (Add) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/Adam/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/Adam/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/Adam/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/Adam_1/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/Adam_1/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/Adam_1/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_132 (IsVariableInitialized) /device:GPU:0\n", + " cond_132/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_132/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " 
resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_132/read/Switch_resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_132/read_resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_132/Merge_resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/weights/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_132 (AssignSub) /device:GPU:0\n", + " save/Assign_723 (Assign) /device:GPU:0\n", + " save/Assign_724 (Assign) /device:GPU:0\n", + " save/Assign_725 (Assign) /device:GPU:0\n", + " save/Assign_726 (Assign) /device:GPU:0\n", + " save_1/Assign_128 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:16.926932: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "ApplyAdam: CPU \n", + "IsVariableInitialized: CPU \n", + "Assign: CPU \n", + "RefSwitch: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/Initializer/ones (Const) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/Adam/Initializer/zeros (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/Adam_1/Initializer/zeros (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_133 (IsVariableInitialized) /device:GPU:0\n", + " cond_133/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_133/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch_1 
(Switch) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_133/read/Switch_resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_133/read_resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_133/Merge_resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_133 (AssignSub) /device:GPU:0\n", + " save/Assign_717 (Assign) /device:GPU:0\n", + " save/Assign_718 (Assign) /device:GPU:0\n", + " save/Assign_719 (Assign) /device:GPU:0\n", + " save/Assign_720 (Assign) /device:GPU:0\n", + " save_1/Assign_127 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:16.928005: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "ApplyAdam: CPU \n", + "IsVariableInitialized: CPU \n", + "Assign: CPU \n", + "RefSwitch: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/Initializer/zeros (Const) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/Adam/Initializer/zeros (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/Adam_1/Initializer/zeros (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/Adam_1/read (Identity) /device:GPU:0\n", + " 
Adam/update_resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_134 (IsVariableInitialized) /device:GPU:0\n", + " cond_134/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_134/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_134/read/Switch_resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_134/read_resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_134/Merge_resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_134 (AssignSub) /device:GPU:0\n", + " save/Assign_713 (Assign) /device:GPU:0\n", + " save/Assign_714 (Assign) /device:GPU:0\n", + " save/Assign_715 (Assign) /device:GPU:0\n", + " save/Assign_716 (Assign) /device:GPU:0\n", + " save_1/Assign_126 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:16.928531: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", +            " /job:localhost/replica:0/task:0/device:CPU:0].\n", +            "See below for details of this colocation group:\n", +            "Colocation Debug Info:\n", +            "Colocation group had the following types and supported devices: \n", +            "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", +            "AssignSub: CPU \n", +            "Const: CPU XLA_CPU \n", +            "Mul: CPU XLA_CPU \n", +            "Assign: CPU \n", +            "VariableV2: CPU \n", +            "Identity: CPU XLA_CPU \n", +            "Sub: CPU XLA_CPU \n", +            "\n", +            "Colocation members, user-requested devices, and framework assigned devices, if any:\n", +            "  resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/moving_mean/Initializer/zeros (Const) \n", +            "  resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/moving_mean (VariableV2) /device:GPU:0\n", +            "  resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/moving_mean/Assign (Assign) /device:GPU:0\n", +            "  resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/moving_mean/read (Identity) /device:GPU:0\n", +            "  model_0/resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/AssignMovingAvg/sub/x (Const) /device:GPU:0\n", +            "  model_0/resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/AssignMovingAvg/sub (Sub) /device:GPU:0\n", +            "  model_0/resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/AssignMovingAvg/sub_1 (Sub) /device:GPU:0\n", +            "  model_0/resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/AssignMovingAvg/mul (Mul) /device:GPU:0\n", +            "  model_0/resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/BatchNorm/AssignMovingAvg (AssignSub) /device:GPU:0\n", +            "  save/Assign_721 (Assign) /device:GPU:0\n", +            "\n", +            "[... the same colocation warning repeats for every remaining resnet_v1_50 variable (conv weights, BatchNorm gamma/beta, moving_mean/moving_variance, their Adam slots, and ExponentialMovingAverage shadow copies); the near-identical blocks are omitted here for readability ...]\n", +            "\n", +            "2021-03-17 16:04:17.336568: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "IsVariableInitialized: CPU \n", + "ApplyAdam: CPU \n", + "TruncatedNormal: CPU XLA_CPU \n", + "Add: CPU XLA_CPU \n", + "RefSwitch: CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/Initializer/truncated_normal/shape (Const) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/Initializer/truncated_normal/mean (Const) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/Initializer/truncated_normal/stddev (Const) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/Initializer/truncated_normal/TruncatedNormal (TruncatedNormal) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/Initializer/truncated_normal/mul (Mul) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/Initializer/truncated_normal (Add) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/Adam/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/Adam/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/Adam/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/Adam_1/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/Adam_1/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/Adam_1/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_144 (IsVariableInitialized) /device:GPU:0\n", + " cond_144/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_144/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " 
resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_144/read/Switch_resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_144/read_resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_144/Merge_resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/weights/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_144 (AssignSub) /device:GPU:0\n", + " save/Assign_793 (Assign) /device:GPU:0\n", + " save/Assign_794 (Assign) /device:GPU:0\n", + " save/Assign_795 (Assign) /device:GPU:0\n", + " save/Assign_796 (Assign) /device:GPU:0\n", + " save_1/Assign_143 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.339909: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "ApplyAdam: CPU \n", + "IsVariableInitialized: CPU \n", + "Assign: CPU \n", + "RefSwitch: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/Initializer/ones (Const) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/Adam/Initializer/zeros (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/Adam_1/Initializer/zeros (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_145 (IsVariableInitialized) /device:GPU:0\n", + " cond_145/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_145/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch_1 
(Switch) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_145/read/Switch_resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_145/read_resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_145/Merge_resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_145 (AssignSub) /device:GPU:0\n", + " save/Assign_787 (Assign) /device:GPU:0\n", + " save/Assign_788 (Assign) /device:GPU:0\n", + " save/Assign_789 (Assign) /device:GPU:0\n", + " save/Assign_790 (Assign) /device:GPU:0\n", + " save_1/Assign_142 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.436380: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "ApplyAdam: CPU \n", + "IsVariableInitialized: CPU \n", + "Assign: CPU \n", + "RefSwitch: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/Initializer/zeros (Const) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/Adam/Initializer/zeros (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/Adam_1/Initializer/zeros (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/Adam_1/read (Identity) /device:GPU:0\n", + " 
Adam/update_resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_146 (IsVariableInitialized) /device:GPU:0\n", + " cond_146/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_146/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_146/read/Switch_resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_146/read_resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_146/Merge_resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_146 (AssignSub) /device:GPU:0\n", + " save/Assign_783 (Assign) /device:GPU:0\n", + " save/Assign_784 (Assign) /device:GPU:0\n", + " save/Assign_785 (Assign) /device:GPU:0\n", + " save/Assign_786 (Assign) /device:GPU:0\n", + " save_1/Assign_141 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.437379: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Assign: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/moving_mean/Initializer/zeros (Const) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/moving_mean (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/moving_mean/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/moving_mean/read (Identity) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/AssignMovingAvg/sub/x (Const) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/AssignMovingAvg/sub (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/AssignMovingAvg/sub_1 (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/AssignMovingAvg/mul (Mul) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/AssignMovingAvg (AssignSub) /device:GPU:0\n", + " save/Assign_791 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.437774: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Assign: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/moving_variance/Initializer/ones (Const) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/moving_variance (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/moving_variance/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/moving_variance/read (Identity) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/AssignMovingAvg_1/sub/x (Const) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/AssignMovingAvg_1/sub (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/AssignMovingAvg_1/sub_1 (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/AssignMovingAvg_1/mul (Mul) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_2/bottleneck_v1/conv2/BatchNorm/AssignMovingAvg_1 (AssignSub) /device:GPU:0\n", + " save/Assign_792 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.438233: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "IsVariableInitialized: CPU \n", + "ApplyAdam: CPU \n", + "TruncatedNormal: CPU XLA_CPU \n", + "Add: CPU XLA_CPU \n", + "RefSwitch: CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/Initializer/truncated_normal/shape (Const) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/Initializer/truncated_normal/mean (Const) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/Initializer/truncated_normal/stddev (Const) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/Initializer/truncated_normal/TruncatedNormal (TruncatedNormal) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/Initializer/truncated_normal/mul (Mul) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/Initializer/truncated_normal (Add) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/Adam/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/Adam/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/Adam/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/Adam_1/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/Adam_1/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/Adam_1/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_147 (IsVariableInitialized) /device:GPU:0\n", + " cond_147/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_147/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " 
resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_147/read/Switch_resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_147/read_resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_147/Merge_resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/weights/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_147 (AssignSub) /device:GPU:0\n", + " save/Assign_807 (Assign) /device:GPU:0\n", + " save/Assign_808 (Assign) /device:GPU:0\n", + " save/Assign_809 (Assign) /device:GPU:0\n", + " save/Assign_810 (Assign) /device:GPU:0\n", + " save_1/Assign_146 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.438686: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "RefSwitch: CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "IsVariableInitialized: CPU \n", + "ApplyAdam: CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/Initializer/ones/shape_as_tensor (Const) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/Initializer/ones/Const (Const) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/Initializer/ones (Fill) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/Adam/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/Adam/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/Adam/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/Adam_1/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/Adam_1/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/Adam_1/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_148 (IsVariableInitialized) /device:GPU:0\n", + " cond_148/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_148/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " 
resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_148/read/Switch_resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_148/read_resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_148/Merge_resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_148 (AssignSub) /device:GPU:0\n", + " save/Assign_801 (Assign) /device:GPU:0\n", + " save/Assign_802 (Assign) /device:GPU:0\n", + " save/Assign_803 (Assign) /device:GPU:0\n", + " save/Assign_804 (Assign) /device:GPU:0\n", + " save_1/Assign_145 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.439103: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "RefSwitch: CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "IsVariableInitialized: CPU \n", + "ApplyAdam: CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/Initializer/zeros/shape_as_tensor (Const) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/Initializer/zeros/Const (Const) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/Initializer/zeros (Fill) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/Adam/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/Adam/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/Adam/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/Adam_1/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/Adam_1/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/Adam_1/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_149 (IsVariableInitialized) /device:GPU:0\n", + " cond_149/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_149/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " 
resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_149/read/Switch_resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_149/read_resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_149/Merge_resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_149 (AssignSub) /device:GPU:0\n", + " save/Assign_797 (Assign) /device:GPU:0\n", + " save/Assign_798 (Assign) /device:GPU:0\n", + " save/Assign_799 (Assign) /device:GPU:0\n", + " save/Assign_800 (Assign) /device:GPU:0\n", + " save_1/Assign_144 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.539202: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Mul: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/moving_mean/Initializer/zeros/shape_as_tensor (Const) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/moving_mean/Initializer/zeros/Const (Const) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/moving_mean/Initializer/zeros (Fill) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/moving_mean (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/moving_mean/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/moving_mean/read (Identity) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg/sub/x (Const) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg/sub (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg/sub_1 (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg/mul (Mul) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg (AssignSub) /device:GPU:0\n", + " save/Assign_805 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.539978: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Mul: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/moving_variance/Initializer/ones/shape_as_tensor (Const) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/moving_variance/Initializer/ones/Const (Const) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/moving_variance/Initializer/ones (Fill) \n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/moving_variance (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/moving_variance/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/moving_variance/read (Identity) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg_1/sub/x (Const) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg_1/sub (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg_1/sub_1 (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg_1/mul (Mul) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_2/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg_1 (AssignSub) /device:GPU:0\n", + " save/Assign_806 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.540445: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "IsVariableInitialized: CPU \n", + "ApplyAdam: CPU \n", + "TruncatedNormal: CPU XLA_CPU \n", + "Add: CPU XLA_CPU \n", + "RefSwitch: CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/Initializer/truncated_normal/shape (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/Initializer/truncated_normal/mean (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/Initializer/truncated_normal/stddev (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/Initializer/truncated_normal/TruncatedNormal (TruncatedNormal) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/Initializer/truncated_normal/mul (Mul) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/Initializer/truncated_normal (Add) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/Adam/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/Adam/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/Adam/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/Adam_1/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/Adam_1/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/Adam_1/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_150 (IsVariableInitialized) /device:GPU:0\n", + " cond_150/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_150/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " 
resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_150/read/Switch_resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_150/read_resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_150/Merge_resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/weights/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_150 (AssignSub) /device:GPU:0\n", + " save/Assign_821 (Assign) /device:GPU:0\n", + " save/Assign_822 (Assign) /device:GPU:0\n", + " save/Assign_823 (Assign) /device:GPU:0\n", + " save/Assign_824 (Assign) /device:GPU:0\n", + " save_1/Assign_149 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.540861: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "ApplyAdam: CPU \n", + "IsVariableInitialized: CPU \n", + "Assign: CPU \n", + "RefSwitch: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/Initializer/ones (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/Adam/Initializer/zeros (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/Adam_1/Initializer/zeros (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_151 (IsVariableInitialized) /device:GPU:0\n", + " cond_151/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_151/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch_1 
(Switch) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_151/read/Switch_resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_151/read_resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_151/Merge_resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_151 (AssignSub) /device:GPU:0\n", + " save/Assign_815 (Assign) /device:GPU:0\n", + " save/Assign_816 (Assign) /device:GPU:0\n", + " save/Assign_817 (Assign) /device:GPU:0\n", + " save/Assign_818 (Assign) /device:GPU:0\n", + " save_1/Assign_148 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.641087: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "ApplyAdam: CPU \n", + "IsVariableInitialized: CPU \n", + "Assign: CPU \n", + "RefSwitch: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/Initializer/zeros (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/Adam/Initializer/zeros (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/Adam_1/Initializer/zeros (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/Adam_1/read (Identity) /device:GPU:0\n", + " 
Adam/update_resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_152 (IsVariableInitialized) /device:GPU:0\n", + " cond_152/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_152/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_152/read/Switch_resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_152/read_resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_152/Merge_resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_152 (AssignSub) /device:GPU:0\n", + " save/Assign_811 (Assign) /device:GPU:0\n", + " save/Assign_812 (Assign) /device:GPU:0\n", + " save/Assign_813 (Assign) /device:GPU:0\n", + " save/Assign_814 (Assign) /device:GPU:0\n", + " save_1/Assign_147 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.642148: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Assign: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/moving_mean/Initializer/zeros (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/moving_mean (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/moving_mean/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/moving_mean/read (Identity) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/AssignMovingAvg/sub/x (Const) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/AssignMovingAvg/sub (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/AssignMovingAvg/sub_1 (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/AssignMovingAvg/mul (Mul) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/AssignMovingAvg (AssignSub) /device:GPU:0\n", + " save/Assign_819 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.642680: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Assign: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/moving_variance/Initializer/ones (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/moving_variance (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/moving_variance/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/moving_variance/read (Identity) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/AssignMovingAvg_1/sub/x (Const) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/AssignMovingAvg_1/sub (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/AssignMovingAvg_1/sub_1 (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/AssignMovingAvg_1/mul (Mul) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv1/BatchNorm/AssignMovingAvg_1 (AssignSub) /device:GPU:0\n", + " save/Assign_820 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.650812: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "IsVariableInitialized: CPU \n", + "ApplyAdam: CPU \n", + "TruncatedNormal: CPU XLA_CPU \n", + "Add: CPU XLA_CPU \n", + "RefSwitch: CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/Initializer/truncated_normal/shape (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/Initializer/truncated_normal/mean (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/Initializer/truncated_normal/stddev (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/Initializer/truncated_normal/TruncatedNormal (TruncatedNormal) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/Initializer/truncated_normal/mul (Mul) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/Initializer/truncated_normal (Add) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/Adam/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/Adam/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/Adam/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/Adam_1/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/Adam_1/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/Adam_1/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_153 (IsVariableInitialized) /device:GPU:0\n", + " cond_153/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_153/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " 
resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_153/read/Switch_resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_153/read_resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_153/Merge_resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/weights/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_153 (AssignSub) /device:GPU:0\n", + " save/Assign_835 (Assign) /device:GPU:0\n", + " save/Assign_836 (Assign) /device:GPU:0\n", + " save/Assign_837 (Assign) /device:GPU:0\n", + " save/Assign_838 (Assign) /device:GPU:0\n", + " save_1/Assign_152 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.743005: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "ApplyAdam: CPU \n", + "IsVariableInitialized: CPU \n", + "Assign: CPU \n", + "RefSwitch: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/Initializer/ones (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/Adam/Initializer/zeros (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/Adam_1/Initializer/zeros (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_154 (IsVariableInitialized) /device:GPU:0\n", + " cond_154/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_154/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch_1 
(Switch) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_154/read/Switch_resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_154/read_resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_154/Merge_resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_154 (AssignSub) /device:GPU:0\n", + " save/Assign_829 (Assign) /device:GPU:0\n", + " save/Assign_830 (Assign) /device:GPU:0\n", + " save/Assign_831 (Assign) /device:GPU:0\n", + " save/Assign_832 (Assign) /device:GPU:0\n", + " save_1/Assign_151 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.743790: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "ApplyAdam: CPU \n", + "IsVariableInitialized: CPU \n", + "Assign: CPU \n", + "RefSwitch: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/Initializer/zeros (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/Adam/Initializer/zeros (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/Adam_1/Initializer/zeros (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/Adam_1/read (Identity) /device:GPU:0\n", + " 
Adam/update_resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_155 (IsVariableInitialized) /device:GPU:0\n", + " cond_155/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_155/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_155/read/Switch_resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_155/read_resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_155/Merge_resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_155 (AssignSub) /device:GPU:0\n", + " save/Assign_825 (Assign) /device:GPU:0\n", + " save/Assign_826 (Assign) /device:GPU:0\n", + " save/Assign_827 (Assign) /device:GPU:0\n", + " save/Assign_828 (Assign) /device:GPU:0\n", + " save_1/Assign_150 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.744119: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Assign: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/moving_mean/Initializer/zeros (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/moving_mean (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/moving_mean/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/moving_mean/read (Identity) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/AssignMovingAvg/sub/x (Const) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/AssignMovingAvg/sub (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/AssignMovingAvg/sub_1 (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/AssignMovingAvg/mul (Mul) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/AssignMovingAvg (AssignSub) /device:GPU:0\n", + " save/Assign_833 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.744424: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Assign: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/moving_variance/Initializer/ones (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/moving_variance (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/moving_variance/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/moving_variance/read (Identity) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/AssignMovingAvg_1/sub/x (Const) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/AssignMovingAvg_1/sub (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/AssignMovingAvg_1/sub_1 (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/AssignMovingAvg_1/mul (Mul) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv2/BatchNorm/AssignMovingAvg_1 (AssignSub) /device:GPU:0\n", + " save/Assign_834 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.744831: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "IsVariableInitialized: CPU \n", + "ApplyAdam: CPU \n", + "TruncatedNormal: CPU XLA_CPU \n", + "Add: CPU XLA_CPU \n", + "RefSwitch: CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/Initializer/truncated_normal/shape (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/Initializer/truncated_normal/mean (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/Initializer/truncated_normal/stddev (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/Initializer/truncated_normal/TruncatedNormal (TruncatedNormal) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/Initializer/truncated_normal/mul (Mul) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/Initializer/truncated_normal (Add) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/Adam/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/Adam/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/Adam/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/Adam_1/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/Adam_1/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/Adam_1/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_156 (IsVariableInitialized) /device:GPU:0\n", + " cond_156/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_156/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " 
resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_156/read/Switch_resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_156/read_resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_156/Merge_resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/weights/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_156 (AssignSub) /device:GPU:0\n", + " save/Assign_849 (Assign) /device:GPU:0\n", + " save/Assign_850 (Assign) /device:GPU:0\n", + " save/Assign_851 (Assign) /device:GPU:0\n", + " save/Assign_852 (Assign) /device:GPU:0\n", + " save_1/Assign_155 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.845423: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "RefSwitch: CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "IsVariableInitialized: CPU \n", + "ApplyAdam: CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/Initializer/ones/shape_as_tensor (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/Initializer/ones/Const (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/Initializer/ones (Fill) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/Adam/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/Adam/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/Adam/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/Adam_1/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/Adam_1/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/Adam_1/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_157 (IsVariableInitialized) /device:GPU:0\n", + " cond_157/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_157/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " 
resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_157/read/Switch_resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_157/read_resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_157/Merge_resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_157 (AssignSub) /device:GPU:0\n", + " save/Assign_843 (Assign) /device:GPU:0\n", + " save/Assign_844 (Assign) /device:GPU:0\n", + " save/Assign_845 (Assign) /device:GPU:0\n", + " save/Assign_846 (Assign) /device:GPU:0\n", + " save_1/Assign_154 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.851676: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "RefSwitch: CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "IsVariableInitialized: CPU \n", + "ApplyAdam: CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/Initializer/zeros/shape_as_tensor (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/Initializer/zeros/Const (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/Initializer/zeros (Fill) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/Adam/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/Adam/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/Adam/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/Adam (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/Adam/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/Adam/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/Adam_1/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/Adam_1/Initializer/zeros/Const (Const) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/Adam_1/Initializer/zeros (Fill) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/Adam_1 (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/Adam_1/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_158 (IsVariableInitialized) /device:GPU:0\n", + " cond_158/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_158/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " 
resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_158/read/Switch_resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_158/read_resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_158/Merge_resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_158 (AssignSub) /device:GPU:0\n", + " save/Assign_839 (Assign) /device:GPU:0\n", + " save/Assign_840 (Assign) /device:GPU:0\n", + " save/Assign_841 (Assign) /device:GPU:0\n", + " save/Assign_842 (Assign) /device:GPU:0\n", + " save_1/Assign_153 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.852691: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Mul: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/moving_mean/Initializer/zeros/shape_as_tensor (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/moving_mean/Initializer/zeros/Const (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/moving_mean/Initializer/zeros (Fill) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/moving_mean (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/moving_mean/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/moving_mean/read (Identity) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg/sub/x (Const) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg/sub (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg/sub_1 (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg/mul (Mul) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg (AssignSub) /device:GPU:0\n", + " save/Assign_847 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.853375: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Mul: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "Assign: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/moving_variance/Initializer/ones/shape_as_tensor (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/moving_variance/Initializer/ones/Const (Const) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/moving_variance/Initializer/ones (Fill) \n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/moving_variance (VariableV2) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/moving_variance/Assign (Assign) /device:GPU:0\n", + " resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/moving_variance/read (Identity) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg_1/sub/x (Const) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg_1/sub (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg_1/sub_1 (Sub) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg_1/mul (Mul) /device:GPU:0\n", + " model_0/resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/AssignMovingAvg_1 (AssignSub) /device:GPU:0\n", + " save/Assign_848 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.947160: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Assign: CPU \n", + "IsVariableInitialized: CPU \n", + "ApplyAdam: CPU \n", + "RandomUniform: CPU XLA_CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "Add: CPU XLA_CPU \n", + "RefSwitch: CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " feature_fusion/Conv/weights/Initializer/random_uniform/shape (Const) \n", + " feature_fusion/Conv/weights/Initializer/random_uniform/min (Const) \n", + " feature_fusion/Conv/weights/Initializer/random_uniform/max (Const) \n", + " feature_fusion/Conv/weights/Initializer/random_uniform/RandomUniform (RandomUniform) \n", + " feature_fusion/Conv/weights/Initializer/random_uniform/sub (Sub) \n", + " feature_fusion/Conv/weights/Initializer/random_uniform/mul (Mul) \n", + " feature_fusion/Conv/weights/Initializer/random_uniform (Add) \n", + " feature_fusion/Conv/weights (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv/weights/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv/weights/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv/weights/Adam/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " feature_fusion/Conv/weights/Adam/Initializer/zeros/Const (Const) /device:GPU:0\n", + " feature_fusion/Conv/weights/Adam/Initializer/zeros (Fill) /device:GPU:0\n", + " feature_fusion/Conv/weights/Adam (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv/weights/Adam/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv/weights/Adam/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv/weights/Adam_1/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " feature_fusion/Conv/weights/Adam_1/Initializer/zeros/Const (Const) /device:GPU:0\n", + " feature_fusion/Conv/weights/Adam_1/Initializer/zeros (Fill) /device:GPU:0\n", + " feature_fusion/Conv/weights/Adam_1 (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv/weights/Adam_1/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv/weights/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_feature_fusion/Conv/weights/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_159 (IsVariableInitialized) /device:GPU:0\n", + " cond_159/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_159/Switch_1 (Switch) \n", + " feature_fusion/Conv/weights/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv/weights/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " feature_fusion/Conv/weights/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " feature_fusion/Conv/weights/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " feature_fusion/Conv/weights/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " feature_fusion/Conv/weights/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " 
feature_fusion/Conv/weights/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " feature_fusion/Conv/weights/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv/weights/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " feature_fusion/Conv/weights/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_159/read/Switch_feature_fusion/Conv/weights/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_159/read_feature_fusion/Conv/weights/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_159/Merge_feature_fusion/Conv/weights/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " feature_fusion/Conv/weights/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv/weights/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_159 (AssignSub) /device:GPU:0\n", + " save/Assign_12 (Assign) /device:GPU:0\n", + " save/Assign_13 (Assign) /device:GPU:0\n", + " save/Assign_14 (Assign) /device:GPU:0\n", + " save/Assign_15 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:17.947929: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "ApplyAdam: CPU \n", + "IsVariableInitialized: CPU \n", + "Assign: CPU \n", + "RefSwitch: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " feature_fusion/Conv/BatchNorm/gamma/Initializer/ones (Const) \n", + " feature_fusion/Conv/BatchNorm/gamma (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv/BatchNorm/gamma/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv/BatchNorm/gamma/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv/BatchNorm/gamma/Adam/Initializer/zeros (Const) /device:GPU:0\n", + " feature_fusion/Conv/BatchNorm/gamma/Adam (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv/BatchNorm/gamma/Adam/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv/BatchNorm/gamma/Adam/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv/BatchNorm/gamma/Adam_1/Initializer/zeros (Const) /device:GPU:0\n", + " feature_fusion/Conv/BatchNorm/gamma/Adam_1 (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv/BatchNorm/gamma/Adam_1/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv/BatchNorm/gamma/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_feature_fusion/Conv/BatchNorm/gamma/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_160 (IsVariableInitialized) /device:GPU:0\n", + " cond_160/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_160/Switch_1 (Switch) \n", + " feature_fusion/Conv/BatchNorm/gamma/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " 
feature_fusion/Conv/BatchNorm/gamma/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + "  feature_fusion/Conv/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + "  feature_fusion/Conv/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + "  feature_fusion/Conv/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + "  feature_fusion/Conv/BatchNorm/gamma/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + "  feature_fusion/Conv/BatchNorm/gamma/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + "  feature_fusion/Conv/BatchNorm/gamma/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + "  feature_fusion/Conv/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + "  feature_fusion/Conv/BatchNorm/gamma/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + "  cond_160/read/Switch_feature_fusion/Conv/BatchNorm/gamma/ExponentialMovingAverage (Switch) /device:GPU:0\n", + "  cond_160/read_feature_fusion/Conv/BatchNorm/gamma/ExponentialMovingAverage (Identity) /device:GPU:0\n", + "  cond_160/Merge_feature_fusion/Conv/BatchNorm/gamma/ExponentialMovingAverage (Merge) /device:GPU:0\n", + "  feature_fusion/Conv/BatchNorm/gamma/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + "  feature_fusion/Conv/BatchNorm/gamma/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + "  ExponentialMovingAverage/AssignMovingAvg_160 (AssignSub) /device:GPU:0\n", + "  save/Assign_6 (Assign) /device:GPU:0\n", + "  save/Assign_7 (Assign) /device:GPU:0\n", + "  save/Assign_8 (Assign) /device:GPU:0\n", + "  save/Assign_9 (Assign) /device:GPU:0\n", + "\n", + "[... identical colocation_graph.cc:983 warnings elided: the same message repeats for feature_fusion/Conv/BatchNorm/beta, moving_mean and moving_variance, and for the weights and BatchNorm variables of feature_fusion/Conv_1 through Conv_3 (timestamps 2021-03-17 16:04:17.948616 through 16:04:18.356); only the final occurrence, for feature_fusion/Conv_4/weights, is kept below ...]\n", + "\n", + "2021-03-17 16:04:18.356761: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Assign: CPU \n", + "IsVariableInitialized: CPU \n", + "ApplyAdam: CPU \n", + "RandomUniform: CPU XLA_CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "Add: CPU XLA_CPU \n", + "RefSwitch: CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " feature_fusion/Conv_4/weights/Initializer/random_uniform/shape (Const) \n", + " feature_fusion/Conv_4/weights/Initializer/random_uniform/min (Const) \n", + " feature_fusion/Conv_4/weights/Initializer/random_uniform/max (Const) \n", + " feature_fusion/Conv_4/weights/Initializer/random_uniform/RandomUniform (RandomUniform) \n", + " feature_fusion/Conv_4/weights/Initializer/random_uniform/sub (Sub) \n", + " feature_fusion/Conv_4/weights/Initializer/random_uniform/mul (Mul) \n", + " feature_fusion/Conv_4/weights/Initializer/random_uniform (Add) \n", + " feature_fusion/Conv_4/weights (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/Adam/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/Adam/Initializer/zeros/Const (Const) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/Adam/Initializer/zeros (Fill) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/Adam (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/Adam/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/Adam/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/Adam_1/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/Adam_1/Initializer/zeros/Const (Const) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/Adam_1/Initializer/zeros (Fill) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/Adam_1 (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/Adam_1/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_feature_fusion/Conv_4/weights/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_171 (IsVariableInitialized) /device:GPU:0\n", + " cond_171/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_171/Switch_1 (Switch) \n", + " feature_fusion/Conv_4/weights/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + 
" feature_fusion/Conv_4/weights/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " feature_fusion/Conv_4/weights/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_171/read/Switch_feature_fusion/Conv_4/weights/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_171/read_feature_fusion/Conv_4/weights/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_171/Merge_feature_fusion/Conv_4/weights/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_4/weights/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_171 (AssignSub) /device:GPU:0\n", + " save/Assign_68 (Assign) /device:GPU:0\n", + " save/Assign_69 (Assign) /device:GPU:0\n", + " save/Assign_70 (Assign) /device:GPU:0\n", + " save/Assign_71 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:18.357321: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "ApplyAdam: CPU \n", + "IsVariableInitialized: CPU \n", + "Assign: CPU \n", + "RefSwitch: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " feature_fusion/Conv_4/BatchNorm/gamma/Initializer/ones (Const) \n", + " feature_fusion/Conv_4/BatchNorm/gamma (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/gamma/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/gamma/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/gamma/Adam/Initializer/zeros (Const) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/gamma/Adam (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/gamma/Adam/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/gamma/Adam/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/gamma/Adam_1/Initializer/zeros (Const) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/gamma/Adam_1 (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/gamma/Adam_1/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/gamma/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_feature_fusion/Conv_4/BatchNorm/gamma/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_172 (IsVariableInitialized) /device:GPU:0\n", + " cond_172/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_172/Switch_1 (Switch) \n", + " feature_fusion/Conv_4/BatchNorm/gamma/ExponentialMovingAverage 
(VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/gamma/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/gamma/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/gamma/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/gamma/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " feature_fusion/Conv_4/BatchNorm/gamma/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_172/read/Switch_feature_fusion/Conv_4/BatchNorm/gamma/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_172/read_feature_fusion/Conv_4/BatchNorm/gamma/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_172/Merge_feature_fusion/Conv_4/BatchNorm/gamma/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/gamma/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/gamma/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_172 (AssignSub) /device:GPU:0\n", + " save/Assign_62 (Assign) /device:GPU:0\n", + " save/Assign_63 (Assign) /device:GPU:0\n", + " save/Assign_64 (Assign) /device:GPU:0\n", + " save/Assign_65 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:18.357781: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "ApplyAdam: CPU \n", + "IsVariableInitialized: CPU \n", + "Assign: CPU \n", + "RefSwitch: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " feature_fusion/Conv_4/BatchNorm/beta/Initializer/zeros (Const) \n", + " feature_fusion/Conv_4/BatchNorm/beta (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/beta/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/beta/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/beta/Adam/Initializer/zeros (Const) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/beta/Adam (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/beta/Adam/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/beta/Adam/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/beta/Adam_1/Initializer/zeros (Const) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/beta/Adam_1 (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/beta/Adam_1/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/beta/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_feature_fusion/Conv_4/BatchNorm/beta/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_173 (IsVariableInitialized) /device:GPU:0\n", + " cond_173/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_173/Switch_1 (Switch) \n", + " feature_fusion/Conv_4/BatchNorm/beta/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/beta/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/beta/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/beta/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/beta/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/beta/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/beta/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/beta/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/beta/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " feature_fusion/Conv_4/BatchNorm/beta/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_173/read/Switch_feature_fusion/Conv_4/BatchNorm/beta/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_173/read_feature_fusion/Conv_4/BatchNorm/beta/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_173/Merge_feature_fusion/Conv_4/BatchNorm/beta/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/beta/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " 
feature_fusion/Conv_4/BatchNorm/beta/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_173 (AssignSub) /device:GPU:0\n", + " save/Assign_58 (Assign) /device:GPU:0\n", + " save/Assign_59 (Assign) /device:GPU:0\n", + " save/Assign_60 (Assign) /device:GPU:0\n", + " save/Assign_61 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:18.358130: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Assign: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " feature_fusion/Conv_4/BatchNorm/moving_mean/Initializer/zeros (Const) \n", + " feature_fusion/Conv_4/BatchNorm/moving_mean (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/moving_mean/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/moving_mean/read (Identity) /device:GPU:0\n", + " model_0/feature_fusion/Conv_4/BatchNorm/AssignMovingAvg/sub/x (Const) /device:GPU:0\n", + " model_0/feature_fusion/Conv_4/BatchNorm/AssignMovingAvg/sub (Sub) /device:GPU:0\n", + " model_0/feature_fusion/Conv_4/BatchNorm/AssignMovingAvg/sub_1 (Sub) /device:GPU:0\n", + " model_0/feature_fusion/Conv_4/BatchNorm/AssignMovingAvg/mul (Mul) /device:GPU:0\n", + " model_0/feature_fusion/Conv_4/BatchNorm/AssignMovingAvg (AssignSub) /device:GPU:0\n", + " save/Assign_66 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:18.459438: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Assign: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " feature_fusion/Conv_4/BatchNorm/moving_variance/Initializer/ones (Const) \n", + " feature_fusion/Conv_4/BatchNorm/moving_variance (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/moving_variance/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_4/BatchNorm/moving_variance/read (Identity) /device:GPU:0\n", + " model_0/feature_fusion/Conv_4/BatchNorm/AssignMovingAvg_1/sub/x (Const) /device:GPU:0\n", + " model_0/feature_fusion/Conv_4/BatchNorm/AssignMovingAvg_1/sub (Sub) /device:GPU:0\n", + " model_0/feature_fusion/Conv_4/BatchNorm/AssignMovingAvg_1/sub_1 (Sub) /device:GPU:0\n", + " model_0/feature_fusion/Conv_4/BatchNorm/AssignMovingAvg_1/mul (Mul) /device:GPU:0\n", + " model_0/feature_fusion/Conv_4/BatchNorm/AssignMovingAvg_1 (AssignSub) /device:GPU:0\n", + " save/Assign_67 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:18.460770: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Assign: CPU \n", + "IsVariableInitialized: CPU \n", + "ApplyAdam: CPU \n", + "RandomUniform: CPU XLA_CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "Add: CPU XLA_CPU \n", + "RefSwitch: CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " feature_fusion/Conv_5/weights/Initializer/random_uniform/shape (Const) \n", + " feature_fusion/Conv_5/weights/Initializer/random_uniform/min (Const) \n", + " feature_fusion/Conv_5/weights/Initializer/random_uniform/max (Const) \n", + " feature_fusion/Conv_5/weights/Initializer/random_uniform/RandomUniform (RandomUniform) \n", + " feature_fusion/Conv_5/weights/Initializer/random_uniform/sub (Sub) \n", + " feature_fusion/Conv_5/weights/Initializer/random_uniform/mul (Mul) \n", + " feature_fusion/Conv_5/weights/Initializer/random_uniform (Add) \n", + " feature_fusion/Conv_5/weights (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/Adam/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/Adam/Initializer/zeros/Const (Const) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/Adam/Initializer/zeros (Fill) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/Adam (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/Adam/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/Adam/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/Adam_1/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/Adam_1/Initializer/zeros/Const (Const) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/Adam_1/Initializer/zeros (Fill) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/Adam_1 (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/Adam_1/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_feature_fusion/Conv_5/weights/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_174 (IsVariableInitialized) /device:GPU:0\n", + " cond_174/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_174/Switch_1 (Switch) \n", + " feature_fusion/Conv_5/weights/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + 
" feature_fusion/Conv_5/weights/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " feature_fusion/Conv_5/weights/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_174/read/Switch_feature_fusion/Conv_5/weights/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_174/read_feature_fusion/Conv_5/weights/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_174/Merge_feature_fusion/Conv_5/weights/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_5/weights/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_174 (AssignSub) /device:GPU:0\n", + " save/Assign_82 (Assign) /device:GPU:0\n", + " save/Assign_83 (Assign) /device:GPU:0\n", + " save/Assign_84 (Assign) /device:GPU:0\n", + " save/Assign_85 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:18.461572: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "ApplyAdam: CPU \n", + "IsVariableInitialized: CPU \n", + "Assign: CPU \n", + "RefSwitch: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " feature_fusion/Conv_5/BatchNorm/gamma/Initializer/ones (Const) \n", + " feature_fusion/Conv_5/BatchNorm/gamma (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/gamma/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/gamma/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/gamma/Adam/Initializer/zeros (Const) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/gamma/Adam (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/gamma/Adam/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/gamma/Adam/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/gamma/Adam_1/Initializer/zeros (Const) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/gamma/Adam_1 (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/gamma/Adam_1/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/gamma/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_feature_fusion/Conv_5/BatchNorm/gamma/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_175 (IsVariableInitialized) /device:GPU:0\n", + " cond_175/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_175/Switch_1 (Switch) \n", + " feature_fusion/Conv_5/BatchNorm/gamma/ExponentialMovingAverage 
(VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/gamma/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/gamma/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/gamma/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/gamma/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " feature_fusion/Conv_5/BatchNorm/gamma/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_175/read/Switch_feature_fusion/Conv_5/BatchNorm/gamma/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_175/read_feature_fusion/Conv_5/BatchNorm/gamma/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_175/Merge_feature_fusion/Conv_5/BatchNorm/gamma/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/gamma/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/gamma/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_175 (AssignSub) /device:GPU:0\n", + " save/Assign_76 (Assign) /device:GPU:0\n", + " save/Assign_77 (Assign) /device:GPU:0\n", + " save/Assign_78 (Assign) /device:GPU:0\n", + " save/Assign_79 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:18.462139: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "ApplyAdam: CPU \n", + "IsVariableInitialized: CPU \n", + "Assign: CPU \n", + "RefSwitch: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " feature_fusion/Conv_5/BatchNorm/beta/Initializer/zeros (Const) \n", + " feature_fusion/Conv_5/BatchNorm/beta (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/beta/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/beta/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/beta/Adam/Initializer/zeros (Const) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/beta/Adam (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/beta/Adam/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/beta/Adam/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/beta/Adam_1/Initializer/zeros (Const) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/beta/Adam_1 (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/beta/Adam_1/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/beta/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_feature_fusion/Conv_5/BatchNorm/beta/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_176 (IsVariableInitialized) /device:GPU:0\n", + " cond_176/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_176/Switch_1 (Switch) \n", + " feature_fusion/Conv_5/BatchNorm/beta/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/beta/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/beta/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/beta/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/beta/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/beta/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/beta/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/beta/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/beta/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " feature_fusion/Conv_5/BatchNorm/beta/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_176/read/Switch_feature_fusion/Conv_5/BatchNorm/beta/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_176/read_feature_fusion/Conv_5/BatchNorm/beta/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_176/Merge_feature_fusion/Conv_5/BatchNorm/beta/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/beta/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " 
feature_fusion/Conv_5/BatchNorm/beta/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_176 (AssignSub) /device:GPU:0\n", + " save/Assign_72 (Assign) /device:GPU:0\n", + " save/Assign_73 (Assign) /device:GPU:0\n", + " save/Assign_74 (Assign) /device:GPU:0\n", + " save/Assign_75 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:18.560254: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Assign: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " feature_fusion/Conv_5/BatchNorm/moving_mean/Initializer/zeros (Const) \n", + " feature_fusion/Conv_5/BatchNorm/moving_mean (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/moving_mean/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/moving_mean/read (Identity) /device:GPU:0\n", + " model_0/feature_fusion/Conv_5/BatchNorm/AssignMovingAvg/sub/x (Const) /device:GPU:0\n", + " model_0/feature_fusion/Conv_5/BatchNorm/AssignMovingAvg/sub (Sub) /device:GPU:0\n", + " model_0/feature_fusion/Conv_5/BatchNorm/AssignMovingAvg/sub_1 (Sub) /device:GPU:0\n", + " model_0/feature_fusion/Conv_5/BatchNorm/AssignMovingAvg/mul (Mul) /device:GPU:0\n", + " model_0/feature_fusion/Conv_5/BatchNorm/AssignMovingAvg (AssignSub) /device:GPU:0\n", + " save/Assign_80 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:18.560963: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Assign: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " feature_fusion/Conv_5/BatchNorm/moving_variance/Initializer/ones (Const) \n", + " feature_fusion/Conv_5/BatchNorm/moving_variance (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/moving_variance/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_5/BatchNorm/moving_variance/read (Identity) /device:GPU:0\n", + " model_0/feature_fusion/Conv_5/BatchNorm/AssignMovingAvg_1/sub/x (Const) /device:GPU:0\n", + " model_0/feature_fusion/Conv_5/BatchNorm/AssignMovingAvg_1/sub (Sub) /device:GPU:0\n", + " model_0/feature_fusion/Conv_5/BatchNorm/AssignMovingAvg_1/sub_1 (Sub) /device:GPU:0\n", + " model_0/feature_fusion/Conv_5/BatchNorm/AssignMovingAvg_1/mul (Mul) /device:GPU:0\n", + " model_0/feature_fusion/Conv_5/BatchNorm/AssignMovingAvg_1 (AssignSub) /device:GPU:0\n", + " save/Assign_81 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:18.561423: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Assign: CPU \n", + "IsVariableInitialized: CPU \n", + "ApplyAdam: CPU \n", + "RandomUniform: CPU XLA_CPU \n", + "Fill: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "Add: CPU XLA_CPU \n", + "RefSwitch: CPU \n", + "Identity: CPU XLA_CPU \n", + "VariableV2: CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " feature_fusion/Conv_6/weights/Initializer/random_uniform/shape (Const) \n", + " feature_fusion/Conv_6/weights/Initializer/random_uniform/min (Const) \n", + " feature_fusion/Conv_6/weights/Initializer/random_uniform/max (Const) \n", + " feature_fusion/Conv_6/weights/Initializer/random_uniform/RandomUniform (RandomUniform) \n", + " feature_fusion/Conv_6/weights/Initializer/random_uniform/sub (Sub) \n", + " feature_fusion/Conv_6/weights/Initializer/random_uniform/mul (Mul) \n", + " feature_fusion/Conv_6/weights/Initializer/random_uniform (Add) \n", + " feature_fusion/Conv_6/weights (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/Adam/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/Adam/Initializer/zeros/Const (Const) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/Adam/Initializer/zeros (Fill) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/Adam (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/Adam/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/Adam/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/Adam_1/Initializer/zeros/shape_as_tensor (Const) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/Adam_1/Initializer/zeros/Const (Const) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/Adam_1/Initializer/zeros (Fill) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/Adam_1 (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/Adam_1/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_feature_fusion/Conv_6/weights/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_177 (IsVariableInitialized) /device:GPU:0\n", + " cond_177/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_177/Switch_1 (Switch) \n", + " feature_fusion/Conv_6/weights/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + 
" feature_fusion/Conv_6/weights/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " feature_fusion/Conv_6/weights/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_177/read/Switch_feature_fusion/Conv_6/weights/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_177/read_feature_fusion/Conv_6/weights/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_177/Merge_feature_fusion/Conv_6/weights/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_6/weights/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_177 (AssignSub) /device:GPU:0\n", + " save/Assign_96 (Assign) /device:GPU:0\n", + " save/Assign_97 (Assign) /device:GPU:0\n", + " save/Assign_98 (Assign) /device:GPU:0\n", + " save/Assign_99 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:18.561868: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "ApplyAdam: CPU \n", + "IsVariableInitialized: CPU \n", + "Assign: CPU \n", + "RefSwitch: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " feature_fusion/Conv_6/BatchNorm/gamma/Initializer/ones (Const) \n", + " feature_fusion/Conv_6/BatchNorm/gamma (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/gamma/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/gamma/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/gamma/Adam/Initializer/zeros (Const) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/gamma/Adam (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/gamma/Adam/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/gamma/Adam/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/gamma/Adam_1/Initializer/zeros (Const) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/gamma/Adam_1 (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/gamma/Adam_1/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/gamma/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_feature_fusion/Conv_6/BatchNorm/gamma/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_178 (IsVariableInitialized) /device:GPU:0\n", + " cond_178/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_178/Switch_1 (Switch) \n", + " feature_fusion/Conv_6/BatchNorm/gamma/ExponentialMovingAverage 
(VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/gamma/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/gamma/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/gamma/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/gamma/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/gamma/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/gamma/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " feature_fusion/Conv_6/BatchNorm/gamma/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_178/read/Switch_feature_fusion/Conv_6/BatchNorm/gamma/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_178/read_feature_fusion/Conv_6/BatchNorm/gamma/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_178/Merge_feature_fusion/Conv_6/BatchNorm/gamma/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/gamma/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/gamma/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_178 (AssignSub) /device:GPU:0\n", + " save/Assign_90 (Assign) /device:GPU:0\n", + " save/Assign_91 (Assign) /device:GPU:0\n", + " save/Assign_92 (Assign) /device:GPU:0\n", + " save/Assign_93 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:18.562269: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Merge: CPU XLA_CPU \n", + "Switch: CPU XLA_CPU \n", + "Const: CPU XLA_CPU \n", + "ApplyAdam: CPU \n", + "IsVariableInitialized: CPU \n", + "Assign: CPU \n", + "RefSwitch: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " feature_fusion/Conv_6/BatchNorm/beta/Initializer/zeros (Const) \n", + " feature_fusion/Conv_6/BatchNorm/beta (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/beta/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/beta/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/beta/Adam/Initializer/zeros (Const) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/beta/Adam (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/beta/Adam/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/beta/Adam/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/beta/Adam_1/Initializer/zeros (Const) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/beta/Adam_1 (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/beta/Adam_1/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/beta/Adam_1/read (Identity) /device:GPU:0\n", + " Adam/update_feature_fusion/Conv_6/BatchNorm/beta/ApplyAdam (ApplyAdam) /device:GPU:0\n", + " IsVariableInitialized_179 (IsVariableInitialized) /device:GPU:0\n", + " cond_179/read/Switch (RefSwitch) /device:GPU:0\n", + " cond_179/Switch_1 (Switch) \n", + " feature_fusion/Conv_6/BatchNorm/beta/ExponentialMovingAverage (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/beta/ExponentialMovingAverage/IsVariableInitialized (IsVariableInitialized) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/beta/ExponentialMovingAverage/cond/Switch (Switch) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/beta/ExponentialMovingAverage/cond/switch_t (Identity) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/beta/ExponentialMovingAverage/cond/switch_f (Identity) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/beta/ExponentialMovingAverage/cond/pred_id (Identity) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/beta/ExponentialMovingAverage/cond/read/Switch (RefSwitch) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/beta/ExponentialMovingAverage/cond/read (Identity) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/beta/ExponentialMovingAverage/cond/Switch_1 (Switch) \n", + " feature_fusion/Conv_6/BatchNorm/beta/ExponentialMovingAverage/cond/Merge (Merge) /device:GPU:0\n", + " cond_179/read/Switch_feature_fusion/Conv_6/BatchNorm/beta/ExponentialMovingAverage (Switch) /device:GPU:0\n", + " cond_179/read_feature_fusion/Conv_6/BatchNorm/beta/ExponentialMovingAverage (Identity) /device:GPU:0\n", + " cond_179/Merge_feature_fusion/Conv_6/BatchNorm/beta/ExponentialMovingAverage (Merge) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/beta/ExponentialMovingAverage/Assign (Assign) /device:GPU:0\n", + " 
feature_fusion/Conv_6/BatchNorm/beta/ExponentialMovingAverage/read (Identity) /device:GPU:0\n", + " ExponentialMovingAverage/AssignMovingAvg_179 (AssignSub) /device:GPU:0\n", + " save/Assign_86 (Assign) /device:GPU:0\n", + " save/Assign_87 (Assign) /device:GPU:0\n", + " save/Assign_88 (Assign) /device:GPU:0\n", + " save/Assign_89 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:18.663181: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Assign: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " feature_fusion/Conv_6/BatchNorm/moving_mean/Initializer/zeros (Const) \n", + " feature_fusion/Conv_6/BatchNorm/moving_mean (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/moving_mean/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/moving_mean/read (Identity) /device:GPU:0\n", + " model_0/feature_fusion/Conv_6/BatchNorm/AssignMovingAvg/sub/x (Const) /device:GPU:0\n", + " model_0/feature_fusion/Conv_6/BatchNorm/AssignMovingAvg/sub (Sub) /device:GPU:0\n", + " model_0/feature_fusion/Conv_6/BatchNorm/AssignMovingAvg/sub_1 (Sub) /device:GPU:0\n", + " model_0/feature_fusion/Conv_6/BatchNorm/AssignMovingAvg/mul (Mul) /device:GPU:0\n", + " model_0/feature_fusion/Conv_6/BatchNorm/AssignMovingAvg (AssignSub) /device:GPU:0\n", + " save/Assign_94 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:18.667501: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "See below for details of this colocation group:\n", + "Colocation Debug Info:\n", + "Colocation group had the following types and supported devices: \n", + "Root Member(assigned_device_name_index_=-1 requested_device_name_='/device:GPU:0' assigned_device_name_='' resource_device_name_='/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]\n", + "AssignSub: CPU \n", + "Const: CPU XLA_CPU \n", + "Mul: CPU XLA_CPU \n", + "Assign: CPU \n", + "VariableV2: CPU \n", + "Identity: CPU XLA_CPU \n", + "Sub: CPU XLA_CPU \n", + "\n", + "Colocation members, user-requested devices, and framework assigned devices, if any:\n", + " feature_fusion/Conv_6/BatchNorm/moving_variance/Initializer/ones (Const) \n", + " feature_fusion/Conv_6/BatchNorm/moving_variance (VariableV2) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/moving_variance/Assign (Assign) /device:GPU:0\n", + " feature_fusion/Conv_6/BatchNorm/moving_variance/read (Identity) /device:GPU:0\n", + " model_0/feature_fusion/Conv_6/BatchNorm/AssignMovingAvg_1/sub/x (Const) /device:GPU:0\n", + " model_0/feature_fusion/Conv_6/BatchNorm/AssignMovingAvg_1/sub (Sub) /device:GPU:0\n", + " model_0/feature_fusion/Conv_6/BatchNorm/AssignMovingAvg_1/sub_1 (Sub) /device:GPU:0\n", + " model_0/feature_fusion/Conv_6/BatchNorm/AssignMovingAvg_1/mul (Mul) /device:GPU:0\n", + " model_0/feature_fusion/Conv_6/BatchNorm/AssignMovingAvg_1 (AssignSub) /device:GPU:0\n", + " save/Assign_95 (Assign) /device:GPU:0\n", + "\n", + "2021-03-17 16:04:18.668423: W tensorflow/core/common_runtime/colocation_graph.cc:983] Failed to place the graph without changing the devices of some resources. Some of the operations (that had to be colocated with resource generating operations) are not supported on the resources' devices. 
Current candidate devices are [\n", + " /job:localhost/replica:0/task:0/device:CPU:0].\n", + "[Identical 'Failed to place the graph' colocation warnings, each listing the same CPU-only candidate device and the same colocation debug info (AssignSub, Merge, Switch, Assign, IsVariableInitialized, ApplyAdam, Const, Identity, VariableV2 and related ops), repeat here for the weights and biases of feature_fusion/Conv_7, Conv_8 and Conv_9; elided for brevity.]\n", + "\n", + "Generator use 10 batches for buffering, this may take a while, you can tune this yourself.\n", + "2 training images in ./training_samples/\n", + "Step 000000, model loss 0.0031, total loss 0.0048, 0.85 seconds/step, 1.18 examples/second\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "5uBVeo6612FC", + "outputId": "2cead132-cde0-4a09-ab3a-41bfba3d41f8" + }, + "source": [ + "# modified test_images.py to comment out cv2.imshow\n", + "!python3 test_images.py --checkpoint_path models/east_resnet_v1_50_rbox/" + ], + "execution_count": 8, + "outputs": [ + { + "output_type": "stream", + "text": [ + "path to images: ./training_samples\n", + "Wrote out ./outputs/img_2.jpg\n", + "Detecting text boxes for img_1.jpg\n", + "Loading model from models/east_resnet_v1_50_rbox/\n", + "WARNING:tensorflow:\n", + "The TensorFlow contrib module will not be included in TensorFlow 2.0.\n", + "For more information, please see:\n", + " * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n", + " * https://github.com/tensorflow/addons\n", + " * https://github.com/tensorflow/io (for I/O related ops)\n", + "If you depend on functionality not listed there, please file an issue.\n", + "\n", + "WARNING:tensorflow:From /content/EAST/run_demo_server.py:49: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.\n", + "\n", + "WARNING:tensorflow:From /content/EAST/run_demo_server.py:50: The name tf.get_variable is deprecated. Please use tf.compat.v1.get_variable instead.\n", + "\n", + "WARNING:tensorflow:From /content/EAST/nets/resnet_utils.py:236: The name tf.GraphKeys is deprecated. 
Please use tf.compat.v1.GraphKeys instead.\n", + "\n", + "WARNING:tensorflow:From /content/EAST/nets/resnet_v1.py:181: The name tf.variable_scope is deprecated. Please use tf.compat.v1.variable_scope instead.\n", + "\n", + "WARNING:tensorflow:From /usr/local/lib/python3.7/dist-packages/tensorflow_core/contrib/layers/python/layers/layers.py:1057: Layer.apply (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Please use `layer.__call__` method instead.\n", + "resnet_v1_50/block1 (?, ?, ?, 256)\n", + "resnet_v1_50/block2 (?, ?, ?, 512)\n", + "resnet_v1_50/block3 (?, ?, ?, 1024)\n", + "resnet_v1_50/block4 (?, ?, ?, 2048)\n", + "Shape of f_0 (?, ?, ?, 2048)\n", + "Shape of f_1 (?, ?, ?, 512)\n", + "Shape of f_2 (?, ?, ?, 256)\n", + "Shape of f_3 (?, ?, ?, 64)\n", + "WARNING:tensorflow:From /content/EAST/model.py:12: The name tf.image.resize_bilinear is deprecated. Please use tf.compat.v1.image.resize_bilinear instead.\n", + "\n", + "Shape of h_0 (?, ?, ?, 2048), g_0 (?, ?, ?, 2048)\n", + "Shape of h_1 (?, ?, ?, 128), g_1 (?, ?, ?, 128)\n", + "Shape of h_2 (?, ?, ?, 64), g_2 (?, ?, ?, 64)\n", + "Shape of h_3 (?, ?, ?, 32), g_3 (?, ?, ?, 32)\n", + "WARNING:tensorflow:From /content/EAST/run_demo_server.py:55: The name tf.train.Saver is deprecated. Please use tf.compat.v1.train.Saver instead.\n", + "\n", + "WARNING:tensorflow:From /content/EAST/run_demo_server.py:57: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.\n", + "\n", + "WARNING:tensorflow:From /content/EAST/run_demo_server.py:57: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead.\n", + "\n", + "2021-03-17 16:43:50.837949: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA\n", + "2021-03-17 16:43:50.843114: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 2299995000 Hz\n", + "2021-03-17 16:43:50.843342: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x56090a0f72c0 initialized for platform Host (this does not guarantee that XLA will be used). 
Devices:\n", + "2021-03-17 16:43:50.843375: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version\n", + "2021-03-17 16:43:50.845361: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcuda.so.1\n", + "2021-03-17 16:43:50.856426: E tensorflow/stream_executor/cuda/cuda_driver.cc:318] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected\n", + "2021-03-17 16:43:50.856461: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (72a38a5066d8): /proc/driver/nvidia/version does not exist\n", + "233 text boxes before nms\n", + "Process took 2.82 seconds\n", + "Wrote out ./outputs/img_1.jpg\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 517 + }, + "id": "cW1zYxepErPd", + "outputId": "4aa5f1a1-ae7a-494b-d62d-0a2ffc776127" + }, + "source": [ + "from IPython.display import Image\n", + "Image('./outputs/img_1.jpg')" + ], + "execution_count": 9, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "image/jpeg": "[base64-encoded JPEG payload elided: rendered detection result for ./outputs/img_1.jpg with the predicted text boxes drawn]
DDWVP8Ae1Q/hEnn+1SVHR5HvRUw9UPam/8AD7/kMyf9ectbF5/qh9KyPhvB5uqXAz/y5y1oXkw8qu7L1+6OnEfwUZ8/ao6fN/rvxplexY8osf8ALKo5pvOuo4P+etEP+p/CrGg+RN4o0/z5/L/fUv4n8MVL+MffHwr0f4O3nhzS9K8cTX0VxFZxR/6JXaal8Jf2bLyL/inPi3c2Nx/092fmf+1a8b0fWNK1KK3n0q48z9zFHVyGGfza9GvVwlJfvKftD7GlUo+xPULP9nX4c6lL/wAlwtpP/IVdJD+zr4Os/Dl5B4c+Ldlc3EsPl+TN/wDva8Pm8+GtTwh/yGpP+vKWs/rWX+19n9XD2tE8D+P3g+fwdfyWM9/FLJFXjk3/AB91638YPP8A9Mgn/wCe1ePzTeddSVliPa+1/dnzeO9j7Ykm/wBT+FR+f7Uef7VJXNUpVap5vtDU0399YVl6n/x8yVqWcA/suSesPWJv9Krmxvsz0sMVpv8AXfjS+bN61H/raIYf3tcNL2tJmi3Psj9lH/kTNO/64RV9IaP/AKk189/s0w/Y/C+nwH/njX0BZ/8AHqfpX2eB/wB0P2Ph7/czQmu/+WFRzTVXoqT6Ukhm/e1cmvKpww1J5P7qtqdQDQs6jm/1341Xg71Yhh82ioAef7UTTUUTTVnrSAj+2e/6VYhmn6VTqSGbyJakDQhmon7VHB3qStaWJNCTyPepP9VUcM1Sef7VrU1MyTz/AGohho87935OKPI96XtFSNCSGapPtnP7j/V1H5HvRNDUe0/dBSCaapIO9EMNHkz+bQqdIAmhqPyfNqSaaq/neVWVSnSOmnUJJoaKki/fRefUfn+1FKp7EyD/AJZUVJ+4mqSGGirU9qBz/jCbybCvH/GE3+sr1j4kXnk2vkV4v4wvP9ZWtKmc9TY5O8vP9KkqP7Z7/pVe8mnN10qvNeV6SOMuTTVH5/tVP7Z7/pRWYEk15WPqV5i68+tCaaub1i8H2qSuikc5oQ3nnVoQd65/TZv9KrYhmo9odBoTTfuqpzTVHNeVXmmoqM5FuSTTVXmmn6VXmmqOa8/dVznUtgmvKr3mpf8A6qjn7VnzXldBw1NyS8vKz5pv3VWLyaDyqx5pqyq1CyO8m71n3l4PKqS8m71l3k0/Suf2jpAU7y8rPvJvOqS8m71nzTVrdmZXvJj5tV5pqJpqpzTVlSAsVh+Nv3NhH/12rU8/2rl/G95PMY/P/wCe1aVKlL2X7sCn53+ie1Upv9d+NLNNUcM3m1zf8uwNzTZ/NlrpNCrj9M711ng/F55c/wDyzlrTDVK1Iz9kdJafwfhWpo95+6jrPs4T5tXIYfJlxWf732poW9R/49fwqpD+9qSabzajrPEP/n3UOqkH+kf8sOtRzTT9Kk+2e/6VTmm82kqlL/l4FUjmvKj8/wBqJv3P/bWiaGCGKSeua9X2v7sk+sJpvPlqvD5FWLyz8ms+GHyZc/8ALOun/l0d9I2IO9STQ1Xhmq5NNB5VdNKmjUz/ACf3tWIYf+ulSQ+RViDvUVMRSpC9miv5I/57yS/9dakhh/dUeT+9qTyPelTpf8vB+0RHB3qxN5HlVXm/6YfhRDNWfszop1EFXLObvVfyYPKqSy8itKdP96c5cqP97Uf72pIbys/4rAsw/wCp/Ckg71H537qo/P8AatPaGhYqPyf3nnZqSH/XfjUkM1a+1MySz/c/v6jvLypPP9qrzfvaVTb3DQkh1PvipIZp+lZfk/vPOzVz7Z7/AKVzqn7UCxPDP/ywqSDvUcM1STeR5VKpUCnTI5pqPP8Aao6jg71zAWPNm9akhm82o4O9SQ/678a6DMP9bUl5D50WTR5/tUk037qgDLh/4+vIzRNeVY/c+bR9ig/5YVh+5AjhvJ5u1XP+WfkVHDD5VSf6qj+EHs0WIZv3VHnebVPz/apPP9qPaM0LEHerH7nyqpwzVJ5/tSAP+nf9Kkhh8qo/P9qj+2e/6VrStczLE037qo4byDNU5vPqOH7Rn99WvtKpmc/8bP8ATPBmof8AXGvgP4k/8h+T8a+8PizNPN4ckg/5Z5r4L+J0vk+I5BXgZqfB8a/wTn5pv3tFR+f7Uef7V5lPEfbPzepT0Puf4M/8mPKf+pX1P+dxXw/D/rvxr7d+Cz5/YYV/+pW1T+dxXw/5sPrX6b4kTgsqyW//AEDx/wDSYH80eAH/ACUvFv8A2HT/APS6poQd6+zP+CYN553g3VP+wxLXxXDNX2J/wTN/c+FtYg/55axLXwnD+I/4UT+rss/3s+4IIfOi/wC2Nfl3+05/yN1x/wBflfqRDN5MUZxX5h/tUQ/YvFFxAf8AllqUtfe59/uZ7Wd/7oeL+f7VYhmqn5/tR5/tX5rVxNGkfFUtz7c/4Jg/udG1SD/qPV9k/wDMCvP+vOvib/gmbeeda65x/qtY8yvtjzj/AGDef9edfofDOuXQqH6Hl3+5n5j/ALUX/Ie4/wCWU0teR+d5tewftRZ/4SO4g/55XkteN2dfJZ3b64fE5l/vhYhhr6I/4JyzeT8S9Yh/6c4v/atfOfn+1fRH/BOr/kqGq/8AXnF/7VrPA+y+uU/ZmeWf75TP0g0ebzrCOvm34mR+V8PPFUZ7WGp/ylr6K8NzedYcivnb4qP/AMW98WOf+gfqR/8AHZa/eeFK3PWxH/Xmf5xPw76Xn/JJ5L/2H0f/AE3VPzsmm/e1JUc0372pIf3tfzXTq+2P2gj/AHtFH+qo8/2rLkpB7Ik/1tSf8sqr1JB3p/uQEh/1341Z8/2qvR5/tXNhgqliDvViq8M1SebD612/uTMkoqPz/ao/9bXOaFiaaDpUlnN3qvViDvSp+2A6j4b/APITuf8Arzlp95/qh9Kg8B/8fd5/16VJrH+pFetgVVXvmmJ/gleDvRP2qP8A5ZVHP2r1vrNL+GeItySDvUmj+G9b8Va9b6Vof728upvLhi86o4O9eifsx6F/b3xfs4IIPNkihlkhrjpbnXE9I8E6P8cPCtrHB/wh2my/89v9M8vza6iHxt8W4f8AmQLb/tjeebXpn/CvfH5i/wCRO1KT/rlDWXeeD/HFnL/yIGr/APgHXbNOx9NTT9gYdl4wvdS/4/rHy/8AtjXSeCZoJtZkEH/PnLWPeaD4jh/fz+HL7/vzW54J0e/03+0NVvrKSLyrP/lrXN7J1a1MqzPn/wCM00E0t5/12rxuaH/SpK9M+MF7+6uJ/wDnrXmc0372umoeHjivUlR1JWX+A4aR0Fn/AMiv/wBtq5/UoT5vFbBm/wCJNHB/02rH1H/W/jXNialH/l4elSpoz/8AlrVjTv8AW/jVf/lrViz/ANafrXk4af772gLc+2PgPDANG0+eD/njFXuGnf8AHr+FeH/AL/kAaf8A9cYa9w07/j1/CvrcNU/cn7Nkn+50yx5372jz/aijyPeun2Z7PtSSGarkV5B/y8T1l+f7VJ/18fjR7Q09qaE00HnVYhvIM1j/AGz3/SizvPOlrL2ns2M1JZv+eFRzTTzdTUcHepIYfNru3APJ86L/AKaVH5M8PWrnk+VR
WXs0aEcN55NSG886Wqf/AC08iiDvXNTp0QNSzm71Y8791WfZ/uZcVcrX/r2BJRDN+9qOb99LiiDvRU9iaGxZ2fnVJNZ+TVOy1I2Z8irn9pedzWP7kzI/I96jqSabzarz9qdPEGnsgn7VXg71H50//bOpKf8AFNfZs0Yf9T+FPmhqvZ/6o/Si8m711GRJ+48n/X1JVOaD9159Rw3n7qo9p++NDk/idefuq8X8VXn72SvWPidefuq8X8YTDzZKdLQ56mxhzTVT/cTVHNN+9qPz/au32iOMJpqPP9qrzTfvakhmrnpfxjnJLybzq5u8xNdSV0E03kRVy/M11XSdBYi/cy+fWhDeVnwd60IYaDnJPP8Aaq803ky5omm/e1XvJu9ZU7gRzTVXmmon7VXmmrSlUH+9C8vKz5pqJpqpzTUVcT7IyW4TXk9U7yYeVUk01Z95eVzYerS/5eHWtiveTd6z7yYeVVi8m71l3k3etadT2tU5CneTd6y73z6uXk3esu8m/wCm9Op7KkbezI5pv3VV5poOlRzTCGLP/LSq801ctOpSMSSftXL+KryCG6jgroPO/dVyfjD99rNaezX8QzK80w8rz6ks6p+f7VJB3qP+XXtANyzrrPB/+qj+tcXZwz+VkV3Hg+zPlR0vrLq/wzWnUOohhqTz/ao4YakrT2d1+8Nam5GPImljn8+X91NUc00HSiaGq801eNVpUqTNVsE0372jzYfWq9H/AC08it6f7qrqZfxQn7VX87/lh+lWIbOxhijgggiiji/1NV/J/e11YanSq/vDY+uLy8nm/Oq9F5eedRZw+dF9orSp/wBOzoJIO9WPPn/5YVXg71Yg70ezdQCSG8qT+0f3tV/+WVRzTVrZGVO5qQ3lHnfupMCs/wA/2qx/yyrT2iVE6KexX86fzfs9SfvaKKzVUZY8/wBqkhz/ANtarz/6rz8VJZefWns6VUDU/wCWfn1HSw/6n8KSub2XsjQseT+6qOaao/Nm9aP+vj8a1/cmZJ5/tUkM1V4O9SUqi9qBJUk01V6KzqAH7nyqKKj8797RSqVftgWPP9qJ+1Hmw+tSUezNCv5/tR5372o5pqjrEDQ+2e/6VJDN+9/6Z1nwd6sVtTM6hYmm/e1J537qq/n+1Hn+1aezogWKPI96rwzVY8/2ri/c/bNA/wCWtSeTPN0qnDqUHm1c+2e/6UU6ZmR0VJNNUfnebWVTc0CpKjqOaafzaPaeyAued/yw/SpJv3VV4Z/+W5o87zoqLmYef7Uef7UebD60T9q1pVPZGZz/AMSIfO8L3n/XGvz6+Kf/ACMclfob42/feHLyx/5aSw1+fXxa/wCRik+tebmWp8Zxr/ulM4yb7Rn9zUf+kf8ALfrR5/tRXy/tPZVT8yPu74Kf8mJr/wBirqn87ivhXz/avur4KNn9hJW/6lXVP53FfCv+tr9d8Ro03lWTX/6B4/8ApMD+YvAL/kpeLf8AsOn/AOl1SxDNX2J/wTBm87S9cg/6iVfHcHevrT/gmPef8TTXLE/6uvgcg/3s/qfLb/XD78P+pjr8y/2xf+Sg6p/2Epa/SjTZh5XNfmn+2N+5+I2qfaP+gxLX2Of/AO6H0+dJ/UzwuGb97VyHyKp+bD61JZ1+cVKtGn+7PgqR9if8EzbzyYtY/wCmt55lfcFnN/otfBf/AATTvPJ17WLHH+tmikr7ws/+PU/Sv0zIMSv7IPvst/3M/NP9qn/kd9U/6/Ja8b87yq90/bA02+034g6hB5H7z7ZL51eHzQ18lm38c+SzL/fCOvfP+CeM08PxkuIIP+Wum18/19Cf8E5YZ7z4yahPBB+7i0399XPgaf8AtlMzyz/fKZ+knhuGCHS4/s9fPHxVRv8AhAPFafxf2fqQ/HbLX0XoM3/Erjr55+LoX/hDfFwHT7Hqf8pa/d+DF++xX/XqX6H4h9L/AP5JXJv+w+j/AOm6p+cM/ajzv3tRzTfvaj/5a1/NFWn7I/YqlTUsVJ/qqj8/2opUvamhJUlR+f7VJ5/tXpfuaqMyOpKr/bPf9Kkrm6/uzMkqSo6jpgWKkqvUnn+1ZeyNPaIkqTyPeq8HerEP7qun2fsgOk+Hsx+1Xn/XnVjUp/Niql8Pv+PrUP8Arzqxef6ofSvSwwsT/CKH/LWrEHeq8/ajz/atalPU8xbmhB3r2T9jOGeH4g3muQDy/sGmyyV4nB3r6U/4J+6PBNdeKPEk/wDx7/Y/LropfxT1Mt1xh9IeD/2lvib4b0uPyNW/8g1oTftjfEezl4+zS/8AbGuDvPiFrln+48jTf+2tnWpZ/FTSodGj+3aHpH2z/lt/odaVMbiqL9w+x9odhD+2x4xmH+neHNNl/wC2NU/iF+1pBqPw+1Cx1XwdY/6VD5fmw15/rHjbSte/cX3g/Tf+2MNcH8bLyez8L+RPB5X2qb9zWv8AalX7CMqmM/dHjfxa8SWN5LJBY/6uvO/O/e1qeMJvO1SQVjw/8981xVKtz47HVPa1iSpPO/dVXpIf9d+NZ06hzUqhuwzf8SG3n/5aS1j3kx82tdv+QFb/AErE1Kb/AEqsqtv4lQ7qexWh/wBd+NaNn++ljg/5Z+dWfVzQf32s28H/AE2rmVQ1o7o+4PgPZ/8AEms/+uMVe0Q/ubX7OK8n+DMHkWFvb/8APKvVJpvNr6jBU/3J+1ZX/udMPtn73pVj7Z7/AKVn1JDNWtT2yPVNCo4f9d+NRwzVJB3rEv2pJNDUcMPky+fRNN5tHn+1R7Q1LkHerEM372qcM1SQzVr9ZNDQ8/2qnez/APLCpIZvPiqOaGtPZ+1/eGXtSvD+6qSGaq8/aiDvWVQZoQzVchmqn/yyqOGb97Wf8IDUqSqcM1XPP9qs0I5vP82rFn5/m80T9qkh/dVHsv3wFzzYfWo/O82o6PP9q0qU18BpSC8/1UkFV4Yakmm/dVX86eHpR7NUg9qaEHepJ+1Zf2y/86rnnebW5mRz9qjomhorH2dW5oeb/FqaaG68ivF/FWpT+b0r1j4wal9sv5D/AM8q8X8STfva6kctUz/P9qjmmqv5/tUc01V/COf2hJ5372o/P9qj82H1qv5/tVGK3NC8m/0CTNcv53+l+9bmsTiHS65+D99L51L2ZtU2NCGb97WhDNWHD5EMtaBvPJirSlUOUlvP9aPrVaab97Uc15Veaas6lQ29mSTTVnzTUfbPf9Kr3k3es/akhNNVOaao5pqp/bPf9K13AkvJp+lZc01aE037qsu8m71l7ICO8m71l3l5Ul5MfNrLvJu9aVKljMr3l5WfNNUl5WfNeT1l9YALyafpVOaafyakmvKpzTU/3IEd5NP5WDXL3l5511W5eTDyq5Oab97WlKmcZJ53m1JDNWfNN5MuaIZqKlT2Rodho83rXd6D/wAesf1rgPDc3+rzXoHhv7kdKnb+IaHQQzDyvPpJv9d+NMh/1P4VY/1Vc7dWqaaleb9zFio6kqOs/ZUTb2hHND+6qvVi8m71XhmpVKftTSlUK80372jz/apLyGDrVeaH91RSp2qmftD6s/5dpKlh/wBT+FP8nyYqrzT
fvadKp7L+IdtIuQzVJ5/tVezhnmijng/5azVY8n91XR7P2qGLD/qfwqKo/wB/5v8Ao89SQf8AHzHXD++uaEnk/wDLf9akqSq/22f/AJYVriKbAkh8+pKjqSDvWtL90aEn7j7L5E9WIfIhrPqSH7Rn99XUZ+09kaP2y39D+dJ5372q8MNST9qxqmhY87935OKkhigmFU4O9WIbzyakzLE37qXH/LOjz/ao/OgmiqOH/XfjWgU9ixNDP1omhqSaaq800HlVjUNAmmqn9sHneRUfnfvajm/1341zfvv+XYeyNCH/AFvn4qTzvNrPhvJ6sQ3ldNO9L+IBJUkHeq803m1Yh/cxZrmqb8gFio5+1Hmw+tSeVD6Vr7Qy9nVK9Sf8taJ+1EP+u/GioMIfP82rHnfu/JxUdR/8taPZoCvN/rvxq5Z/6o/So5oak8n97UmZY/1tE/ao4fPo8/2rD917U0mSUUVJ5HvS9maLYjqSGajyPein9WMw8j3pZv8AU/hSUVpTp/uTMx9ezDYST1+f/wAZ/wBz44vIP+eU1foR4kg87S5Oa+A/j9Z+T431D/rtXFjf4R8bxZ/uh5/P2og70UV83Up/vj8tPur4Kf8AJia/9irqn87ivGf2Of2RfCnxysLjxH4xnl/dTeXD5Ne3/s9aRfeIP2K7LQdLiD3N94e1C3t0ZgoaR3nVRk8Dkjmrv7Dfwq8Y/BPwjc6P8SdOFpO9/JOn2e4SYOD0yVav3bizIMwzrLcolh6MpqNCN3GLaV4x3sfyl4GcY8HcLcY8UxzzHUsO542o4qpUjC6U6l2uZq9vIP8Ah2P8B8ef5995n/XavQP2e/2RfA/wN1681XwrfXMsd1D5U3m16MfGmiD/AJ6/9+//AK9TWfjrw/B99Z/wj/8Ar15uG4IzGhV9pDDT/wDAWf05Hxf8F6Vb2jzzDf8Ag2D/ACZuf6qvm/45fsB/8Lm8Zah4jn8cRW0d1N9phh8nyvKr3Z/H3htn3CG4A/65D/GiLx74bX/WRXP4RD/4qvRxHCecYyj7OeHl9xpjPHfwXr0fZyzvD/8Agd/yR8i3n/BJexh/1HxUii/7Yy14X+0V+zTffAHX5NJg1z+0vK/5beT+6r9Ip/GeizdbaT/vj/69eAftUfAvxH8dda+3+G9QtbWP7OiYvJXJyPopr5TFeGmc05c9DDSb+X+Z8vmHjT4JQpWo5zRb8nL/AORPlz9lH4hf8IH8Wre+1WfyrO6h+zTV+nHgPXv7e0GObz/Nk/5bV8Er/wAE9viMX3P400MfQzH/ANkr6U/Z+tPin8K9JTQPGXiGy1W2iTbC0G8PGPbctejkvBvFOEjyVMJK3rH/ADMMt+kN4NYaPJWzenb/AA1H+UDe/ai/Zp0P4zeHPt1hBFFqkX+pl/5618N+N/2Rf2hdB16SCDwBc3Nv/wAsZYq/RhfiZZBcNpkpz94BwKlPxU0/f5n9hyk/9dh/hXRm3hxn+P8A4dFr5x/zMMX9IDwHnW9p/bEP/BdZ/wDuM/MrR/2Xf2mtev8A7DB8Fdbi/wCms1n5cVfdH7H/AOyLP8DfDkn9uTxSahdfvJpfJr04fF5E+5orfjP/APWoX4wCMbodDwx+9mfKn8NtLK/C/iHAVvaVIc/zj/mc+H+kT4CYSt7T+1k/+4OI/wDlR1WpTQeG9GuNWv5/KjtYfMr5r1fWo/EPwN1vWopN63Gj6iwb14lH9K7n43ax4h+K3hOXwfomp/2LbXX/AB+kAy+d9RlcV53r2g2Xwv8A2ddW8O6tr8TpaaFex/bJ8RCRpFkKjBPUlwoGck49a/QcjyXNsqr4mriafJT9jNJ80Xro+jfRM/C/pBeMHh74lZdlWXcN4t160cZSm4+zqx93lnG/vwin70krLXXY+BZv9d+NMqSftUf/ACyr+U6lOlV/iH9ZhUkMNFFai9qFFSUVNIYf62pKjg71JW1PYzCijz/apPI96Xs0aBRRNDUn/LKnU2NCODvU8P8ArvxqCDvUlQtzKkdR4D/4+7z/AK9KNY/1IqPwfzLeT/8ATGjWJvOir2MLsi8T/CKdSVXqStzzSxB3r6o/YJ8bfCvwr4X1jSviN44i0n7feeXD5v8Ay1r5Tg71Ymm/dVeGqeyre0O/BYj6rW9ofop4w8N/A/xJYSX3gD446RL/AM8Yf+etY+jajfeD9H8iD+wL24i/57f8ta+C7O88mXFaEPjDxHpo/wBB1y+i/wC3yWu39ziqvtD1qWb0fbfvD7Q0fxVB428WxweMdKsdJj87y4fJ/dVuftdTeFdY0HT7HStcsb37LD+++yXkUnlV8JzePPEd5/x/a5LL/wBdqj/t6fp/y0p/ufZezmduJznCW/dljxtPB/wkdxBBP+7rL/5ZUTfv/wB/io4f9T+FcVj5erVDzv3tWPP9qr0eT5tKpSrUjlNy8m8nRreCufl/fS+fXQakfJ0u3/641zd5Xm4xs76QTTVqeFZhNr9n/wBfkVZc/atTwTD53i3T/wDr8iripVP3x00f46Pvz4Pw+To1vPXeef7Vwfwl/wCQX+FdxX2q/dUT9syz/dKZJ5sPrUkP+u/Gs+bz6sQzUe09qeoaHn+1FU4ZvNq5B3rX2Zzkf/LWrEM3lUfuqjn7VzVToLEM0HSpPP8Aaq9FZAXIZqJryD92IKrwzVJNDWyqfugJP9bR5P7qo/8AVUed+6qjQsQd6khh/e1XhmqxDNQBchhq5DD+6qnB3qxZefVfujQszf6n8KSGGiH97ViGGtjMr0VYmhqv/qq5PZ+1ZoV7yYeVUdnN3qS8mPm1X/1VZz3NC55/tUkHeqcM1WIO9dtKnSCpULE/aq802IpKkmmqneTf6LJ5Fc8/4poeL/FTWP8AT7ivH9Y1Kea6kr0j4tTeff3FeR6vN5N/JiuynUuebUI5rz97Uc15Veaao/P9q09nWOUsef7UQzVX8/2ohmp0hrcNYvP9AkrLs5vOsPPqxr0w8qsuDvWns0aVDUg71J537qqcM1STTfuq56Q/Zkk01V5pvKqOaaqc95PN1ro9ojYkmvKz5rzzqJpqrz9q5AJJpv3VZ801Sef7VTmmrQx9oSTXnk1n3k3nRdKjmvJ6pzXlL96HtSOaas+8m71JeTd6z7z991rmqkle8m71lzTfuquT9qz7ybyaz9nSJqleftVO8m71JNNVe7m/54GtPZ1he1Keozf6LJPXLzXn72ug1ibydLknrk/P9q0p3OYsT9qks/3MuKz5pp/N4nqxpsx83mipU9kQtzsPDcPnSx29emeFYf3McFebeD/+PqP6V6b4bh+x2scHnyyf9NpazVNnoUjYl8iGL/X/ALz/AJ5UVJ5Pm1HNN5MvkVlU/dfGbkc37qj/AFtE372o5v3VZVKnsqoFeftVfz/ark/aqc1nUYe9/aE1COaaiaGo5+1U5pp8f6+u2nUpUv4hn/FPsz9x/wBdI8ViXn+tH1q//wAs/IrMvP8AWj60nOr/AMuzuHw6lP8A6iD/AFdanM0NZ8NnWjD/AKn8K1wVPF/8vC6mxW/5a1Yg70VJ+6rWpT1M6RYhFx
/zw82q8/aiftVf9/NV1NTUsQw/uqkqODvUkHen7P8A59gFSedP5VHk+dL59SeR71nUp/YNAhmqTzvNqv8A9fH40TTVl7MzLsP+p/Ckg71X8791Unn+1dSAk87yZak8/wBqr1Yh8iasvaBT2JIZqjvP9UPpR5/tVebz/KpVTT+EU5+1WIO9Rz9qkg71zU/3IEnke9EHepPPg/5YVH9s9/0rWm6tUC5DDR+6qT7ZB5X+j1Tmmqg9qSTTfvased5MVZfn+1WPtkE0RqvaI5y5Szf6n8Kihmo8791XKBHPN/03qxZzHzap/wDLWrEN5VezNDQ/5ZVH5/tUcHepP3VZ+zMyP7Z7/pUcN550uaPsfncUGz8mWlUQEn2z970qxB3qv9j9v1qxD+6pGhJNN5VHn+1HnebRB3oMyTyPeo5oasfuqjqqQEd5/wAgu4/65V+f/wC0h/yO95/12r9ALv8A48JIM18D/tOWf2Px5eQVy43+EfG8Vq+Xnk801EM1WJoZ8/8AHvJ/35qv50EPWvm6ntfan5T7Kv2PWfgn+2J8Ufgr4Zk8IaNFZ6jYeb5lrDqQkf7LnJZY9rjapJyR65I6nPZTf8FHPjGsfmR+GfD4Hvbzf/HK+dPtlv6H86sebD619ThuLuLcFhYYfC4uUYRVkrrRdtV9x+d5n4ReHGc5jUx+NyulOrUfNKTTvJ9W7NK76u2r1erPfx/wUY+Nkg+XQfDy/Szl/wDjtPf/AIKE/G9vu6boK/Sxk/8AjlfPvmw+tSfbLf0P506fHfGT/iYyf3nNLwW8LYbZNR/8Bv8Aqe7yf8FBPjs5+RNET6ac39XqF/2/vj833ZdKX6WC/wBTXh3nW/8Az3jo8+H/AJ/Y6dbjfiuXwY2p/wCBMcPCHw1p/FkuH/8ABcX+aPbR+3h+0NLJtGsaev8Au6dF/UU1v25v2hm6eK7NfppUP9VryCGGj/VVy1eLuM/ZX+u1f/A5f5m8fC/wxjtkuG/8E03+cT1h/wBtf9omcll8bRrk/wAOmwf/ABFMT9s79oyTr47/AC0+D/4ivKvtkHm+RxViDvXPQ4o4rl8WYVf/AAZL/M6Y+G3hzHbJ8L/4Ipf/ACJ6hD+2V+0LFKxPjx5dnWI2Nuf/AGnWpB+3N8dofv61ZSf9dLGMfyFeN+R70ef7V1T4y4ro7Yyb9Zy/zOuPAXAEPhyfC/8AhPS/+QPbZv26Pjxd27wxa1psTjo0ekwk/rWBe/tZ/HrV5T9q+IF5HEfvraLFAV+hC5rzHz/apPP9ql8XcSy3xc//AAKX+Z0Q4M4Op75Thl6UKX/yB3A/aQ+OczeUnxT1kn1F4R/KsfxP8V/iZ4008aT4s8earqFqJA/2a6vXePcOh2k4JFYcMPlVHN+9ryqucZriIOFXETknunOTT+TZvS4Z4dwVeNbDYGjCcXdONKCafdNRTT9COiipK8v2fsj2iSo/O/6d6k8/2og710fV61X+GZe0I6k8/wBqP3VSQd65vZ2rajJIO9Hn+1FFej7Nf8uwCpP9bUdSf62uf2f700CDvRRRXT7MzDyPepPP9qKjrmqgdZ8PbOfWJdQsYIJJZPJ/cwxVoXnw98f/APQj6v8A+AdcXZ3k9nL+4nki/wCuVaEPirXIf9Rrl9/4GS13Yar+6LqfvTY/4QP4g/8AQg6v/wCActL/AMIT4/8A+hH1f/wClqlD488R9f7cvv8AwMlo/wCFk+Kof9R4jvf/AAMq/a0ft1DDQk/4RXxUf9f4V1KL/rrZy1X/ALN1WH/mFX3/AIB1oQ/Frxx1/wCEqvv/AAMqx/wuDx/N/wAfHjG9/wDAyj2tA3+r0rGP9j1XPOlXMX/bGpP9On/5cbmX/tjWp/wtrx//AMsPEdz/AN/qIfi/8Run/CRyVrUq/wDPsKeGomP508P+vsZP+/NHn+1dB/wuDx/n9/rnmf8AXWpP+FteMf8AlvPbSR/9NbPzad0H1Kl/z8Obgmn/AOmVE80//TKugm+LXiP/AJ8dN/8AAOKo7P4t+I5v3E9jpskf/PH7HFS9rb+IFTDUjC+2XHpF+dWYZp/N/wCWVbn/AAs++/6FzSP/AAWxVIPiR/z38HaTJ/25xVr7RVf+Xhn9XZHrv/IMs/8ArjXPz9q2PEnir/hJZfO/sq2tv3P+ptKw5pq83E2NNCOaaug+G/77xtp8A/57Vz9dJ8H/APkpWl/9dq5qP8Y6aP8AHpn3x8K5/J0uOu08/wBq4v4b/ubCP+ldpDNX2NLWiftmE/gFOaaiGaib/XfjRB3o9n7I0NCzqx5/tWfDNVjzvKrX+KaEnnfvaPP9qr/bPf8ASjz/AGrm9kaFyG8qSaas/wA/2qx5/tXRS/ggXKPP9qp+f7VJD+9qPZle0LFR+f7UeR71JWJJJZzd6sQd6p/8tauQd60Og0LObvVzz/asf/VVJ9s9/wBKdIn2huWc3epPP9qz7O8rUh/fRYrX2hRJ+48n/X1TvKPO/e0s3+p/CsTQzpvPog71HNN+9qWH/U/hWHs2aCf8tak8/wBqjmmqPz/aun+EL2pc+2fuulU9Sm8mKSisvxJeeTayT0Kpqanz/wDEjU5/t9x59eX6leeddV3nxIvIJrqTyK83vJu9dlSn7I4agTTVXmmqP7Z7/pR5/tVHN7MsQzVJ5/tVPz/aj7Z7/pXQIp69eT54qvZ3lR69N+9t/wDrtVfzv3tc3szb2huQzVJNNWfDNRNeVrSpeyNPaBNNVOa8qT7Z+6/f1TmmoqHKRz9qr/bPf9KJrzyapzXlcv7r2poE01U/O/dUTXlV/P8Aaj2pPsiOaafzar3k3epLyY+bWfeTd6zp/vfjF7P2RHNNWfeXlXJpqy7yY+bWTp1aRZXmmrPvJu9SXl5VOftWtKmT/FI5pqz7ybvVi8rLvJu9L978BjU3M/xJeeTYeRXN6bPPNa/vq2PFU3+i1z9nN3rqp7+zOc0PP9qIZv3tRz9qNNPnS0VKZod54Dhn+1R+f/q69Q0eb/Ra838B/wDHzHXpGn/8s65qlQ6qRsef7UUf8sqr1lU1Ookm/wBT+FR+f7UVHL5HlfuPSiovZI5yveTTw/6ijzvOiqPz/aiab91XNhnVqlVSPz/aq832fP76io5+1dvtESfYk037quf1L99dVcmmn7zyy1T1L9z1rqnb2R1+1LlnN3qx5/tWXpsM/wDr560IYa5qZ0/xaRJ5/tRDefvar1HVBsaE03nRZos6pwzVYs5vOqac/ZVeczJPNm9asQzfuqIYakhhoXtvamhJR5/tR/raj/1VdNSmHtESQd6jqT/llSzf6n8KzX8IBIO9H/LWo4O9SVl/E/hh7NEkHeio/P8Aaq/9o/va6P4oGhB3o+2e/wClY8+sT+b5Bq5B3rn9mBJP2qvD5/myf9Nauzf6n8Kih/dVl/y9/eGlSoE0PlVTmm8qtDypvSqd5Wv/AC8Mws7zzpfIrQ8j3rn/ADvsk3nmtCz8SWE3StKRmWLz9z/r6y4dS/e0axrFvN+4grPs656n72qB0EN5VyGbz4q5+GarkOpT+VWwGp/y1qTyZ/Nqn
pt4Jv8AX1qQ+RWOIFSI4fPqx+9qT91UlKlTNfZop1JB3qSiaYfZaRzhUfn+1RwzUed+9rD2bF7Uk82b1qSDvUdSQ/uq3GaEHeio/P8Aaia8qfaAR6n/AMe0ldh8Dfgz8OfGGjSeI9d0OykuPO8vzrv/AJa1wesTH7LJkV+d/wDwUa/aQ+NPwf8AiNHofgf4jalZWd1D5n2SKbyov+WVXhqdHFVf3hwYmlRa/eH7Ef8ADPnwi/6Ejw3/AN+aP+Ge/g7/ANCBokv/AFxhr+dv/huT9rCH/UfHDW4/+3ypLP8Ab8/bE03/AFHx+1v/AL/V2/UsJ/z7PM+rYL/n2f0MTfs0/Bab/X/DLSJP+3Oq/wDwyv8AA/8A5b/CvSf+/Nfz9/8ADyD9tKHj/hf2t/8AgZVyz/4KWftsw/6j9oXW4v8AttTWVZfV+OmHscF/y7R++n/DHP7Oc3T4SaT/AN+aj/4Yh/ZlvP8AX/B3Sf8AvzX4Nw/8FPv254un7Rmt/wDkKpP+HqP7dH/Rwmt/9/oq0WVZT/z7Ob6jgz92P+GA/wBmWb/UfBbSJf8AtjUc3/BOX9la8/1/wB0j/vzX4Xw/8FYv29rP/UftC63/AN/oqk/4e6f8FA/+i/6l/wB+YqP7Ky7/AJ9h9Rwf/Lymft5ef8Ezf2UJv+aH23/bKo/+HYP7KH/RFpI/+uVfin/w+Y/4KMQ/83GX3/fmKtSz/wCC23/BRizPPx3k/wDAOKn/AGVl3/PsP7Jy3/n2fsYf+CXX7JOf3Hw5vov+2NV5v+CV/wCyvN/zKurR1+R9n/wW8/4KMQ/813l/8A4qsf8AD8j/AIKMf9Fp83/rrpsVL+zco/5eUzm/sjLv+fZ+sH/DqP8AZQ/5ePDl7/4GSx1H/wAOl/2Xv+eHiD/tjeV+VcX/AAXa/wCCjEP/ADVu2/8ABbFWhD/wXy/b9/6HHTf/AAW1z/2Tkgf2Rl3/AD7P1AvP+CRf7Lsw/wBfr/8A4GVXP/BHP9l6bpfeJP8AwZV+a/8AxECft+Qy/wDIc8P/APgnq5D/AMHC/wC35DL/AMf3huT/AK7aPFR/YGUP+H/7eH9kZf8A8+z9EJ/+CM/7Of8A0NXiT/wMom/4Iw/s9TRfuPGXiT/v9FX59w/8HEX7esP+vt/CUv8A3B60LP8A4OQf257P/X+HPBsv/XXTZaP9WOH6T/ef+3h/ZGXf8+z7om/4Iw/A/P8AoHxN8SR/9+qpzf8ABE/4V/8ARW/En/fmKviv/iJG/bS/6EPwR/4LZauQ/wDByl+2J/y2+FfgmX/tjL/8drT/AFYyL7dT/wBLOf8AsDL/APn2fZH/AA5V+GX/ACx+NPiSL/tjFRN/wRb+H/lf8lw8Sf8AgHFXx/D/AMHLH7W0P/NHfBMv/gV/8dqT/iJd/ai/6IR4J/8AJqj/AFcylfw6n/pYv7Ay7/n2fWn/AA5P8D+b+4+NOrSf9fcMVR/8OT9K/wCWHxv1L/wDir5X/wCImX9o3/o2zwZ/4GXVaGnf8HNX7Rn/AEbn4J/8DLquL/VzKf8Al5/7eH9gZd/z7Ppj/hypP/0X2X/wUf8A22j/AIcqT/8ARfZf/BR/9tr5vH/Bzh8fv+W/7MvgmX/t8uqk/wCInb40w/6/9l7wl/2yvLqn/q5kv/PwP7Ay7/n2e+f8OSfEfm+f/wAL+tv/AAD/APttB/4Iq+KukH7Qumy/9wf/AO214X/xE+/Fv/o2bwt/39uqX/iJp+I3/Rs3h/8A8GV1S/1cyr/l3UD+wMu/59nuf/DlX4jdYPjhon/bbTZY/wD2rVOb/gi38Tf+i0+H/wDvzLXi/wDxE4fEbzf+TXvD/wD4GS//ABqrkP8Awc7eMf8Ao0nRP/BlL/8AGqX+ruE/5+VP/JP/AJAP7Ay7/n2euf8ADl34q/8ARYvD/wD4By1H/wAOVPjR/wBFU0D/AL8y15v/AMROHiqaL/k1DTf/AAcS/wDxqpP+In3Vv+jVov8AwYy0f6p4T/oIqf8Akn/yAf2Bl3/Ps9B/4cvfGn/oqvh//vzLUX/DmH47/wDLD4m6BL/2xlriP+InGf8A6Nmsf/BlLT4f+DnCfzf+TbLL/wAGUtZrhPCf9BFT/wAk/wDkDn/sDL/+fZ2n/DmH4/8A/Q/+H/8AyLUf/DmH4+/9D/4frl4f+DmqCbmf9mW2/wDBxLRN/wAHNX73yIP2Xrb/AMHFH+qmE/6CP/SP/kBf6sZd/wA+zoD/AMEbf2jP+WHjHQJf+21L/wAOc/2mv+hk8P8A/gZLWV/xE1aVDFmf9l62/wDBxR/xE36F/wBGvxf+DKn/AKqYX/oI/wDSP/kB/wCruXf8+zU/4c5/tQ9YPFXh+L/ttR/w5y/ao/6HDw//AN/qr/8AETtoX/RskX/gzpf+Infw3/0bKf8Awc0LhTCf9BFT/wAk/wDkDT/VzLyf/hzx+1f/ANDT4f8A+/1H/Dnj9q7/AKGPw/8A9/ak/wCImrwBN/r/ANmy9/8AAyj/AIia/A//AEbZff8AgZR/qphf+gn/ANI/+RD/AFby8p/8Oef2vv8AoK+H/wDwNo/4c8/te/8ALC98P/8AgZWh/wARNXw4h/1/wBvYv+21H/ETh8LP+iIX/wD3+o/1Twv/AEEHP/q3hCh/w6F/bD/6Cvh//wADKP8Ah0L+2H/0FfD/AP4GVt/8RNXwrm/5ohff9/qZ/wARNfws/wCiH33/AH+pYjhPC/8AQQL/AFbwhhTf8Ei/2xIf+W+if9spqp/8Onf2vf8AqCSf9cbyuw/4iavg5N/x/fAi+/7/AFXIf+Dlj4A/9EP1Kuf/AFTwv/QQR/q5l/8Az7OL/wCHVP7V8MX/ACCtN/8AAz/7VUmg/wDBPH9pP4e69b+MfEfhyx+x2E3mTeTeV6BD/wAHNX7Of+on+C2pRf8AbGus+G//AAXa/Z6/aW1OT4c2Hw51eykuof8AnjXQuGMKv+XhouHMInc6TwfD5NhH/ra6WH/U/hVabWND1jy77SoP3f8Az286pK6fZexPtcLpQsHke9FV/P8Aajz/AGo/wAH+qq5DNVOpIO9VqBY8/wBqKPI96kqalI19qEHerH+qqv5/tUnn+1FMyJKks/3PSq8M372pIO9Z1NwNDz/apIZqz5pvNo8/2pVDX2poVJB3rP8AP9quQd6KdP8AeiCaaiGb97R5/tUf/LWt7GhsWcw8qtSzvK5+DvWhDN5VV7I29oak/aqc01Sef7VTvP33Ws/ZlFeaapIZqr0VlSOguef7VHR5HvR5/tWn72qZkn/LKub8bXnk6Ncc/wDLGty8m71yfxUvILPwlcT1Hsyab0PnPx5qfbH415/eTHza6DxtqXpXDzXgnlrtp+yt+8POe5c87yZaPtn73pVPzv3tR+d5VH8L+GI0Ptnv+lR+f7VT8/2o+2e/6UzX/GV9SmPm8UQzVl6lef6VUlneDyqDI2PP9qj8/wBqr+f7VH5/tV06ht7Rkk15P5vFU7ybvUd5eVXvJu9Mkk8/2qvNNR53m1Xmz/21rhqUjQjn7VXmm8qpPP8Aaqd5N51c
/tGATTVnzTVJeTd6z5pq0pGZJez/APLCsu8m71JNNWXNeVpV/e/ux/wiOftVO8m71JNNVO8vKzpaCI5pqz7ybyaLy8qneTd66aRnU3MPxtNPNF/o/wDrKx4Zp+lXPEl550tZ/n+1L2X772hz+0RoQzfuquaZ/wAhSP61l+f7VoaDN/p8dOpV9kB6Z4PhHmx16Jo8P+rya4PwfCfKjrvNNhHlc1zfxjupGpVf9z5VSTfuYsVX/wCWfn1iURz9qPKm9KPO8qrEM1VrVpajpFOaGo5oasTeRVeabzaVKnSHVKc0NE0P7n/X1JVf/lrWv1Yk+sO3kf8ALSoNR/1v41PVfUj50VdGH/hG1Qjs/wBzLmCtCDvWZZ/60/Wr8M1PnpHTSI5pqjh8+aXpUlSWdHs/amfs3VJIYasQ/uqj83/SpJ/+Wfnfuar/AGzyZvPrSyNKdP2RqQzVYh/fS+R5/lf9dqz4byrEM1FIKpY/69/wo/5a0UV0jJJpvKqP/W1X4mmqx53kxVw+0/5dmlPYj8/2qOftUc0372iGaj2dl+7MvaklV5rz97ViDvReWcE35U6dT2VIKlMp+T+9jnFalV4tN/5eBUn+qrKpU9qMsfvakqn/AGnP1nqT7Z7/AKUe0Ased+6qnNDP5tWPP9qP9bWwGPe2c8wkrP8A7NnhlrqJofJ/1FU5ofOlxXLUqUqQvZHP/wDLWrkM3lf8sK0Psft+tSf2b53FaKpRM/ZmfCbibkW9XIYa0LOyghiqOaGDrWdTc1I4Ya0IZvKqn/qqk86DyqVQzNSG886pPO82suzq55/tT9oX7Usef7VHP2qv5372pKkgIf3VSUVH5/tQBY8/2o87zaj87zaP+WtbUzlqljz/AGqOiii37wkj1H/VfhX5h/8ABXrTfJ+Kunz5/wCWPl/9/a/Ty8/1Q+lfm3/wWGh/4rzSL7/nr5X/ALSrKkvZYsjEnwnNN+9qOGai8qP/AJa169/anjXP00/4Jyf8E5/2XvFv7Ldn8ffj/oVt4hn16Oe8DX9zNbQaRawSyxlcxyqHz5ZkZ2AwCAAApLe3Tfsb/wDBLfQdNGqXngPwRb2rQpIJ7nxA+wxj7rZafGPQ1ifskxSz/wDBH2KGGNnd/h34gVEUZLEve4AHc15DoOkfBX4i/D7SvG/xTsIb2Kx8N2vkI8cpu4rr/rkFr9VxmMweQ5dg40sHTqOpTUm3G7vZN62b1bP8/uFOF+J/GDjfiarj+J8Xg4YTGVKUIUqrjDkU5xilHnjFcsYJaLXd3bbft9v8F/8AgkzFbG7tvD3w5kh/ikW/EqfnvIqC40D/AIJCaP8A8fWkfCxMf89IY3/nmsrVfGvhm7sfDVtpngq3u9MOoxSqII5jKo9T8vFUvBfiOPxT4wj1rxp4AvJptL1+6srOcaC8Ylt/3XlN9zp15rlXEmJt+6y+n/4D/wAA+1n9HnLl/vHGmNfriEvzkzoJtY/4JC6RK0Uuh/DJXT7wHh9XI/KI1auvFv8AwSc0TS7fX5/Dfw4jtrnd9mnTwgG8zZ1wFgJOKZqPh/SvEvxAkh8efCuFdHksI2L6dock7/bRjJyG6fux+dc54R8Ga/bazpn2v4W3DyiS5S9lbRLgLg+bg52dDkfnVLibMn8OAp/+AM46v0e+Co/x+MMW/XF0l+aZ7n8Ofgv+xv8AEzwLa+Mvh78FfBV9oeqoWtbhfCcMYlVHZD8skSsMMrDkDpWB4u+HX/BPHwdazah4s+Gfw5tIoTiaR/Dtudn12xmvWPhppCaF4IsNLj3bY1cgPEUK7pGbbtPIxnH4V8u6d4H+LvhOXxZqekeBdRkudS8R2wtkuPDtxMGtWPzyghO3evpM5zDGYLA4WrQwkJyqRvJcjdnaLsrbat7n4N4XcDZFxbxfxBlua8R4jD0sFW5KM1iYQdWPtKsOZud1J8tODvCy17NGrP4h/wCCQ10f9MsPhYhb/nto0cR/WMYqutn/AMEcNV+ZH+Efz+k8Ef8AUYrZt/hZcWnh/WbnSfgtcX99fajdTSHUdMeFsS+XyoKc9D09KpeGfgV4h0T4X6dfyeC3Ot6ZfwzWUEOhyW0saH/WDdEmc18t/rBm0viyym/+4b/4J+3x8EuCIfweOsVH/ucp/pJDdX+C3/BJK0l+yazpXw3s3P8Ayyl1kW5/LzVq54M/Y2/4JdfFm5uNP+HXgjwfrk0EXmXMWh+JJpWiQnG4iK4yoycZ4rl/ib8PfjDb6f4t8SQ/Ck6teX2to2kQL4Ze5YWT/eGNnUdxX0F8DvBy+Gzc3LeC7fSi8CJCyWIhkaPOdrYUdCOlehlGYVMyzOnhsRlVOEJXvLk2sm+sfkfEeJXBFHgXgjF51lPHOKr1qKjyUli7uTlOMPsVebRSvouh+On7ZPwS0v8AZx/aQ8S/CHw5rcl/YaZcxvaTTRbHSOWJJljb5myUDhd2fm25wM4HmHn+1e1f8FJdY/tv9tz4hXW7Ii1lbcf9soI4/wCa14jB3r82zSFLC5nXhSVoqckl2Sbsf3b4f4vMc04DyrFZhNzr1MNQlUk7JynKlFybskrttvRFjz/av1V/ZT/4Jtfsm+H/ANnbw54x+MXhWy8RarrOmW2o3mr6jcTW6RfaURo7dUWXaAu9UDdXPPGQo/Kav258PaPqmvfsT+DdL0Wwlubl/Cnh1khhXcxC/ZGY49lUk+wr6zgynhsTLF16tFVJU6d4pq6vq9u7sltfsfz19KTO88wVPh7KsFmNTA0sZi1TrVKc/ZyUG4Ru5pxajFTcmnJRdk5bK2Dov7Jf/BOiZ5YNE+Fvgm4NvL5UyxTebtf+4fnPPtWnZfs8fsDW8Mk1t8GvAnl24O+R9EiIX15ZefwrxzwT+zZ8ZNag1XTLnwpc6ao+I66uJL+NohPap08rbnGfeu/g/Zw1rxR4bfwv40+F9lqNkddvppTf3LJLHDz5c1vjO2WTCZzjGOa9iOcZm/4eVwX/AHDf+SPhq3hJ4ep/7d4h13646kv/AEqcjrPh18Lv2FfiRAX+G3wp8B6lFJCzs9n4ag2lAQpyTGB1YcHnmvy3/b++DPg39n79q7xJ8M/h8twmlRfZ7q0guXDGDz4ElaNSAPkVnIXPIUAEkjJ/Sj9hb4A+PPghpFxp3jDw6dPjE18LZTcRyExyTRMhOxjjIVvyr4D/AOCt0f8AxnR4nb+/p2mj/wAk4qjiejPG8M0q9ehGlV9payjy6Wl311sg8C44Ph76QeYZDkmc1MwwH1L2nPKtGtGVRTo9Ye5eHPJK2qu0z5tmm/e1H5s3rUf72pIf3VflZ/eRY82b1ommqOib97RYCSDvRNN5P+oqnUnn+1KlUAsfbPf9KkmvKr1HWtNmXtS5BeT+VUnn+1U/P9qj82b1rOYy59s9/wBKPNm9ar+f7UQzVoti/ZGhBeTw9KPOn82s/wA2b1qSGagz9mXPO82pPP8Aaqfn+1Hn+1LQZc+2e/6VF9suPQfnUHn+1FYGhP8AbLj0H51LDeVTog71p7IXtDQ8/wBqPP8Aaq9SeR70c5qWIbyj7Z+96VT/AHtSeR71ehm
XIbzyaP7T/wBmqdFZ/wAIC55083WvoD/gmz9om/aCjnx/qtNlr57/AOWVfSH/AATHh8748XH/AGDaPaIy9kfrp4Dh/wCJDHj+VdJB3rE8Ef8AICj+tbf/ACyoqbHTT2I5+1Rz9qkn7VH/AKquQ0JKP3tR1H53kf6+q9pV9qZmhB3o87/WVX8/2o8791WntTQkg71J+9qv5/tViH97Wf8AFCkSQ/678auVTh/1341cqgCaaCHqaIZqPJgm/wBfUcP7qsaX70PalypPtnv+lU/P9qIO9aVNDQuef7VYh/e1XqSGas/rBr7MuWcParEM1V4Zqk/5ZVrSEH2y49B+dRzTedFmiftUfn+1ZezNCvNN+9rQs5vOqnVizrOpT9kdBYqOo8/9N/0qT/llWlM5yOftXD/Ga8nh8LyQf8s/+W1dxP2rzf45Xnk6N5H/AD1o9rRNvZnzH48vIPtUlcfNNW54wvPOv5J65uaatVTOKpuWPP8Aaq/n+1QfbLf0P51F9s9/0q6mxkXPtnv+lRzXlU5ryq/n+1Z+1AZN/wAfdT+d5VZcN5591WhB3qgW5c+2e/6UTTVXqvNNWP8ACO32hJNNUc01RzTVTmmrS9WkZfuST7Z7/pVeaaq800/m1H5s3rR9YHSJJpqp+f7VFN/rvxqvN/rvxo0/5diLN5N3rPvJu9F5N3qneTd6zqYmxt7MjvJu9Z8/ai8m71TmmrNYilUre+c3s2Rz9qy7ybvVia8rPvJu9dNSlS/iGXsyOaaqc01STSzwmqc01C/hGnszm9e1L/T/ACaz/O/e0axL51/Vfzv3taLY4zQhmn6VseG4pvt8fFc/DeeTXUeD/wB9dRisqv8ABNKR654Pm8m1jFdpZzDyq5PwfZ/6uuws4f8ARa5l7U9BbFyiX9zF59R1HP8A63yM1r/CKqFe8mPm0QzUT9qjrKpiBUhJv9d+NMn7VJ5/tUc03nRZqPaG5T/5a0UT9qjmmrop1PZGFU+sIZqkmh/dUQw1LN/qfwrTDYatRNf4plVYg71HViDvWlOxsRwd6sf9e/4VX/5a1JDNXPUqgH72j/W0T9qIO9a0hVSx/qqkhm8iWo/P9qkh8itRlyGbzakhm/e/v6r/ALiGpIO9KpUq7CpFiaH91Vf97ViG886X99Ufmw+taU6ftTUjmh/dVHD59Sed+6qODvXFUAk4gH/TSjzZvWpPP9qPP9q0p0zKqSQzVJ+58qo6PP8Aaj2fsjSkZ95+561HDNVys+aGeHqKKRgaEM1Sef7VTg71YhhqTb2Row/6n8KreT+9o8/2qTz/AGrnKK803lVJDNUc/ajzYfWnSqL/AJeGZY86Cb9xBUdV4O9WIO9IzLHk+bUfk+VUcHerHnebVfxTQIO9E/aij/VVlUqeyMwh/wBd+NWPP9qjh/1340Tf678a0pfwTnCa8ogmnml4o8n91RxDNT9mBcg71Yqn5/tUkM1XUqlezJKJ+1FRz9qhVKxsF5/qh9K/O/8A4LJQ+dL4fn/66/8Ao2v0QvP9UPpXB+JP2G/hJ+2lqdxY/GKC5+z6XD9m037JN5cv73za0nU1ODE7H4X3kPaqdfup/wAOAf2H5v8AX2OrS/8Ab5LF/wC1aP8AiH7/AGCvN/5BWv8A/g4l/wDjtdyqVv8An2eRUpnxR+wD/wAFbfhr+z38ArT4JfGnwhqsn9gSumh3ug2iSefbySPKwm8yVcOruQCowVxnBBLe0y/8F1f2T0H7vwL43f6afbD+dxXt3/EPT+wj/wA+PiD/AMHMv/x2pP8AiHr/AGCv+fHX/wDwcS//AB2vscDxxn2EwsKEJLlikleOtlt9x/OWe/RT8I+Js9xObYujVVWvN1JqNVxjzSd5NKztzO8nru3ay0PA5v8AgvH+y+n+p+GPjd/ra2g/9r1Tn/4L2fs8IM2/wc8ZP/vG1X/2qa+hf+Iej9hL/nh4g/8ABxLUX/EPH+wj/wA8fEH/AIOJf/jtbVfEHiZ/w0v/AAFHnw+h94KR3w1V+taf6NHzlN/wX4+CK/8AHv8AAzxO/wDv31sv8iapTf8ABf74bLn7P+zlrj+m/XYV/lGa+lv+IdH9hH/nt4p/7Y6xR/xDpfsH/wDP74u/8HFYPjzi7uv/AAGP+R0Q+iR4Hw3wE361636TR8xt/wAF/wDwSCyr+zVqZIxsz4mjGfr+44/WkT/gv94Qf/m2bUB/3NMf/wAj19O/8Q6P7CPm5+3eLf8AwcVch/4Ny/8Agn5/zw8Wy/8Acerl/wCIhcX/AMs//AaX+Z0r6JvgUv8AmWS/8KMR/wDLT5X/AOH/AJ4O/wCjadQ/8KhP/kenxf8ABffwbL0/Ztv/APwqE/8Akevq3/iHR/4J+f8APv4t/wDB9R/xDr/sB4/0f/hLY/8AuPVvT454yt78v/JYf/ImP/EqvgP/ANCt/wDhRiP/AJafLH/D+nwp/wBG16h/4VEf/wAj1W1n/gvLpkmk3Ueg/s4XMV80Di0lufEqtGkhB2syiAFgDgkAjOMZHWvrj/iHS/YP/wCf3xd/4OKP+Icz9hH/AKCvjL/wcxf/ABquN+IPF7Vrz/8AAKP+Z0w+ij4EQmpLK27a618Q1817U/Ebx94/8SfEzxtqvxC8YXgudV1q/lvNQnWMIHlkYsxCjhRk8AcCsiv3M/4hzP2Ef+g342/8HEX/AMao/wCIcv8AYR/6CvjL/ttrEX/xqvk5ZnicRJylh6jb3+D/AOTP6Jw9ChhKEKFGKjCCUYpKySSskktEktEj8M/O/wCm9fcv7O3/AAWX8U/CX4PaT8NPGvwuj8S3mjQLZ2OqR6qtputkULDE6CFhuRRt35ywAJGck/cf/ENz+wj5uP8AhI/G3/g4i/8AjVXIf+Dbr9hHr/wkfjb/AMHEX/xqvUyviDN8rqurg6VSMmrPSm016OTR8Xxz4dcGeJGX08FxFhFXp05c8felFxlaztKEoySa3V7PS60Vvjq8/wCC8OogYsv2boA392fxKw/lb1l3n/Bdz4jOx+wfs96LGO3na3K/8kFfbP8AxDc/sFeb+/13xtL/ANx6L/41Vz/iHA/4J9/89fG3/hR17j454vl0qf8AgNL/ADPzSl9GXwHo7ZNF+tWu/wA6p8DXX/BdT46yOwsfg34TjHOwSPdP+olWvkH4pfFXxl8ZPH2o/Ebx/rU2oarqlwZbm4mcnHZUXJO1FUBVXoFUAdK/bf8A4hxf+Cfn/U5S/wDceo/4hv8A/gnP/wBAPx3/ANsderyMz4mz7M4qNaFWSWyfJa/ykfecHeGXh/wBiKmI4fy+GHqVEoylHmcnFO9rycmlfVpNXaV72R+EfnW//Pc0ebD61+8P/EN9/wAE4P8AoBeO/wDwcS0//iG+/wCCb/8A0Lnjr/wcV5lSpmX/AED/APpH/wAmff8AtD8G/Ot/+e5qTz/av3o/4h0f+Cc//QqeNv8Awey1JD/wbsf8E4Mf8iP4yl/6669LR9YzF/8AMPU/8kNPan4J0V/QBD/wb0/8E0sfv/hX4kk/67axL/8AHak/4h6f+CaR/wBf8K/FMv8A3H
[... base64-encoded binary payload elided: the remainder of the EAST_colab.ipynb diff is a single multi-kilobyte base64 blob embedded in the notebook JSON (binary attachment/output data, not human-readable) ...]
bnoMzn5of3VfQn7Fs32Ow8g/9Na8Ln0eevfP2P8ATID5nn/9NaKm5ofSFnMPKrUhmrLsx5MUcArQ4hta872p0FipIO9ZcM1WPtnv+lM6C55/tUc/aqf2z3/Sia8rGoBJNeeTUf2z3/Sq/mzetHke9afvTQPP9qk+2e/6VXmm/e0Vn+8Myx5/tUfn+1R0V0eyOck8/wBqj/1tH+tqxZ1J0BDDP1qOftWh5HvVeaGgCvZ1chh8qo4f9d+NXKr2iJ9mRzTVHDN+9on7VYhhqjJbhUfn+1SUeR71ftEa1NgqSGao5oakh/cxZo9miiO8m70ef7VXmh86XFHk+VWdSkBY/e1JD/rvxqODvRD+6rSnsBYo8/2qPzvNqvRU2AsQ/vakm/e1Xos5u9FPYCSo/O8qrH/LKo/J/wCnis6pdIPO82q95++61Yhh/dUfY/b9agZnww1oQd6PJ8qj/lrV+0Ak/wCehH/LKHza/G/4qax/b3xG8Sa4YPK+1a9dSf8AkWv2A8Sax/YPhLVNc8jzfsumyyeTX4v3l59sv7y+/wCet5LJ/wCRa7aZjVqleiq03+u/Gmef7V1e0OEJof3tEHeo5pqPP9q5vaa+zpmhJP2qvD+9qxNN+6qOz/1Xn1pUp1QJIf8AU/hRNNRUdICOipPJ82q8/auc1XsiSGajzvKqOpPI96BB9s9/0qSG8rP/AHtR+bN60fVifaG5DeVJDNWPDNVyGatMPTq/wzSpuann+1WPtnv+lY/2z3/SrHn+1c1WpWpGy2Lk15VO8m70cww1TmmrnpVatUCnqUxh8zNc3rF5+9rc1iubvf30vk161SpVpUvZ0zGpubGmjzrSM1Yh/dVTs4Z4YvIqxD++lzQv3tE2Wx6Z8DYYLzxHZ5/57V9OabD/AKLXzn+zrZ+d4ojP/THzK+i9O/49fwrysYjfDGjD59H+tqP/AFVSQd6mlTNSx/yyooorOp+6qnQSUQd6jqSsqpmqYef7Uef7VT8797R5080vkCudVPZHT/FJ4f8AXfjWrB3rPhhq5B3rtVU5iSabEUlU4Zqkm/e1Xhhn61l++MyxP2rtPgnD52vSVxcMNeqfBLTYIYpL7NOp7Y0PRJpqr/8ALTyKsef7VXmm/wBK8+u32lL6odPszl5pqrzXn72pLyqf/LWssbU/fBTqlyzm71YrPg71chm82uf2gixZzd6sRfuZfPqvB3qxD+9op1GaEk376XFFV5pvJlzVjzvNroqEUiTz/apPP9qr+R71Y8j3op06oe1I6sVH5HvUldKqIRJUkHeo/P8AajzZvWtKlQftST91Unn+1V/P9qK56io0/jESed+9qx537qq/ke9SVpT9pS/hkVNwhm/e1c87yqrwd6P9bWdX/p2aUi5DN5tFRw/uqPtnv+lZUvbf8vBBRUfn+1SU6oBDNViqfneTLVyGbzpc0f8ALozCpIO9R1JDNTAJv3VEMP72ijzv3tKoBchhg60fuqj8/wBqK6J/vTMWH/U/hT/Nh9aj8/2o4mmrOnUD2RJD+9oqP/VUTTT9Kzq1ADzvKqTz/aqf7+apIZp+lL2oFjz/AGqTz/aqcHersP8Aqfwp+0dU0Eog71HUlKn+7rHOST9qjhmoqv5/tVAXKsWdZcM0/m1qw/6n8KAH1HDNRNeVHDNS9qBY8/2qP7Z7/pUfnfvaK09oKnsSef7UQzVHNDUkMP7qtQ9oiSj/AFVR+R70ef7Vl7UZJNeVJDNVejyPetKdSqc5Y8/2qTz/AGqvB3qSGaoNCxDNUlRwzVH5/tV+0RPs0WKKr/bPf9KJpqgxI5+1c34qh/0W4roPP9qwPFX3JfpQtwPmL4nQ+Tr0mK5uGauo+ME3/E0krj7ObvXWtjjNCGarlnN3rLg71oWdR7Qr2RoQzVchh82qcHerkM1dFzEseVD6VJDDB5VV/P8AapPP9q5zQJrODNU7zTYIRVyaaDpWfeal+6rQDPvLO38r/UVh6l9nhqxr2sCH/lvXB+JPGEEJp09zMuaxqUAlr2z9kW8/4lck/wD02r5P1jxtcTXXkQV9MfsZ3n2zw352P+etZVRrc+mIdT74qx/af+zWHZ/aJjWpDD5EVcZ6SsXDeedLUlU4O9WKqkZVCSGaiaaq9E01UYlipPtnv+lU/P8Aaq/mzetZm3tC55/tUfn+1U/Nm9akhhq1U9rVKLnnfuqjh/e1HViGH97WvtEBJDDVyGGo4O9XKDGkR/bPJ5qvNeT1Jef6ofSs/wDe1lUpmxYs5u9aEM37qs+zh7Vcg70yfaBD/rvxq5B3qv8A9e/4VJB3oM/aEk/aiGaDpUfn+1R+f7UElio/P9qk8791Veg6CT/W1Xn7Uef7UT9qun+9JqEkM1Hn+1Rwd6J+1RqUSed+6ohmqv5/tUnn+1aE06hJNP8AuvIqvZzd6sczQ1H5HvWXsyix53m0Qd6r1J5/tR7Q6C5RDNVeGapIO9KkaFyGHzarzeRDUkM1Rz9qozOP+P8ArH9g/AfxZqv/ADy0eX/0VX453n7n/tr+8r9YP23tZ/sb9lrxhz/rdN8uvyfvP+WX/XGuinVOWrTI4Zqj8/2on7VXn7V1e1OEJ+1H/LWo6K5qdP8Afe0Nfakk01SQ/wCp/Cq8/aiGau32hkWIZqP9bVfz/apP+WVZVP3QU9yTz/aqd5MfNqSaaq8/aubEe19ka0iT7Z7/AKUTTVHUfn+1LDbCI/O8qo/tnv8ApRn/AKb/AKVXm/dVHtK1VjpGh5/tUn2z3/SqcM1FdtSnVVI6zQhvPOqxZzd6x/tnv+lSQ3lebb7dQ51ubk15+6qnNNB0qnNrHao/tlv6H861p+xNamxX1K8HlYrDhHnXWaualN61Thm/e1pUqGRsQzVYs5u9Zfn+1XNNm8668iuml7ED6I/Zd02D/iYT3EH7yKGKvaLObyYuleZ/AGzgs9GkuIIP9bDXpkP+p/CvNxv8X92deH3LHn+1Sed5VV4O9Fc1PEe1O0sf2jb+pqSG8gm/1FZd5RD59aVNaIe1Niis/wC2eTzViG886uap7ECeH/XfjUtU6sUU6iAuQ/vasf6qq9n+56VYn7Vr7KlSpAv3pHNNUf2z3/So5+1R+f7VlTqVQ9oWIZq9o+EsPk+HPI/5614vZw+dL5Fe8eA7P7HoFviuqmFzYqTz4P8AlvVab/XfjTPP9qv+CGtUw5pvOizVP/lrVyaGq80H7rz656dT/n4dNSl+9I6khm/dVHB3qxWtJFklnN3q5B3rPh/e1chmrUzCf/W+fipIZv3v/TOio6y9n+9Naf7ouQzVY8791WfViGaj2hkSfbP3vSpPtnnc1T/5a1Y8j3rNVGyvZB/y1qxVOrFdFP8AdC9n7Ukg71JUdSQ+fDRV1NfZkk/aiH97RN+9oh/fRYrOpcxJPP8Aaiq9SUyPZssVHRUkP76LFdFqwgh/1P4VY/1tV4YakrKp+9NCTyf3tWIYfKqv/qqkhm82lSMySftRDNUcHeo/+WtR7QPZssQzfvakg71XqSDvQqYU9yx5HvUlEHeo5+1XU0AKkh8io6KPaGhYo/5a1HN
NRDN5tZ/xTnJPJ/dVHNDR5/tR5/tVGnsySDvUnn+1V/P9qkqqRnVCj/p4/WiiH97VfuQDz/apP3E1V/I96P8AVUGZJNNBD1NSWcvkxfvqj8j3orL2oezRY8791RB3qOpIO9ZfxawFjyPej91R5/tUdduxl7MkqSo/P9qIZqzp1KVQ1DzZvWio5ryqf9pedzSqgaH7qjz/AGqvB3qSGaopu37sCTz/AGohmqPz/ao/O/e0VKhl7I1IZqrzf678aj+2XHoPzqOaafpRUqB7MsQzUfbPf9Kz/wB7R9s/57ir9qMkmm/e1j69DPeWsnNaE01YfiTUp/ssnkT1P7oxqUz53+MEP+nyVx8MPlV0nx4vfsl3JP8A9Nq87h8R/wDTeuylUOapTOos5u9XIZoJuhrk/wC3rf8A57n8qkh8R/8ATetbe1EdZ/aUENSQ6xBXH/8ACVQTd6JvFUEMX+vrL2ZmdxDrEFE2vQQxf6+vM9S+JGlWcXnz39c/qXxysYYpPIPmV0AesXnjCxhi/wBfXN698SbGGLieKvF9f+LV9ecWM/lVzd54wvrz/Xz+bQZnonir4qT3nEE8lcHqXiTzpZLgzy1h3upTzVX8/wBq6VUomZqf2x/nFfZn7B/7/wAJSV8P19yfsEweT4N8jP8AyxrixP7392dNI+k9O/1X4Veh/wBT+FUbP/VH6Vdg71zlhDDViq/ke9SUezQEnke9H2M+T59FHn+1L2iOgjqOaGpPP9qkrL+KBXhh8iKrFEMNFP2XsjnW4TQ1Yg70eR70Qw0zsWxJ5/tUkHeo/Ig/5YVJB3rSlU9qY+zCaGiDvVio/I96yqVNQp0w8j3qvN/rY6sVH/y1pVNTT2aLFl59SQw0Qd6k/wCWVa0qZn/FI5+1V/P9qkn7VTn7VGpRY+2e/wClSef7Vn+VN6VJ+9qfaVTQkmmohm/e1X/e0fvapVHSMahYon7UsP8AqfwpK3qVApkdSQd6PI96khh86LFZ+09qbElE01R+R71J/wAsqftETTph5/tRUc0P7qo6CiSb/XfjVyzqvB3q5DDWNIuqE3kVH5HvUlFR9Xq1a1xnzn/wVE1+fR/2S9Qgsf8Al/1K1jr8y5pv9X/1x9a/Qz/gr1r39m/ALS9D8jzft+sf63zv9V5VfnH+/wDNjzXbS0OGqXJpqj/5ZVHUldNOp7U5qlMjqSq8vn+b+49akh/1P4U6QitqP+q/Cls4f9FqPU+1WLOHybWOsqm5r7QsQ/uqjoqOrD2ZXn7VHB3om8+iuf6y/a29ma+zCaf/AJbijzvNqObz6r06mIdL/l2L2ftSSq8/aiftUdX7R1f4Y/ZknneTLUn+tqPyfOlom/dU6tV0v+XZsSVHR53m0eTPN0rL6x7VfwyfZhN/qfwqOiaGfrRWdTEf9Ow+rla8/wBUPpVCGGtyaz/dVTmh/dUfWa1X3PZh9WI4O9ang+GCbWY4J6x4Ya2PBMOdZjnrT2lb/n2Z+zPrj4V2dvZ6DZmD/lrDXYf8ta5PwHD9j0bT4D/zxro4f9d+NebTqVva/vD1sNSLtR+d+6o8qb0o8/2rOno/fCrSI5vPo8qb0oni/wCW+asQ/wCp/CtfaVjKlTK/+trQs4e1ENn+9q59j8mLielUpmvtQooqP/r4/Gj2YFyzqxNNUdnZwQxeRAKJoa0qe1sFIj/5a0UUUYfcyqUy5oH/AB/pX0BoMP2PS7O3/wCmNeF+CbKe88R2cEH/AD2r3SGbyPLrWpUH7MsTf6n8Kpz9qklmuPK8iq9Z1aXtToMfz/ajzvOiqP8A0f8A5YdKPP8Aauk29qEP7qj7Zb+h/Oib97UcMNKnUJLEHepPP9qj8nyqj8/2rKqZmhB3oqvDNR50/m1r7RexA0LOrE/aqcM0/wC7qxn/AKb/AKVrT/e0Q9ow8j3o8/2qSq80NZfvaRt7Ujq5DN/03qvUkMNZe1/e+zMluWIf3tWPI96jh/dVJ5/tWtP2ppUCpKj8/wBqPP8AamIIfPhqSjz/AGqSDvXN/wAvQI6PPn/5YVJNDUf2P2/Wuz2lVGZJDNP5tSef7VH5/tR5HvXPT/emhJUlRwd6kn7UfVmYVNyx5/tRVenw/wCu/GtLVUWtiX/lrVizqnDD+9qxDNWAFzz/AGo8/wBqr+f7Uef7V0ASf8sqJpqJpv3VHke9c1qpmEM1SUQw+VViHyK09kHtSn+9qTyPerH7qj91WlTYCvUlR1JT/wCXJmE/ao4fP82rH+to/wBVWVSnVqgR0VJP2qvP2rSlTAk8/wBqKjn7VH537qsqugFiGapKz/P9qsQzVFKpqZlyGbyqPO8qiq80Pm10ezQB/afnS1JB3qPyfKorKkBHN++lxRDDUnn+1R+R71rUpmZcg70eR71H5/tUfn+1QaEnn+1EHeq/n+1V5tS8n/lvXLUp6mhqTTQdKj+2e/6Vj/2x/nFSWd551dPQzNCaaqd5eUef7VXn7UeyNAm1jtXP69qQmtZK07z/AFQ+lYms/wCqk+tZKpWMz5z/AGotYns7WTyP+e1fO95488iX/X17x+1f/wAga8+hr5TvJj5telSPNqnWf8LCvoelU/8AhZ2retcn9s9/0qnNeV6VKnVOX2p1F58SNVm8z/Tpaz5vHmuTfuPt1c/PN/03qPz/AGrX6uHtTQ/t6+ml/wBfLUf9p/7NZfn+1SfbPf8ASj6uHtTU+2e/6UQzVThm82rFnD51cVSmBY8/2qPzv3tSUeR70qdMzmSef7V9yfsNzH/hEun/ACxr4b8j3r7k/Ybg8nwv/wBsaMTTOikfR9n/AKo/SrtV4f8AU/hViDvXFUOguw/6n8Kig70ef7VJDDWZ0B5/tUdE37qo/P8Aal7NHOST9qIZqJofNorNUivaGhB3qvR5P7qiGGu1bGJYhmo8j3qOGGrEP72s6mHNqdQIYaJpqP3Pm0TTVpTtSJJPP9qj/wCWtFSQd6z9kAVJF++i8+o5+1FHswJIZqkhmqvP2oh/1341lTp1qR0Ek372o5oakqOftWv74AooorMCvD/rvxon7UVJDD5tKn7X4AJIO9FFSVqBHUsP+p/CoqKz9mi/aliiaao6Wb/U/hWZqVpryozeeTFUc3+u/GpIYf3VTU9sBYs5h5VXPP8AaoIf9T+FT0ezNCOGapJpvIiqPyYIelH+transB8R/wDBYbWJ5tG8J6HAP+XyWT/yFXwvB3r7I/4K9Ted8RvC9iP+WVnLXx3/AMta6Dzav8Yj/wBVRDN5tST9qP8AVV00jlI6sQzVHP2qSDvXTTaqj/hGfeTGa78irFn+56VFN/yE/wAas+T5VZ1DIPP9qrz9qsT9qrz9qPZo0KdEHepKKVOmgI5pqjhhqSaGitfZ1aptSI5v3VV6sczQ1X/1VFkn+7JJKrz9qsVXvJh5VFX2tUr2bCGaj7Z5PNU4byo5pqyqU6vsRe09kXJryq82p98VHUdHs6tWkP2rLE2sdqj+2e/6VTvIe1H/ACyoVOp/y8MqdVliGaug8EzQQ6pH55rm4ZquabefY7qO4rSpTq0l+7qBTqH2B4P16x
vLCzngn/1UPl11EN5+9r5n8H/FSfTYo7ee+r1Dwr8ToL2KP/TvNryquGqntYaqesQzfuqkrH0bWINStY54KuQzVxeyNalQsY/6YfrUkPn1JDefuqIZqVT2pmoUSSGarHn+1V4ZoOlWJv3ta06Yv4RYqvN/x9eRmpIf9T+FVz/yE4609n7UZqQd6JpqsQ/6n8Kjn7VnUwNX/n4HtEV5pqr+f7VYm/dVXrKphq1H+GZnafBryP8AhI/Pn/5ZV6553+rrzP4J2cE91cefXocP+u/GtVVqr+IaEs15VOa8/e0TTVH5372slUrVavtDT2pnwd6kqODvUldX7k0I5v3H7/FEM1Hned/ywqSzh7VxWpVX7lQ0Dzp/NoqS8h7VXhhpezftQLH/ACyqSDvUc37qpIO9FN/8uzQks6sf9e/4VHUkM1dS0I9mSRTf896sTTVTohmn6VdSpSMv3xYh/e1JDDP1qODvUkPn1xfw374FjyPepIYYPtXNvUcM1T/uf+mtdvJ/z7NfZCzf6n8KrzfuqkqP/W0VP3pYef7VJ5/tUfk/uqj8/wBqzqVPZHOWPP8AapKrwd6uWdc1L21U0I/J86WpKk87yZaPO86Ku32fsjMjhm/e1cqn5HvUkHeuelUrX/eAXPJ82o6k4mmo8n97XRUq0gI6IO9SeR70TQ+VXP8AvQJKIZoOlV6If3VHtQNCpIO9U/O/dUef7VftTMuef7UVT82H1qSGatfaICTz/apKp1J5/tUGZYqTz/aqfn+1SQzVlSps0LEM1STTQdKr+f7VX82b1ro9ogLE372iDvVfz/ajzYfWue9UCx5sPrVfz/ao6IZqKlT2n8QKRJDNUn+qqvUk01GhmWPtn7rpUn2z3/Ss/wA791Uf2z3/AEpe1MzQ87zaX7Zb+h/Os6a8qnNeeTTpGhqfbIIZelE2sQRVzc95PN1oh8+tQNj+3aIdSnmjrH8/2qxDeUAWLyafpVObz5qsT9qj8j3qapmEHerkHeqfke9WIYaoCxn/AKb/AKUef7VH5HvVigzM+8rD1j/Uity8rD1iH91JWVUD5j/av/5A159DXyfeV9cftXQ50a8/7a18h3lejgva+xOGqZ95N3rPmm/e1bvP9UPpUHke9a3Zykfn+1R+f7UTQ/8ALD/nrXYfDH4D/FT4s3/2HwN4NubmP/ltdzfurWL/ALa1rTqmftDk5v3VSWdmL39xAP8ASP8AnjX1p8Pf+CbGlQf6d8TPGMt7J/z6Wn7qKvdPDf7Pfwr8K2H2Hwr4Aso4/wDptD5lFTEs0Pgvwf8AAH4qeJLX7RY+Drmu80b9j7x//wAv19bR19YeNryDwrFJBffuq8z1P48eFdNv/sJ82ST/AKY1zVMSdNPDHn//AAx/PD/r9c/9p1ch/ZL0rp9ul/8AAyukm+Nl9N/x4+HLn/ttDVP/AIWd44ml/caH5VaUmHsinpv7KNh/0y/7/V7p8K9B0r4b6Db6VnypIq8/8E694jvLqO4voPKjr0SHUoPsuJ6zxNQzpUzuIfipYwxf6+tDR/iFY3n7/wA/za8nvP7K/wCXGCKOrEM3k/8AHjPXN7RG57hZ+KrG8/5b1c/t6D/lhNXhcPiTVbMf6/8Ad1qab48n83ieip+9Og9chvPOq5DNXn+j+MIJf3/n+ZXWadqcE0UfkVmBsUQzVn+f7VJ5/tWhzmp5/tR5/tWfDeUed+887FAGp9s9/wBKX7Zb+h/Osr7Z7/pUnn+1FwND7Z+66VH53m1Thm8qpPP9qy9qdBoef7VYhmrLg71YhmrT+KBcmmqOo/P9qPO/dUwCrMP+p/Cq3/LKjzvKrQqqWKkqn9s87mpPP9qDlJJpqj87zaj8/wBqk/dVnTpm3tCOrEHeq9SUVNgqEkHeiDvRRQUE/aq/n+1ST9qrz9qALnnedFUfn+1R/wCqorM29qRz9qIZqj86fzakh/1340EmjD/qfwpPP9qjo/1tKqdATTefLViHz/KqnD/rvxq5BL/ywxWdID84/wDgqteQTftIx2P/AD66PFXy3X0B/wAFGte/tj9qrXIP+fDTYrb/AMhV8/16P/Lk82qR+f7VJUfke9STQ+bWftTEIO9WPJ/dVHB3qTzZvWtaXsr/ALwipUKkP/H3U8/ao7P/AFvn1JP2pCK9Rz9qkqnN5/m1nU/emhHP2qPzvJ/fYqTz/aq8/as/aOkBY+2e/wClRzTVT87yqks5vtk3StaWJdUftSTzvKqneXkHm/uBUesTf6VWf5372uj2vsjI3LOHzrDz6rz9qjg1Kf7L5FU7ybzq4vauqzoCftViz8jzeKz/AN7Un+qrWpB1fgM6ftaRc1GeD95WXDNUnn+1JD/rvxq9bfxCKlQgmm/e0ef7Uyb/AI+6f5HvXFd1TT2gef7VJ5/tRRWuoixDqX/Teuk8N+Np9N/cTz1x/ke9SQd6FUdKr+8KpH0h4J+KnkxR+fPXpGg/EKx1KL/X18d6b4lvtN/cQT/u67Dwt8Tp9N8uee+rSpUos7aVSqfXFnr0F5/qK0PO/dV4X4I+MEF55f76vTPDfjCw1KL/AF9eb/CrHadJD5/m1qWc3esPTbzzulbFnMPKo9m/bGZc87yqr6bB51/5/wDyzqOaarmjzfuq1pGhqQd6jvJvJ6wVH5/tRDNWlOqZle8oh/575qSftVOH/WxwYrKpVre19oB7J8JYLGHQfPg/1ktdZ53+l+9cn8MYfsegxwZrqLz9z1p037VnRT3K+pTetZc2sWP/AD3rH+J3iqfQdB+3ef5X/Tavmvxt8cv3vnz332muilSObE2PqSGGpIZvJ/cVHN/qfwoih/57101ErjLlR+dPD/qKP3VSVzYymqWx00qged+6o8791Uc0M/WiuKniH/y8Rp7IPNm9asQd6r+R71JR/wAvRFj/AKeP1qTz/ao4fP8AKqx5MHm/uK6PaVjO4QQz/wDLerHn+1HneVRB3p06XszX2lGoSQd6PO/e1HR5HvWfs/ZCLAmghl/cVJ5083Wq/ke9XIYfKrT+KOnVDz/ajz/aiaHzaIYaXtDIPP8AapPNh9aj8/2qOuf2gGh+6qPyPeo/I96kg710+0/59h7NklEM3lUUQd6Xtaw/ZskqPzv3tSUVsIsQzUTXlU/P9qIO9c4FzzvNqT7Z7/pVeikqlWrSAkoon7UVdTYzDz/aq801STTVHB3rE0D97ViDvRB3ooMw8/2qT7Z5PNV/9bRB3ran/BAued50VSfbPf8ASqcM1HneVRSAsTTUUfuqPJ8qpAjoo8/2qPz/AGpezAk8/wBqk8/2qn++8qiDvWVP/p4Zlyaao/tnnc1Xmmoim/570ezD2iLE37qq8/ai8vKj/wBbXR7ICSo5vIqSo62Myv5UPpUn2P2/Wnw/678alg71maGf5P72pIO9XJvIqOaGszMjoqSrEHeq9mjMr+R71Yoog70U6fsqpoR1H53lVJNNVeaapAJpqx9Ym/dVoTTeRFWXqR86KgzPnP8Aaoh87RtQ5/5Yy18bz9q+yP2ov+QLqn/XCWvju8r0MN/BOGqZc/atDwd4V1zxh
rVv4c8OaVJc6hdfu4Yq6z4PfAHxx8ZvFFvY+HLH/Q/O/wBMlr74+DPwB8K/B/QbfSvDmlfvP+Xy7/5a3VP2jMvZs8H+Bv8AwTx0PQfsfir47zxalqH/AEBIv+PWL/rr/wA9a+oNH8NwfZbPw54c0OX91+7hhhrU8ST+DvB+jSeI/iNrkem2cX/Pb/Wy/wDXKuD/AOFqfE34wR3Fj8ObGTwl4Tuv3c003/H1dRVzVKuovZHaXmpfCv4b+XB4/wDEf9r6xLD5n/CPeHv9Jli/66y/8sq5vxV8YPiN4k0b+w/hxocfg6z/AOuPmXX/AH9o0D4e+FfCsMlxpWlf6ZL/AK67/wCWstXIYf3taKqbeyPI/wDhQ9jNdSar4j1W+vriX/ltNVyz+Ffgez/5lyKvSNS03zqy5tM7ZrSoLU5uHQdKs4vIsbGOOOq/9g283+vgroJrP91VeaGufkEZ9noMME32is/xXN5MVdJzDDXN+MP33l1p7Qa3Ofhmn82rg1KeH/lvWfUnn+1ZnWtgvNYvoYswVjw+ML6zuv389ak0Im/cVnzaD/0woA6zw34287y/39dxoPjaeE58+vD/AN/psv7iuk8N+MPO8uCf/WVoB9EeGvEkF5W5537qvH/DniSfpBPXeaD4knmij8+uakT7M6iGajz/AGqnDeVYrpMQ8/2qSGao/P8Aais6lMC5R5/tUf8Ayyo8/wBqLHQWIZqsQd6p1Yhm8qsfZmhcg70TTVn/AGz3/SjzvOlqgLH2z970o8/2qvR5/tWhxkkM372rkN5Wf5/tUn/LKtP4R0FiaapKp+d+9qx5/tWVPYr2Qfvaks5u9V7ybvRDDUFGh9s9/wBKk8/2rL86fzasVpT9jVK9oi5j/ph+tU5+1Sef7UTTfuqKn/TsoKjqPzZvWjz/AGoAkqSDvVeDvUkP72santjQsQ/vasT9qjs4fJiyKjn7U1Uq/wDLw0/hElSVXh/1341Ymm8m1knH/LKGoA/I/wDbA1g6x+0X4svj/wBBLy/+/Veb+T5VdZ8YNS/t74jeINVnP+kS6xLXLz9q6DyapHRDDUnke9EMM83QV0U9zlD91RP2qT7H7frUd75EMWK19oaFOzm71JP2qOib/U/hWX74zK803ky5qPz/AGqOabz5aks/I83is6dQ0Kc01RzTebXQQ+FZ9Sikng/5ZVn6loP2KtOesZmP5HvUlneQabL58H+som/dVTnhnml5rWp7EAvB50vn1H5HvUnke9Hn+1cVOp7X92dBXqOGakm/1340z/lrWlPD1UV7Q0LOE/ZZJ6p+d5tWIZj9l8ipNNs/+e8FZ/vjmqbmf5HvRPZz+VWhNDUepfubWtKVKsJexK+m6b50vnz1JNDVjTZoPKrQs9B/tGKSe3rp+r+yNf4phzQ1X/1VWJofIlqv5Pm1zCLHnQeV9nqPyPepIdNnomh8k/v66LafvAK/nfvasef7VX8iD/lhVjyPeuemP2vsjY0HXp9Nlj8ivTPB/wAVLeHy/wB/5UleRww1J9s8nmtfZUjWnVPqjwr8WvtnlwTz16Bo/iqxvLWOeCevi/w38Qr7R5Y/3/7uvWPBPxOn8qOeCeuX6tVO2liaJ9Ef2l53NXNNm/0WPArzvw34w+2CPz76u80fUoJrXieuKpTrDNjz/ao/P9qj86DyqK6KWwqpJ5/tUcP+tT60ef7VY0399fW8H/PWbpWHsqxn7U9s8K2f2PRrf9/5n7mrmpXnkxSGiz8j7BH5H/LKGqepzedFJWlKmdJ4v+2Z4qvtH+FVx5E/7yviu816e84uJ6+pP29vEkEOg6f4cn/4+Lqavke8r1qVP9yebiXofqJ5/tUlV/Jnh61JB3rmPe0Hw/678asww0QzUQ3lZv2JmXPI96rzQ1H9s9/0qTz/AGrKovamhHND/wA8P9ZRUfn+1EM1ZU/ZAaEM1Hmw+tRw/wCp/CiftWlP2pmSed5tWIO9V4O9WLOafpT9pV/5eD9mWPI96JpvKo8/2qOs/aHT7Mks5/Oi/f1Ymn/deRVfmGGrEPkTRda0/h0Tlp7kkP7mLNHM0NR0Tf8ATD8KX8OiBH/qqkhmqn+/mqSopAaFWP8AllWfDN+6qxDNXR/17Ak/5a0+H/XfjTKk8/2qDQkn7UVHN5FFaGZJUdHneR/r6j/5a1jVNC5DNUc/aq8HepKzp1DOpuWPP9qj82b1qP8A1VRzXnky5rXnpGZYm/e0Qw1H537qjz/atbXAuf8ALKo5+1V/P9qPP9qyqYf2RoSef7UT9qjhmqSub/l2FPcPP9qPP9qjmm8qo4ZqKZz1Ni55/tR5/tVfzvNqPzvKrWnUAuef7VH5/tVPzv3tWPO/dUv4poWPO/dVH5s3rVfzZvWpP+WVSZlio/3tHn+1R+f7V0f8uQJIYfOixR5PlVHDeeTRWYBViDvVeGHyZcVJQKnTJKjn7UVH/raX7oZHFN/z3q5VeDvUnMMNKnTMySpIO9V6khmp+zNAmmqOab97RNeVTmmro/inOSTTVHNNVea8rPvNS7efUGhJeal28+s+81L/APVWfeXlZ815SpGZ5X+0tB52jap/1xlr5b+Ffwl8VfGDxlb+FfDn/LX/AF0v/PKKvrD48abPrGjXn2c/vJYa6z9kv4J6V8PdB/taCx/0yWH99LLWtP8AdHDUWp3nwf8AgnpXwx8OW+h6VY/8sYvOq58Wvip4c+DOhR6rfQeZeS/u7O0/56/9sq6zUvFWh+CfDlx4j8RT+Xb2tfPfwT8H/wDC/vHknxp8YweZb2v7uzirQ6FTNTwT8MfGPxzv5Pip8cIPNs/3X9m6JF+6iir1T+wZ4Yo4LGDy7eL/AFMVdZDD5MUcEEHlxxUfY/8Anga5qh0eyOTvNH8mLms+Gz/e8wV3E2g+cPPqO90EwxV0UqftSPZHFzaZ2zVO803/APXXYT6PPWfeab38imQcXeab/wDrrLn7V1GsQ+TFXN6l+561mc5T8/2rD8SQed3rU8/2rPvIfOqLNm+hy95D5NV/Jnmlrcns55paLPR5/Nrf2RgZ8OmeTVj7H7frWxDpvk/8sKk+x+TxXNU/dG1M5fUtHE3mcVjzabPZy4ggrvLyHtWXrGmwTRVnUq1aQf8ALwp+D/FU9pdfYa9A8N+JLjzY4K8b1iGezuv3E/lSV2HgnXvtlr/r/Nki/wBdSpGx7h4a1jzoY4JzWx5/tXn/AIP17/lgP0rtLO886LFdRmXPO/e1JWf9s9/0qxDN5tZk+zNDz/ajzunkVT8/2o8/2rQ2NSGaiaasuG8qTzv3VZUjMkmmqxDNWf8A8tauWcPatPZoCxD/AKn8KSH/AF341BN+5lzViGan7Qn2ZcqOH97R53m1JB3pVNjYjn7VJB3o/wCWVFZ0qf8Az8Jqkk372j91UdFKqUWP+WtHn+1R0QzU6QFio/8AllR5/tRD+9rT2fsgD/lrUlRzf678aPP9qftEAT9qkg71HNNRDN5tZnQaM3+p/Coqr+f7VJQAf6qq/iq8+x+EtYn/AOeWmyyVY8/2rl/jxqU+j/BbxJfQT+VJ/ZssdZrc
0ex+SepYvL+4uP8AnreSyVXhs61KrTf678a9CmeG9xn9nW/oarzTeVUl5eT1JZ6DfXtrHceRW3sqJHtGY95qVwJay7y8nm/Oug1LQfJl/f1HN4Inhtft3kfu6xLMOGafyakm8+atSfR/scX781n3la+1MFT1Kc/armj+R9q4qv5HvRDN5Vc1Of772h0VD1TQdB+2eHI/sMH7yvO/FUx/tmSuw8NePLKz0fyPPkik8ny68/vNTnmupJ5/9ZXTUqGRl3n76XFRww1JNNUfn+1H1e5rTqEk0H7rz6z5+1WJpq3JvDcEOjR3E/8ArKzpYYdU5eftVP8A5a1oTTVHDZ+dRU3MvZsuaDD511Gauaj/AMfX41Ho+j6tN/qIKsTWc8P7+etaQezZHaWf2yXyKz/FUM+my+ROa3LPUrHTZf39YfjbUoLy/wDPgq17YRn2cw8quo8K6lBZ6XJ5/wDzxrk4Yf3VXIZv3VcWI9rVrDpfuiTUrzzpZDVez/4+j9ajn7UWcx82tFoKpudp4V0eC8tZDP8A88ap/wDCN/2xFJPBb1Xs/Ef2Ow+wwV0Gg3lj/wAIvcX0/wDzxrVVP+fg6e55/ND5U0hqxZw9qj1T/j/qxZw9qy9n++Mi5DD+6rPvIT5taH2zyeapzTefLWvs6JoR+R71c03WL/TZf3E/7uo6jmhrpq06XsqYHongL4kX0Msf7+vfPAfxJsZrC38+fy45a+P7KaezljnruPDfxCvtNijg8/8A7Y1xVEdWGqn2Jo+sWOpf6ietSab91Xzn4D+Mxs7qMif/ALY16xoPxIh16KuGphjs9qdZ5/tWx4Ph+2a9Z2P/AC086uTs9Sgm/wBRPXafCv8A5HK3uCf9VRTp1iT2Szm8mq+pTHzeKPO8quf8SeMLGzEgNx5Vaqm6TND5X/b21j7Z4t0+Dz/M8r97XzneTd69M/ac8VQeJPG8k9vP5scVn5cNeR6xeeVF59e0qaseHiT9TIO9XIZqx7O886rkM1eavYn0lI0PP9qj8+D/AJYVH5/tRmb/AJ96zxIFyy8ijzv+e/8ArKr2f2iE1Y8nzv3/APy0op1P3JoR/wDLWrEHeq/k/vakhhrm9lS9t7Q0JIO9WIZqr+R71JDDWf8Ay9MyxUkHejyfJiqSGH91XaaBViGHzajo/wBVRUVEAn7UQzUT9qPP9qzMyx+/mi/Go4fP82iGafpVjyf+W/60qdMCP/0bRRNDP5v7iiGGfza5/aDqbhB3q5DDVfyPerEM1XSF7NhR5/tUfn+1H/LPyKipUNA82b1qxVeDvUn/ACyroXtf+XhFPcKsedBDF5ArP86fzaPP9q4va0i/3xc+2W/ofzomvPOrP/e1JDDVAWJpqPP9qKr+d+9qvaeyMy5537qo/P8Aaiq8/as6lWqP+EWPP9qkg71T8/2qSGao9rcRY/1VHn+1FFb1Nh0/3RH+/mog71J5/tUf+qop0xEk15Vfz/aopv8AXfjTKo5y553nRUQd6jhhoh8+p9qjQueR70T9qrfbLj0H50vnTzdaKlSkBJDNUlRwd6kp0gK//LWrkHeq/k/vaPtnv+lKnUD2RYoqvDNViDvWn8UzDyPeo/33m1JRWtP/AJ9mgQw1H/qqkqOs6lMy/hEfn+1E/aqc15+9qvNeVnTp+yM/alibU++Kz7zUv/1VTmvJ6pfbLj0H510jLN5eT1l3l55NSTXlZ95N3pVP3pmF5N3qnNeVXvLyq9nN50vSoNDQ03QINev4xfQfu69Q0GaDTbWOCCvP/B83k+Z59dYdYt7O1kvp/wDVxQ1dPYDi/jxrA8YazH4Hg/494pv31eoeD9BsfB/hy38OQH/j1hrxPw5ZnXfGVvPN/wA9q90hvPOrUDYs5u9XPNh9a5+G8rQ02886XrWYG5DD50WKk8n935OarwzQeVUk2pQeVWlSmBHeabB1rLvNNg61of2j+9qnqWpQdKKRznJ+JNNg/eV5/wCJYfJl6V6JrF5/rMVw/iqGCa66UUf4xPszm/J82rEOj/uquabpsHWrnnQQ9a9F1KNIz9mY8Og9p4KkGmwQ/wDLCrn2z3/Sq815+9rz/a0TT2ZTmhqneQnzasTTVXmm82uX2dzX2RTmmqvP2q5eVXrm/e0qxqcn4q02fyvPgFU/BOpeTrX2f/nrXUalZ/bLWS3ri9G/c+LY4K6k6JmeueGry4hu469E0e8861jnry/R5v8ASo/+u1eiaB/x4JV0/wB6aGp5/tViGasuabyqIbyemZmx537qo4ZvNqn+/mi/GiH9zLitPZAaEHejz/aq9Hnfva5zM0IZv3VWIbys+GapPP8Aag0ND97RDNUcN550uak/dVftEX7Ik8/2qxVOpPO6eRUGfsi5D+6opkP+p/Cn/wCqrSlU/dB7Ikog70f8sqjmm8mXNKpTGWJoaKrzTVH9s9/0rOpoaFzzv3VRwzedLmo/O/dUQ/678a09oZVS5P2o/wCnj9aPP9qr+bN61H/L4Zn69qU8Ev7ipNHmnmijqnrEPnS1c0f9z0rKpUOinsbEM1E01R+f7Uef7VlT9saEleX/ALZms/2P+zn4gyf9bD5deoV4P+35/wAkHvLCef8A0e6m8uat1uD2Pzz8nyYfIqvN++lxWpeWf+lVHDo89exOpSseLTp6mfFo4mm/f13lmLHTNB/1HmebD5dZ/hvwr9s/18FXNSh8mXyayLqU0cveQ+ddVY17Xreawt7G3/d+VVi802fzfPNc3r0E5usiqpbmRn6wPOlrPmh82tibTf8AphWf9jnhlFdFT2NIdIr6bo895L5EFR6lpps5fIroNHvLHTYpPPg/eVj6xNcTXUk9ZUqSqmdTcr2dU7yHzrqSpK7Dwf4D+2eF9Q8Y30Hm28UP7mnUVL2wjz+70e+8rz81X8j3rc16afyvIgrD8j3op4il7X2ZrUphD5EMtbGpax9s0uOCCsPyf3tamm2c8wjgrSl/G/eGVSoV4dH/AOW5FSWdn/p8cFd5D4Ogh0vyBB+8rz/WL3+zdZk8iipTo3H7RnsnwT02x03WZJxYxy+VD/qpaw/j/oOlaNqkd9pcHl/av3k0Ncf4P+IV9oOsxzwT/u/+W0NWPid48n8bX/n+R5ccX+phopfujT2Zyd5++61n3kP2ybrUnnT+bVebz/tVPD1KVWr+8M6lMLP9z0qxefvutR+R71HWVWpSpP8Ad0zSnTI5vPog71oaZD51RzWc8MvkZrL/AJe+0D2ZXq5FNceV5FSWeg30w8/yKLyz8mXNPWsINH0efXrryIP9ZVi802fTrqSxn/1kVbHwxh/4n0dHjaznm1m4v66KaMzk5oZ/Np8P+u/GrPke9V/Km9K5KiwtKn7Q6vqxoebD61HNN5Muajm8+ovsdx6j8q61iPbGXshnmzetXIO9V/J8mKrmmd6KU/a1fZ1DP+EXLO8ns5fP8+u08K/E6ezMcHn+VJXF/Y55TUkNnWtTDUqRue+eFfiROPL8+evZPhX8SLGHzJ7gR/8AXbzq+O9H12+039xXaeGviRPZ+XxLHJXD9TrbnQfWniT4zWNnYSD
+1Yq8n8bfHiCbzLGe4/1teV6l8QdVvJfP+3S1h699u1Kw+3VdjH2hl+PNY/tjVJL63ri9S/fS/Z563JvtGP31Y95D511XfhvZVf3fszlxJ+oHhub/AECOtiDvXH+D9Y87S4566SzvPOrwKfsrfuz3KexqQzefFR5/tUcM1HEM1aVPamhchvB5vkZqTz4P+W9U6sQQwTRfZ565vaVlV9nTAkmmo87j/pnUdSQd6ytWq1f4hoSTTfuqPP8AajyfNon7Uf7Vc1p2LEN5ViGas6H/AF341d/5ZVr9Yq0hFj7Z7/pUc/apIYaJvIop1KrpAHkzzdKOIZqPtn/PAUfbPf8ASsqbq1QJPO8mWrH9pQeV/o9Z/wDrajm8jyqPbVaX7sDY/tH91VP7Z7/pUcM37qjz/atrfuQJJpqPtn7rpVf/AJa1J5/tWPs/3pC3LkM37qo/O/e0ed+6orP94aVCx537qk+2XHoPzplHmw+td1Oq/ZezMgmmqTyPeio/38P+orm9l/z8Ak87yqPNh9ar+d18+j/R/wDlh0rKpUpAWPP9qPP9qrw+fUlYgSWc3epJv3tV/wDVVJDNW1L+CaB5HvUlE15n/rpUf/Xx+NZ+yNvaB9s9/wBKPOnm61JNDB1qP/VVpU/dnKSUT9qjqPz/AGoqVKoElH/LWiiH/W+fis6dN+1Ased/q6Jryo5pvKo/dUVKdb/l2HtAn7UQw1Y8+D/lvVeaaj+EBJUkM3lVTqxD/qfwrSmv+nhmHn+1R+d5tH+tqSGGs6lOqFIkhmn6VY/5Z+fUcHeo5pq7VsAed+9qPz/ao5ryq801MCxNeVTmvKpzXk3m+RVPzvJlrH2n2wNT7Z5PNU7zUvOP7+s+a8nqveXlP2hzhNeCeWo5ryq/n+1V5pq0p1AJJrys+a8qSabzar+R71n++Ar3k3nUkP8ArvxpZ+1RzTfvaYGxps3k/uKuaxqX/ErkPSufs7z/AEqOrmpTefYVoBT8BzeT4ut69chm82vF9H/4lutW+q483ypq9cs5vJlwKuf8Eyp3NTz/AGqxpupeTLzWX5/tUkN55NZqpWNTpP7S/df6+o/7Zh/57fpWHNqX7qq/9p/7NdO5mdJ/bEPpWfqWvQQ1h/bJ5TVebz5q2w2Br1v3dMxqVSPUte86WT9/XH69qU091XQawPJirDvLOuzEYZYBez/5eGftPahpt5PNViftVOz/AHPSrk/avJqVGbFOaaqc01XL3yKpzfvpcVw1PbGhXn7VXqxP2qvR7Q0K8/aipPI96khs6KlS5oU7yH/RZK8/0eCeb4l28EH/ACyr0TWJfselyTz/AOr8muL+Eum/bNVvPFU/+slm/c0U6fsjM9As/wBzLiu40fUvOsIzXD11Hgk+dayf9dq1w1X2rA3PO82rFl5FU/I96sQzVr++J9qXPO/5YfpUc01R+f7VH9sg87FXT9sSXIZv3tHn+1Z/2z3/AEqSG8rOoaFz7Z5PNH9sf5xWPqWpev5VThvKVTYDqLPXu1akM3nReea5PTbyCaWOuohm/dVFU0LnnebVyz/c9Kz4ZqsQzVpSMy55372pPP8Aaqfn+1Sef7UqRoWIZqrzTUef7VX4mmp1QJPP9qsWcPao4YakpmYT9qjs5p+lXPKm9KPI96VI0D/W0ef7VJ/yyqOoArzeRRZzd6jmhohh8qsfaGhcg70ef7VX8/2pftlv6H86FU1My/5/tXzP/wAFLNYnh+Gmj6VB/wAtdSr6Q8/2r5P/AOCk155+qeG9JH/TW5rpXtvamlX+EfKfke9XLOz/AOXio/I960NH/wBSa1qYiseOaGm3hs+tV+Zrqjyf3tamj6b511HPV/WK3Yr2ZHZeG7i7/wBRBXD+MNH+x69JBX2p4D+Eulab8Of7c1vSYo7iKHzP9dXx/wCMIftnii8P/PKby6iliSTD+x/uutH9j/bP3FaH2P2/Wj/VV0e0uV7M5vWNHh02sO88jyuK7DWIftlc3NpnnVrTxP7k5qlMw69I/wCEqsdB+Ev/AAjkE/8ApF1DVP4b/DH+3r+Sef8A5ZVH48s7HTZZLGA/6qlTqVRHn+pTfvZKz/P9q0NQ/wCWlU4Ya6f3g6m5X87yZa1NN1iCHy81j3n+tH1qOb/U/hWdSpWMj0ib4n6VaWEn/PSvK9Y1jzrrz6kmm86LNZ83n+bXPUvVNCxDqU9WP7S/57/nVOH/AFP4VJDpk88XnwV0L60ZhNeVX+2fvelSXkE8Uv8AqKr/APLWipOrV/h1A9oXPP8AaiH/AFXn4qvB3qx/yyrn+rYr+J7Q1VWkWNN1LEtbGj2f22/jxXP2dnP53niuo8HzfY7/AM+tKarVRHeQ6PYw2scHkVx/iqzgh1CTyK6C88VWOcVY03R7HXpf38HmUnUxdH4DX2Ry/gmznm1T9xWh4wh8mvZNB/Zvg0fwv/bljP5vlfvJvOri/FXg+3miknnrPDYnMKpr7M8fmmqPzvNrY17R4LK6/cVl+T5VO2LqmVSqV5rypIZp+lWP7Nhh/wBfVj7Hb+p/KtFSxdH957Q5vae1I4f3tXILOebpRDDB1q5D+6rWoqtX/l4FIPJnh/5YZohm/e1oTf6n8Ky4f+Prz8Vy1FmH/Pw6DQvL2DzfPt4PLqOG8nqneTedVizhHlVrUqVvZez9oVVNSzvKualrH/Er8iA1n1l6xPPDFxWVJ1vgMSxNN+6rPmh87/UVXhvJ61NHh86Ku5f2hUMz9BPCv/Hr5FdRD5FcH4JvPOta7SHz5oq8ipiaNV/uj6CnsakM3lVchmM0vnisPypvSrkPn0VKtal/ENKVT2pofbPf9KsQ3lZc/an2f+qP0rmqYk0NGaapIZqp+d5MVSQzebXNTqe1rBT3LkN5UlR2cPaj/VV6fs/3XtALFnNB/rp6uQ3kH/LCsuiDvXD9ZuaFya8/e1H5/tVeaaiH/pv+Na3pfwzQsT9qsQd6z5pv3tWIZftkVc1OrSo1ivZlj/r3/CpPJg8qo4f3MWaj/wBdL5FdNOrSf7z2Zn7IIO9WIO9V5v3VR1l9ZrXD2Rch/e1JUcHeib/U/hXT0D2RJ5/tUkN5WfUnn+1ebTqe1KNCa8o8/wBqz/381WIYa6aVSqRU/eljzp5v9fRDeQQ9qjn7VXn7VpUqf8/BEk00Ex/cT1JB3qnB3qSuen/08Auef7VJVOpIZqP4QElFR+f7Uef7UUqhoSVJVejzvNoxFT2RzlipIO9V4f8AnvmpK09pRt7QCTz/AGpIf9d+NMhmon7Vz+1OgsfuqKp+f7VJDNXR9ZOcsT9qP+WVV5pvJlzUf2z3/SsqeJ9lW9mFTcsVJDDVfz/apIZqiqaEn/LWpPOg8qqfnfvaJpv3tdFOp+5Myx5/tUn2y39D+dY/2wed5FE15WlOp7WkBqf2n/s1XmvIPK4rLmvKjmvKz9oZmp9s9/0qOaaDpWf9s9/0qOa8rWnVpVQLHnQebVOab97VebU++KjmvKXtKIEnmw+tU7yaDpVPU7yeGWs+bU++KPaI5zQmvIPK4qn9s87mqfnebUc03lUfWDoLk01R/w
DLWqfn+1Sf8sqPafuvaHOE/aq8/apKivP9UPpXPUqmlPYgmvPJlzWxZ3n2yKuT1Kb1rQ8K6lP5UkE//PatKeJogaB8+GXyM12HhXWIIbX7DPPXJzxf8t81JZzfY5Y76D/WV0+1A9I/tiH0oh1KCbvXL2esQXkWBPWhDeU17YDcmvIM1TmvKy7zUv8A9VV/7SHlefXQrszOks5oJquTwwQxc1j6PUmpXn/LAmv1nJcto5XkVTGVP4h8/iavtcX7OmZ+pTetZd5N51WLyYzS+RWfP2r8rzPFOtXPWpIsaZ3qxNNB5VZ9nUl5MPKrzKftjuK95N50uDVfz/aq815P5vFR0U6gFiftVeGapKPI9656n8cCSGHz/Lo8mCHpRDDVzyINNsP7Vvrf93/yxh/560VKtH+GJUqp538TtSvtSurPwPpVv+8uv+Pyb/nlFW54b06DTbW3sYIP9VDUmm+FL6a/k8SarP5l5df67/pl/wBMq6jwr4Vvtev47Gwg8yStKYVKRy/iTWINHi/f12Hw9m/4lfn5/wBbNXh/jzxjBrvxBjsdKvvMt7W88vzf+WVe2eA5vJ0GPitadOlSIOk8797Vj/llWXNN+9qx/qq6PaIzLE15+6rPvLyisvUf9b+Nc31hm/s0SfbP3vStuy/5B8f1rnNO8j/Xz1YvNe8n9zBR7UdPYsXd5BNLRD+9rPh/f/v8V0Gmw/6vdWftALGg6PP5sc89dJP2rPhm8mLNXK6v+XIe0RYg71YhlghFU5poPOqOCaeaXiszQ0Jryejz/ao6jn7UGZJ5/tViDvVeDvVisadT/n4Bd+2W/ofzp8M0HSqcHepJ+1H1mq/4Z0Fya8ohm7zwVnz9qkhvKSqVva+zM/ZGjN/qfwqt5/tVf7Z7/pUc01a+0omhJNeVJDNVOftUkP8AqfwrhNCSaao4Zv3tFRwd6ALnn+1fHf8AwUI1Lzvirpelf88tNlr7Amhr4j/bk1I6l+0Fcfv/ADPKs/Lr08N7b/l4YYj+Ejx/yYJutaFl5FU/+WtaFnZjyq0qVP8An2cHsixB3rtPhvDpP2rz76Dza4ezmg+0/Z4K6DR9TnszHPBRSqFH0J4q8earr3gj+yrGf/SPJ/11fJevQ+d4o1D/AK/K90vNdGm+Df7VnnrwezPnRSX0/wDrJZvMrXQv2RX8j3qP7Hb+p/Krnn+1V6PrNKl/y7D2Rn6lZ+/0rL8j3roLyHtWPeQ9qPa/uTi9maGj+Nh4WtbiGCD95L/y2/55VwfjbxJ/aV1J9nrY1ib0rm9Sh/dSUe1pVaJn7Ix/9bVf/VVYqnef60fWunBYmiZKnXK975FRzC38ryKZLLuNRT9q5/rnsqtSEDT2Rch+z/6g1T/syf7V3qSDvUkM0811WSxxr7M0PDfgm+8SSx+RBXqngn4D32g3Xn65B/o/k+ZWX8H4YPNt67j4kfFn+x9LksdKP/Xau36yjP2RzfxOs9Es7DyP7Kj8v/rjXg+pTWP9oSfZ/wDV11Pjb4mah4ktJLHzv3dcV/y1rOnjKS+AXsixD5FaGnRedLVPTdN866rc02zghuv389FSqHsjY03wrBNYSX1Zc039m3ckFdReaxBpug+RAIpfNrg9SvPOl6Vl9dq0h+yND+0/OlrsPBPjCx02WOC+/eV53D59WNHmnhuo/wB9WdTG+1NKdOsfemj+MPDmpfC+Q2M/7yLTfLh/6a14P4817Q9N8yx+3ebXN2fxC1WHQfsPn/vK4/WLye8upJ7+fza6aeJo4X+GdPsiTUtSsZpZKz4Ybf7VUc/aqc80/wDywrSniTiqmhqUPrVeG8gh7VX/ANO82OCc/wCtqO8sp4ZaKlUx9mzcs/33SrHke9YcM19D5f2H/WVoXl5S+sf8/Db2Rc87zapzXg83yM1JZwz3kX7isu8hnh1TyJ68+pV9kdHsqtjoNNhg82Oukh8N+da+fBXJ6P8A62ODFdxpupQWel+fP/q4ofMmrepiV7H2hw63MuaHyqw9Ys/Or0Dxfo882gW88P8Ayyh/fVwd5XOsdSpHV7KqU4dNgrY8mCztaz4YP3vn1sXn/IMj+leusz0MqmGP/9k=\n", + "text/plain": [ + "" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 9 + } + ] + } + ] +} \ No newline at end of file diff --git a/eval.py b/eval.py index cd56e987..cc95466a 100644 --- a/eval.py +++ b/eval.py @@ -8,15 +8,10 @@ import locality_aware_nms as nms_locality import lanms -tf.app.flags.DEFINE_string('test_data_path', '/tmp/ch4_test_images/images/', '') -tf.app.flags.DEFINE_string('gpu_list', '0', '') -tf.app.flags.DEFINE_string('checkpoint_path', '/tmp/east_icdar2015_resnet_v1_50_rbox/', '') -tf.app.flags.DEFINE_string('output_dir', '/tmp/ch4_test_images/images/', '') -tf.app.flags.DEFINE_bool('no_write_images', False, 'do not write images') - import model from icdar import restore_rectangle +import flags FLAGS = tf.app.flags.FLAGS def get_images(): @@ -68,15 +63,15 @@ def resize_image(im, max_side_len=2400): return im, (ratio_h, ratio_w) -def detect(score_map, geo_map, timer, score_map_thresh=0.8, box_thresh=0.1, nms_thres=0.2): +def detect(score_map, geo_map, timer, score_map_thresh=0.8, box_thresh=0.1, nms_thresh=0.2): ''' restore text boxes from score map and geo map :param score_map: :param geo_map: :param timer: - :param score_map_thresh: threshhold for score map - :param box_thresh: threshhold for boxes - :param nms_thres: threshold for nms + :param score_map_thresh: threshold for score map + :param box_thresh: threshold for boxes + :param nms_thresh: threshold for nms :return: ''' if 
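The diffs below consolidate every tf.app.flags definition into a single shared flags.py module. TensorFlow 1.x registers flags in one process-wide registry at import time, so scattering DEFINE_* calls across eval.py, icdar.py, and the training script makes it easy to register the same flag twice, which typically raises a duplicate-flag error; routing all definitions through one module that every script imports avoids that. A minimal sketch of the resulting pattern, assuming TensorFlow 1.x (tf.app.flags no longer exists in 2.x); the main() body here is illustrative only:

    import tensorflow as tf

    import flags  # noqa: F401 -- importing runs every DEFINE_* call in flags.py once

    FLAGS = tf.app.flags.FLAGS


    def main(argv=None):
        # After tf.app.run() parses sys.argv, each flag defined in flags.py is
        # available as an attribute and can be overridden on the command line,
        # e.g. --checkpoint_path=./models/east_icdar2015_resnet_v1_50_rbox
        print('checkpoint:', FLAGS.checkpoint_path)
        print('outputs:', FLAGS.output_dir)


    if __name__ == '__main__':
        tf.app.run()  # parses the flags, then calls main()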
diff --git a/eval.py b/eval.py
index cd56e987..cc95466a 100644
--- a/eval.py
+++ b/eval.py
@@ -8,15 +8,10 @@
 import locality_aware_nms as nms_locality
 import lanms
 
-tf.app.flags.DEFINE_string('test_data_path', '/tmp/ch4_test_images/images/', '')
-tf.app.flags.DEFINE_string('gpu_list', '0', '')
-tf.app.flags.DEFINE_string('checkpoint_path', '/tmp/east_icdar2015_resnet_v1_50_rbox/', '')
-tf.app.flags.DEFINE_string('output_dir', '/tmp/ch4_test_images/images/', '')
-tf.app.flags.DEFINE_bool('no_write_images', False, 'do not write images')
-
 import model
 from icdar import restore_rectangle
 
+import flags
 FLAGS = tf.app.flags.FLAGS
 
 def get_images():
@@ -68,15 +63,15 @@
     return im, (ratio_h, ratio_w)
 
 
-def detect(score_map, geo_map, timer, score_map_thresh=0.8, box_thresh=0.1, nms_thres=0.2):
+def detect(score_map, geo_map, timer, score_map_thresh=0.8, box_thresh=0.1, nms_thresh=0.2):
     '''
     restore text boxes from score map and geo map
     :param score_map:
     :param geo_map:
     :param timer:
-    :param score_map_thresh: threshhold for score map
-    :param box_thresh: threshhold for boxes
-    :param nms_thres: threshold for nms
+    :param score_map_thresh: threshold for score map
+    :param box_thresh: threshold for boxes
+    :param nms_thresh: threshold for nms
     :return:
     '''
     if len(score_map.shape) == 4:
@@ -96,8 +91,8 @@ def detect(score_map, geo_map, timer, score_map_thresh=0.8, box_thresh=0.1, nms_
     timer['restore'] = time.time() - start
     # nms part
     start = time.time()
-    # boxes = nms_locality.nms_locality(boxes.astype(np.float64), nms_thres)
-    boxes = lanms.merge_quadrangle_n9(boxes.astype('float32'), nms_thres)
+    # boxes = nms_locality.nms_locality(boxes.astype(np.float64), nms_thresh)
+    boxes = lanms.merge_quadrangle_n9(boxes.astype('float32'), nms_thresh)
     timer['nms'] = time.time() - start
 
     if boxes.shape[0] == 0:
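For reference, the merge_quadrangle_n9 call above receives an (N, 9) float32 array: eight values per row for the four (x, y) corners of a candidate quadrangle, followed by its score, exactly the layout lanms/__init__.py scales by `precision`. A hedged usage sketch with made-up coordinates, assuming the native lanms adaptor module has already been built (the new CMake build for it appears further below):

    import numpy as np

    import lanms

    # Two heavily overlapping candidate quads; the last column is the score.
    boxes = np.array([
        [10, 10, 110, 10, 110, 40, 10, 40, 0.95],
        [12, 11, 112, 11, 112, 41, 12, 41, 0.90],
    ], dtype='float32')

    merged = lanms.merge_quadrangle_n9(boxes, 0.2)  # 0.2 = nms_thresh, as in detect()
    print(merged.shape)  # locality-aware NMS merges the near-duplicates into one row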
diff --git a/flags.py b/flags.py
new file mode 100644
index 00000000..1e0faf12
--- /dev/null
+++ b/flags.py
@@ -0,0 +1,41 @@
+import tensorflow as tf
+
+# required for both training and testing
+tf.app.flags.DEFINE_string('checkpoint_path', './models/east_icdar2015_resnet_v1_50_rbox',
+                           'model folder that contains checkpoint, index and meta')
+tf.app.flags.DEFINE_integer('text_scale', 512, '')
+
+# testing parameters
+tf.app.flags.DEFINE_string('test_data_path', './training_samples',
+                           'folder that contains images to test')
+tf.app.flags.DEFINE_string('output_dir', './outputs',
+                           'result will be written to this folder')
+tf.app.flags.DEFINE_bool('no_write_images', False,
+                         'do not write images')
+
+# training parameters
+tf.app.flags.DEFINE_boolean('restore', False,
+                            'whether to restore from checkpoint')
+tf.app.flags.DEFINE_integer('max_steps', 100000, '')
+tf.app.flags.DEFINE_integer('save_checkpoint_steps', 1000, '')
+tf.app.flags.DEFINE_integer('save_summary_steps', 100, '')
+tf.app.flags.DEFINE_integer('batch_size_per_gpu', 14, '')
+tf.app.flags.DEFINE_integer('num_readers', 16, '')
+tf.app.flags.DEFINE_string('training_data_path', './training_samples/',
+                           'training dataset to use')
+tf.app.flags.DEFINE_string('pretrained_model_path', './models/resnet_v1_50.ckpt', '')
+tf.app.flags.DEFINE_string('gpu_list', '0', '')
+tf.app.flags.DEFINE_integer('input_size', 512, '')
+tf.app.flags.DEFINE_float('learning_rate', 0.0001, '')
+tf.app.flags.DEFINE_float('moving_average_decay', 0.997, '')
+tf.app.flags.DEFINE_integer('max_image_large_side', 1280, 'max image size of training')
+tf.app.flags.DEFINE_integer('max_text_size', 800,
+                            'if the text in the input image is bigger than this, '
+                            'then we resize the image according to this')
+tf.app.flags.DEFINE_integer('min_text_size', 10,
+                            'if the text size is smaller than this, we ignore it during training')
+tf.app.flags.DEFINE_float('min_crop_side_ratio', 0.1,
+                          'when doing random crop from input image, '
+                          'the min length of min(H, W)')
+tf.app.flags.DEFINE_string('geometry', 'RBOX',
+                           'which geometry to generate, RBOX or QUAD')
diff --git a/icdar.py b/icdar.py
index 26b97ff0..a1e6becd 100644
--- a/icdar.py
+++ b/icdar.py
@@ -10,26 +10,10 @@
 import matplotlib.patches as Patches
 from shapely.geometry import Polygon
 
-import tensorflow as tf
-
 from data_util import GeneratorEnqueuer
 
-tf.app.flags.DEFINE_string('training_data_path', '/data/ocr/icdar2015/',
-                           'training dataset to use')
-tf.app.flags.DEFINE_integer('max_image_large_side', 1280,
-                            'max image size of training')
-tf.app.flags.DEFINE_integer('max_text_size', 800,
-                            'if the text in the input image is bigger than this, then we resize'
-                            'the image according to this')
-tf.app.flags.DEFINE_integer('min_text_size', 10,
-                            'if the text size is smaller than this, we ignore it during training')
-tf.app.flags.DEFINE_float('min_crop_side_ratio', 0.1,
-                          'when doing random crop from input image, the'
-                          'min length of min(H, W')
-tf.app.flags.DEFINE_string('geometry', 'RBOX',
-                           'which geometry to generate, RBOX or QUAD')
-
-
+import flags
+import tensorflow as tf
 FLAGS = tf.app.flags.FLAGS
diff --git a/lanms/.gitignore b/lanms/.gitignore
index 6a57227e..5797dcd9 100644
--- a/lanms/.gitignore
+++ b/lanms/.gitignore
@@ -1 +1,2 @@
 adaptor.so
+build
diff --git a/lanms/CMakeLists.txt b/lanms/CMakeLists.txt
new file mode 100644
index 00000000..d51a499a
--- /dev/null
+++ b/lanms/CMakeLists.txt
@@ -0,0 +1,26 @@
+cmake_minimum_required(VERSION 3.4)
+project(lanms)
+
+set(CMAKE_CXX_STANDARD 11)
+if(WIN32)
+    set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
+else(WIN32)
+    add_compile_options(-Wall -Wextra -Wpedantic)
+    add_compile_options(-pthread)
+endif(WIN32)
+
+set(SOURCES "lanms.cpp" "include/clipper/clipper.cpp")
+
+add_library(lanms_library SHARED ${SOURCES})
+target_link_libraries(lanms_library)
+target_include_directories(lanms_library PUBLIC ${CMAKE_CURRENT_LIST_DIR})
+
+include_directories("include")
+
+add_executable(main main.cpp ${SOURCES})
+target_link_libraries(main)
+
+add_subdirectory("pybind11")
+pybind11_add_module(adaptor "adaptor.cpp" ${SOURCES})
+target_link_libraries(adaptor PRIVATE lanms_library)
+target_compile_definitions(adaptor PRIVATE)
diff --git a/lanms/__init__.py b/lanms/__init__.py
index 649d6468..9ae5f46f 100644
--- a/lanms/__init__.py
+++ b/lanms/__init__.py
@@ -1,13 +1,5 @@
-import subprocess
-import os
 import numpy as np
 
-BASE_DIR = os.path.dirname(os.path.realpath(__file__))
-
-if subprocess.call(['make', '-C', BASE_DIR]) != 0:  # return value
-    raise RuntimeError('Cannot compile lanms: {}'.format(BASE_DIR))
-
-
 def merge_quadrangle_n9(polys, thres=0.3, precision=10000):
     from .adaptor import merge_quadrangle_n9 as nms_impl
     if len(polys) == 0:
@@ -17,4 +9,3 @@ def merge_quadrangle_n9(polys, thres=0.3, precision=10000):
     ret = np.array(nms_impl(p, thres), dtype='float32')
     ret[:,:8] /= precision
     return ret
-
diff --git a/lanms/include/pybind11/attr.h b/lanms/include/pybind11/attr.h
deleted file mode 100644
index b4137cb2..00000000
--- a/lanms/include/pybind11/attr.h
+++ /dev/null
@@ -1,471 +0,0 @@
-/*
-    pybind11/attr.h: Infrastructure for processing custom
-    type and function attributes
-
-    Copyright (c) 2016 Wenzel Jakob
-
-    All rights reserved. Use of this source code is governed by a
-    BSD-style license that can be found in the LICENSE file.
-*/ - -#pragma once - -#include "cast.h" - -NAMESPACE_BEGIN(pybind11) - -/// \addtogroup annotations -/// @{ - -/// Annotation for methods -struct is_method { handle class_; is_method(const handle &c) : class_(c) { } }; - -/// Annotation for operators -struct is_operator { }; - -/// Annotation for parent scope -struct scope { handle value; scope(const handle &s) : value(s) { } }; - -/// Annotation for documentation -struct doc { const char *value; doc(const char *value) : value(value) { } }; - -/// Annotation for function names -struct name { const char *value; name(const char *value) : value(value) { } }; - -/// Annotation indicating that a function is an overload associated with a given "sibling" -struct sibling { handle value; sibling(const handle &value) : value(value.ptr()) { } }; - -/// Annotation indicating that a class derives from another given type -template struct base { - PYBIND11_DEPRECATED("base() was deprecated in favor of specifying 'T' as a template argument to class_") - base() { } -}; - -/// Keep patient alive while nurse lives -template struct keep_alive { }; - -/// Annotation indicating that a class is involved in a multiple inheritance relationship -struct multiple_inheritance { }; - -/// Annotation which enables dynamic attributes, i.e. adds `__dict__` to a class -struct dynamic_attr { }; - -/// Annotation which enables the buffer protocol for a type -struct buffer_protocol { }; - -/// Annotation which requests that a special metaclass is created for a type -struct metaclass { - handle value; - - PYBIND11_DEPRECATED("py::metaclass() is no longer required. It's turned on by default now.") - metaclass() {} - - /// Override pybind11's default metaclass - explicit metaclass(handle value) : value(value) { } -}; - -/// Annotation to mark enums as an arithmetic type -struct arithmetic { }; - -/** \rst - A call policy which places one or more guard variables (``Ts...``) around the function call. - - For example, this definition: - - .. code-block:: cpp - - m.def("foo", foo, py::call_guard()); - - is equivalent to the following pseudocode: - - .. code-block:: cpp - - m.def("foo", [](args...) 
{ - T scope_guard; - return foo(args...); // forwarded arguments - }); - \endrst */ -template struct call_guard; - -template <> struct call_guard<> { using type = detail::void_type; }; - -template -struct call_guard { - static_assert(std::is_default_constructible::value, - "The guard type must be default constructible"); - - using type = T; -}; - -template -struct call_guard { - struct type { - T guard{}; // Compose multiple guard types with left-to-right default-constructor order - typename call_guard::type next{}; - }; -}; - -/// @} annotations - -NAMESPACE_BEGIN(detail) -/* Forward declarations */ -enum op_id : int; -enum op_type : int; -struct undefined_t; -template struct op_; -template struct init; -template struct init_alias; -inline void keep_alive_impl(size_t Nurse, size_t Patient, function_call &call, handle ret); - -/// Internal data structure which holds metadata about a keyword argument -struct argument_record { - const char *name; ///< Argument name - const char *descr; ///< Human-readable version of the argument value - handle value; ///< Associated Python object - bool convert : 1; ///< True if the argument is allowed to convert when loading - bool none : 1; ///< True if None is allowed when loading - - argument_record(const char *name, const char *descr, handle value, bool convert, bool none) - : name(name), descr(descr), value(value), convert(convert), none(none) { } -}; - -/// Internal data structure which holds metadata about a bound function (signature, overloads, etc.) -struct function_record { - function_record() - : is_constructor(false), is_stateless(false), is_operator(false), - has_args(false), has_kwargs(false), is_method(false) { } - - /// Function name - char *name = nullptr; /* why no C++ strings? They generate heavier code.. */ - - // User-specified documentation string - char *doc = nullptr; - - /// Human-readable version of the function signature - char *signature = nullptr; - - /// List of registered keyword arguments - std::vector args; - - /// Pointer to lambda function which converts arguments and performs the actual call - handle (*impl) (function_call &) = nullptr; - - /// Storage for the wrapped function pointer and captured data, if any - void *data[3] = { }; - - /// Pointer to custom destructor for 'data' (if needed) - void (*free_data) (function_record *ptr) = nullptr; - - /// Return value policy associated with this function - return_value_policy policy = return_value_policy::automatic; - - /// True if name == '__init__' - bool is_constructor : 1; - - /// True if this is a stateless function pointer - bool is_stateless : 1; - - /// True if this is an operator (__add__), etc. 
- bool is_operator : 1; - - /// True if the function has a '*args' argument - bool has_args : 1; - - /// True if the function has a '**kwargs' argument - bool has_kwargs : 1; - - /// True if this is a method - bool is_method : 1; - - /// Number of arguments (including py::args and/or py::kwargs, if present) - std::uint16_t nargs; - - /// Python method object - PyMethodDef *def = nullptr; - - /// Python handle to the parent scope (a class or a module) - handle scope; - - /// Python handle to the sibling function representing an overload chain - handle sibling; - - /// Pointer to next overload - function_record *next = nullptr; -}; - -/// Special data structure which (temporarily) holds metadata about a bound class -struct type_record { - PYBIND11_NOINLINE type_record() - : multiple_inheritance(false), dynamic_attr(false), buffer_protocol(false) { } - - /// Handle to the parent scope - handle scope; - - /// Name of the class - const char *name = nullptr; - - // Pointer to RTTI type_info data structure - const std::type_info *type = nullptr; - - /// How large is the underlying C++ type? - size_t type_size = 0; - - /// How large is the type's holder? - size_t holder_size = 0; - - /// The global operator new can be overridden with a class-specific variant - void *(*operator_new)(size_t) = ::operator new; - - /// Function pointer to class_<..>::init_instance - void (*init_instance)(instance *, const void *) = nullptr; - - /// Function pointer to class_<..>::dealloc - void (*dealloc)(const detail::value_and_holder &) = nullptr; - - /// List of base classes of the newly created type - list bases; - - /// Optional docstring - const char *doc = nullptr; - - /// Custom metaclass (optional) - handle metaclass; - - /// Multiple inheritance marker - bool multiple_inheritance : 1; - - /// Does the class manage a __dict__? - bool dynamic_attr : 1; - - /// Does the class implement the buffer protocol? - bool buffer_protocol : 1; - - /// Is the default (unique_ptr) holder type used? - bool default_holder : 1; - - PYBIND11_NOINLINE void add_base(const std::type_info &base, void *(*caster)(void *)) { - auto base_info = detail::get_type_info(base, false); - if (!base_info) { - std::string tname(base.name()); - detail::clean_type_id(tname); - pybind11_fail("generic_type: type \"" + std::string(name) + - "\" referenced unknown base type \"" + tname + "\""); - } - - if (default_holder != base_info->default_holder) { - std::string tname(base.name()); - detail::clean_type_id(tname); - pybind11_fail("generic_type: type \"" + std::string(name) + "\" " + - (default_holder ? "does not have" : "has") + - " a non-default holder type while its base \"" + tname + "\" " + - (base_info->default_holder ? "does not" : "does")); - } - - bases.append((PyObject *) base_info->type); - - if (base_info->type->tp_dictoffset != 0) - dynamic_attr = true; - - if (caster) - base_info->implicit_casts.emplace_back(type, caster); - } -}; - -inline function_call::function_call(function_record &f, handle p) : - func(f), parent(p) { - args.reserve(f.nargs); - args_convert.reserve(f.nargs); -} - -/** - * Partial template specializations to process custom attributes provided to - * cpp_function_ and class_. These are either used to initialize the respective - * fields in the type_record and function_record data structures or executed at - * runtime to deal with custom call policies (e.g. keep_alive). 
- */ -template struct process_attribute; - -template struct process_attribute_default { - /// Default implementation: do nothing - static void init(const T &, function_record *) { } - static void init(const T &, type_record *) { } - static void precall(function_call &) { } - static void postcall(function_call &, handle) { } -}; - -/// Process an attribute specifying the function's name -template <> struct process_attribute : process_attribute_default { - static void init(const name &n, function_record *r) { r->name = const_cast(n.value); } -}; - -/// Process an attribute specifying the function's docstring -template <> struct process_attribute : process_attribute_default { - static void init(const doc &n, function_record *r) { r->doc = const_cast(n.value); } -}; - -/// Process an attribute specifying the function's docstring (provided as a C-style string) -template <> struct process_attribute : process_attribute_default { - static void init(const char *d, function_record *r) { r->doc = const_cast(d); } - static void init(const char *d, type_record *r) { r->doc = const_cast(d); } -}; -template <> struct process_attribute : process_attribute { }; - -/// Process an attribute indicating the function's return value policy -template <> struct process_attribute : process_attribute_default { - static void init(const return_value_policy &p, function_record *r) { r->policy = p; } -}; - -/// Process an attribute which indicates that this is an overloaded function associated with a given sibling -template <> struct process_attribute : process_attribute_default { - static void init(const sibling &s, function_record *r) { r->sibling = s.value; } -}; - -/// Process an attribute which indicates that this function is a method -template <> struct process_attribute : process_attribute_default { - static void init(const is_method &s, function_record *r) { r->is_method = true; r->scope = s.class_; } -}; - -/// Process an attribute which indicates the parent scope of a method -template <> struct process_attribute : process_attribute_default { - static void init(const scope &s, function_record *r) { r->scope = s.value; } -}; - -/// Process an attribute which indicates that this function is an operator -template <> struct process_attribute : process_attribute_default { - static void init(const is_operator &, function_record *r) { r->is_operator = true; } -}; - -/// Process a keyword argument attribute (*without* a default value) -template <> struct process_attribute : process_attribute_default { - static void init(const arg &a, function_record *r) { - if (r->is_method && r->args.empty()) - r->args.emplace_back("self", nullptr, handle(), true /*convert*/, false /*none not allowed*/); - r->args.emplace_back(a.name, nullptr, handle(), !a.flag_noconvert, a.flag_none); - } -}; - -/// Process a keyword argument attribute (*with* a default value) -template <> struct process_attribute : process_attribute_default { - static void init(const arg_v &a, function_record *r) { - if (r->is_method && r->args.empty()) - r->args.emplace_back("self", nullptr /*descr*/, handle() /*parent*/, true /*convert*/, false /*none not allowed*/); - - if (!a.value) { -#if !defined(NDEBUG) - std::string descr("'"); - if (a.name) descr += std::string(a.name) + ": "; - descr += a.type + "'"; - if (r->is_method) { - if (r->name) - descr += " in method '" + (std::string) str(r->scope) + "." 
+ (std::string) r->name + "'"; - else - descr += " in method of '" + (std::string) str(r->scope) + "'"; - } else if (r->name) { - descr += " in function '" + (std::string) r->name + "'"; - } - pybind11_fail("arg(): could not convert default argument " - + descr + " into a Python object (type not registered yet?)"); -#else - pybind11_fail("arg(): could not convert default argument " - "into a Python object (type not registered yet?). " - "Compile in debug mode for more information."); -#endif - } - r->args.emplace_back(a.name, a.descr, a.value.inc_ref(), !a.flag_noconvert, a.flag_none); - } -}; - -/// Process a parent class attribute. Single inheritance only (class_ itself already guarantees that) -template -struct process_attribute::value>> : process_attribute_default { - static void init(const handle &h, type_record *r) { r->bases.append(h); } -}; - -/// Process a parent class attribute (deprecated, does not support multiple inheritance) -template -struct process_attribute> : process_attribute_default> { - static void init(const base &, type_record *r) { r->add_base(typeid(T), nullptr); } -}; - -/// Process a multiple inheritance attribute -template <> -struct process_attribute : process_attribute_default { - static void init(const multiple_inheritance &, type_record *r) { r->multiple_inheritance = true; } -}; - -template <> -struct process_attribute : process_attribute_default { - static void init(const dynamic_attr &, type_record *r) { r->dynamic_attr = true; } -}; - -template <> -struct process_attribute : process_attribute_default { - static void init(const buffer_protocol &, type_record *r) { r->buffer_protocol = true; } -}; - -template <> -struct process_attribute : process_attribute_default { - static void init(const metaclass &m, type_record *r) { r->metaclass = m.value; } -}; - - -/// Process an 'arithmetic' attribute for enums (does nothing here) -template <> -struct process_attribute : process_attribute_default {}; - -template -struct process_attribute> : process_attribute_default> { }; - -/** - * Process a keep_alive call policy -- invokes keep_alive_impl during the - * pre-call handler if both Nurse, Patient != 0 and use the post-call handler - * otherwise - */ -template struct process_attribute> : public process_attribute_default> { - template = 0> - static void precall(function_call &call) { keep_alive_impl(Nurse, Patient, call, handle()); } - template = 0> - static void postcall(function_call &, handle) { } - template = 0> - static void precall(function_call &) { } - template = 0> - static void postcall(function_call &call, handle ret) { keep_alive_impl(Nurse, Patient, call, ret); } -}; - -/// Recursively iterate over variadic template arguments -template struct process_attributes { - static void init(const Args&... args, function_record *r) { - int unused[] = { 0, (process_attribute::type>::init(args, r), 0) ... }; - ignore_unused(unused); - } - static void init(const Args&... args, type_record *r) { - int unused[] = { 0, (process_attribute::type>::init(args, r), 0) ... }; - ignore_unused(unused); - } - static void precall(function_call &call) { - int unused[] = { 0, (process_attribute::type>::precall(call), 0) ... }; - ignore_unused(unused); - } - static void postcall(function_call &call, handle fn_ret) { - int unused[] = { 0, (process_attribute::type>::postcall(call, fn_ret), 0) ... 
}; - ignore_unused(unused); - } -}; - -template -using is_call_guard = is_instantiation; - -/// Extract the ``type`` from the first `call_guard` in `Extras...` (or `void_type` if none found) -template -using extract_guard_t = typename exactly_one_t, Extra...>::type; - -/// Check the number of named arguments at compile time -template ::value...), - size_t self = constexpr_sum(std::is_same::value...)> -constexpr bool expected_num_args(size_t nargs, bool has_args, bool has_kwargs) { - return named == 0 || (self + named + has_args + has_kwargs) == nargs; -} - -NAMESPACE_END(detail) -NAMESPACE_END(pybind11) diff --git a/lanms/include/pybind11/buffer_info.h b/lanms/include/pybind11/buffer_info.h deleted file mode 100644 index 6d1167d2..00000000 --- a/lanms/include/pybind11/buffer_info.h +++ /dev/null @@ -1,108 +0,0 @@ -/* - pybind11/buffer_info.h: Python buffer object interface - - Copyright (c) 2016 Wenzel Jakob - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. -*/ - -#pragma once - -#include "common.h" - -NAMESPACE_BEGIN(pybind11) - -/// Information record describing a Python buffer object -struct buffer_info { - void *ptr = nullptr; // Pointer to the underlying storage - ssize_t itemsize = 0; // Size of individual items in bytes - ssize_t size = 0; // Total number of entries - std::string format; // For homogeneous buffers, this should be set to format_descriptor::format() - ssize_t ndim = 0; // Number of dimensions - std::vector shape; // Shape of the tensor (1 entry per dimension) - std::vector strides; // Number of entries between adjacent entries (for each per dimension) - - buffer_info() { } - - buffer_info(void *ptr, ssize_t itemsize, const std::string &format, ssize_t ndim, - detail::any_container shape_in, detail::any_container strides_in) - : ptr(ptr), itemsize(itemsize), size(1), format(format), ndim(ndim), - shape(std::move(shape_in)), strides(std::move(strides_in)) { - if (ndim != (ssize_t) shape.size() || ndim != (ssize_t) strides.size()) - pybind11_fail("buffer_info: ndim doesn't match shape and/or strides length"); - for (size_t i = 0; i < (size_t) ndim; ++i) - size *= shape[i]; - } - - template - buffer_info(T *ptr, detail::any_container shape_in, detail::any_container strides_in) - : buffer_info(private_ctr_tag(), ptr, sizeof(T), format_descriptor::format(), static_cast(shape_in->size()), std::move(shape_in), std::move(strides_in)) { } - - buffer_info(void *ptr, ssize_t itemsize, const std::string &format, ssize_t size) - : buffer_info(ptr, itemsize, format, 1, {size}, {itemsize}) { } - - template - buffer_info(T *ptr, ssize_t size) - : buffer_info(ptr, sizeof(T), format_descriptor::format(), size) { } - - explicit buffer_info(Py_buffer *view, bool ownview = true) - : buffer_info(view->buf, view->itemsize, view->format, view->ndim, - {view->shape, view->shape + view->ndim}, {view->strides, view->strides + view->ndim}) { - this->view = view; - this->ownview = ownview; - } - - buffer_info(const buffer_info &) = delete; - buffer_info& operator=(const buffer_info &) = delete; - - buffer_info(buffer_info &&other) { - (*this) = std::move(other); - } - - buffer_info& operator=(buffer_info &&rhs) { - ptr = rhs.ptr; - itemsize = rhs.itemsize; - size = rhs.size; - format = std::move(rhs.format); - ndim = rhs.ndim; - shape = std::move(rhs.shape); - strides = std::move(rhs.strides); - std::swap(view, rhs.view); - std::swap(ownview, rhs.ownview); - return *this; - } - - ~buffer_info() { - if (view && ownview) { 
PyBuffer_Release(view); delete view; } - } - -private: - struct private_ctr_tag { }; - - buffer_info(private_ctr_tag, void *ptr, ssize_t itemsize, const std::string &format, ssize_t ndim, - detail::any_container &&shape_in, detail::any_container &&strides_in) - : buffer_info(ptr, itemsize, format, ndim, std::move(shape_in), std::move(strides_in)) { } - - Py_buffer *view = nullptr; - bool ownview = false; -}; - -NAMESPACE_BEGIN(detail) - -template struct compare_buffer_info { - static bool compare(const buffer_info& b) { - return b.format == format_descriptor::format() && b.itemsize == (ssize_t) sizeof(T); - } -}; - -template struct compare_buffer_info::value>> { - static bool compare(const buffer_info& b) { - return (size_t) b.itemsize == sizeof(T) && (b.format == format_descriptor::value || - ((sizeof(T) == sizeof(long)) && b.format == (std::is_unsigned::value ? "L" : "l")) || - ((sizeof(T) == sizeof(size_t)) && b.format == (std::is_unsigned::value ? "N" : "n"))); - } -}; - -NAMESPACE_END(detail) -NAMESPACE_END(pybind11) diff --git a/lanms/include/pybind11/cast.h b/lanms/include/pybind11/cast.h deleted file mode 100644 index 5db03e2f..00000000 --- a/lanms/include/pybind11/cast.h +++ /dev/null @@ -1,2058 +0,0 @@ -/* - pybind11/cast.h: Partial template specializations to cast between - C++ and Python types - - Copyright (c) 2016 Wenzel Jakob - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. -*/ - -#pragma once - -#include "pytypes.h" -#include "typeid.h" -#include "descr.h" -#include -#include -#include - -#if defined(PYBIND11_CPP17) -# if defined(__has_include) -# if __has_include() -# define PYBIND11_HAS_STRING_VIEW -# endif -# elif defined(_MSC_VER) -# define PYBIND11_HAS_STRING_VIEW -# endif -#endif -#ifdef PYBIND11_HAS_STRING_VIEW -#include -#endif - -NAMESPACE_BEGIN(pybind11) -NAMESPACE_BEGIN(detail) -// Forward declarations: -inline PyTypeObject *make_static_property_type(); -inline PyTypeObject *make_default_metaclass(); -inline PyObject *make_object_base_type(PyTypeObject *metaclass); -struct value_and_holder; - -/// Additional type information which does not fit into the PyTypeObject -struct type_info { - PyTypeObject *type; - const std::type_info *cpptype; - size_t type_size, holder_size_in_ptrs; - void *(*operator_new)(size_t); - void (*init_instance)(instance *, const void *); - void (*dealloc)(const value_and_holder &v_h); - std::vector implicit_conversions; - std::vector> implicit_casts; - std::vector *direct_conversions; - buffer_info *(*get_buffer)(PyObject *, void *) = nullptr; - void *get_buffer_data = nullptr; - /* A simple type never occurs as a (direct or indirect) parent - * of a class that makes use of multiple inheritance */ - bool simple_type : 1; - /* True if there is no multiple inheritance in this type's inheritance tree */ - bool simple_ancestors : 1; - /* for base vs derived holder_type checks */ - bool default_holder : 1; -}; - -// Store the static internals pointer in a version-specific function so that we're guaranteed it -// will be distinct for modules compiled for different pybind11 versions. Without this, some -// compilers (i.e. gcc) can use the same static pointer storage location across different .so's, -// even though the `get_internals()` function itself is local to each shared object. 
-template -internals *&get_internals_ptr() { static internals *internals_ptr = nullptr; return internals_ptr; } - -PYBIND11_NOINLINE inline internals &get_internals() { - internals *&internals_ptr = get_internals_ptr(); - if (internals_ptr) - return *internals_ptr; - handle builtins(PyEval_GetBuiltins()); - const char *id = PYBIND11_INTERNALS_ID; - if (builtins.contains(id) && isinstance(builtins[id])) { - internals_ptr = *static_cast(capsule(builtins[id])); - } else { - internals_ptr = new internals(); - #if defined(WITH_THREAD) - PyEval_InitThreads(); - PyThreadState *tstate = PyThreadState_Get(); - internals_ptr->tstate = PyThread_create_key(); - PyThread_set_key_value(internals_ptr->tstate, tstate); - internals_ptr->istate = tstate->interp; - #endif - builtins[id] = capsule(&internals_ptr); - internals_ptr->registered_exception_translators.push_front( - [](std::exception_ptr p) -> void { - try { - if (p) std::rethrow_exception(p); - } catch (error_already_set &e) { e.restore(); return; - } catch (const builtin_exception &e) { e.set_error(); return; - } catch (const std::bad_alloc &e) { PyErr_SetString(PyExc_MemoryError, e.what()); return; - } catch (const std::domain_error &e) { PyErr_SetString(PyExc_ValueError, e.what()); return; - } catch (const std::invalid_argument &e) { PyErr_SetString(PyExc_ValueError, e.what()); return; - } catch (const std::length_error &e) { PyErr_SetString(PyExc_ValueError, e.what()); return; - } catch (const std::out_of_range &e) { PyErr_SetString(PyExc_IndexError, e.what()); return; - } catch (const std::range_error &e) { PyErr_SetString(PyExc_ValueError, e.what()); return; - } catch (const std::exception &e) { PyErr_SetString(PyExc_RuntimeError, e.what()); return; - } catch (...) { - PyErr_SetString(PyExc_RuntimeError, "Caught an unknown exception!"); - return; - } - } - ); - internals_ptr->static_property_type = make_static_property_type(); - internals_ptr->default_metaclass = make_default_metaclass(); - internals_ptr->instance_base = make_object_base_type(internals_ptr->default_metaclass); - } - return *internals_ptr; -} - -/// A life support system for temporary objects created by `type_caster::load()`. -/// Adding a patient will keep it alive up until the enclosing function returns. -class loader_life_support { -public: - /// A new patient frame is created when a function is entered - loader_life_support() { - get_internals().loader_patient_stack.push_back(nullptr); - } - - /// ... and destroyed after it returns - ~loader_life_support() { - auto &stack = get_internals().loader_patient_stack; - if (stack.empty()) - pybind11_fail("loader_life_support: internal error"); - - auto ptr = stack.back(); - stack.pop_back(); - Py_CLEAR(ptr); - - // A heuristic to reduce the stack's capacity (e.g. after long recursive calls) - if (stack.capacity() > 16 && stack.size() != 0 && stack.capacity() / stack.size() > 2) - stack.shrink_to_fit(); - } - - /// This can only be used inside a pybind11-bound function, either by `argument_loader` - /// at argument preparation time or by `py::cast()` at execution time. 
- PYBIND11_NOINLINE static void add_patient(handle h) { - auto &stack = get_internals().loader_patient_stack; - if (stack.empty()) - throw cast_error("When called outside a bound function, py::cast() cannot " - "do Python -> C++ conversions which require the creation " - "of temporary values"); - - auto &list_ptr = stack.back(); - if (list_ptr == nullptr) { - list_ptr = PyList_New(1); - if (!list_ptr) - pybind11_fail("loader_life_support: error allocating list"); - PyList_SET_ITEM(list_ptr, 0, h.inc_ref().ptr()); - } else { - auto result = PyList_Append(list_ptr, h.ptr()); - if (result == -1) - pybind11_fail("loader_life_support: error adding patient"); - } - } -}; - -// Gets the cache entry for the given type, creating it if necessary. The return value is the pair -// returned by emplace, i.e. an iterator for the entry and a bool set to `true` if the entry was -// just created. -inline std::pair all_type_info_get_cache(PyTypeObject *type); - -// Populates a just-created cache entry. -PYBIND11_NOINLINE inline void all_type_info_populate(PyTypeObject *t, std::vector &bases) { - std::vector check; - for (handle parent : reinterpret_borrow(t->tp_bases)) - check.push_back((PyTypeObject *) parent.ptr()); - - auto const &type_dict = get_internals().registered_types_py; - for (size_t i = 0; i < check.size(); i++) { - auto type = check[i]; - // Ignore Python2 old-style class super type: - if (!PyType_Check((PyObject *) type)) continue; - - // Check `type` in the current set of registered python types: - auto it = type_dict.find(type); - if (it != type_dict.end()) { - // We found a cache entry for it, so it's either pybind-registered or has pre-computed - // pybind bases, but we have to make sure we haven't already seen the type(s) before: we - // want to follow Python/virtual C++ rules that there should only be one instance of a - // common base. - for (auto *tinfo : it->second) { - // NB: Could use a second set here, rather than doing a linear search, but since - // having a large number of immediate pybind11-registered types seems fairly - // unlikely, that probably isn't worthwhile. - bool found = false; - for (auto *known : bases) { - if (known == tinfo) { found = true; break; } - } - if (!found) bases.push_back(tinfo); - } - } - else if (type->tp_bases) { - // It's some python type, so keep follow its bases classes to look for one or more - // registered types - if (i + 1 == check.size()) { - // When we're at the end, we can pop off the current element to avoid growing - // `check` when adding just one base (which is typical--.e. when there is no - // multiple inheritance) - check.pop_back(); - i--; - } - for (handle parent : reinterpret_borrow(type->tp_bases)) - check.push_back((PyTypeObject *) parent.ptr()); - } - } -} - -/** - * Extracts vector of type_info pointers of pybind-registered roots of the given Python type. Will - * be just 1 pybind type for the Python type of a pybind-registered class, or for any Python-side - * derived class that uses single inheritance. Will contain as many types as required for a Python - * class that uses multiple inheritance to inherit (directly or indirectly) from multiple - * pybind-registered classes. Will be empty if neither the type nor any base classes are - * pybind-registered. - * - * The value is cached for the lifetime of the Python type. 
- */ -inline const std::vector &all_type_info(PyTypeObject *type) { - auto ins = all_type_info_get_cache(type); - if (ins.second) - // New cache entry: populate it - all_type_info_populate(type, ins.first->second); - - return ins.first->second; -} - -/** - * Gets a single pybind11 type info for a python type. Returns nullptr if neither the type nor any - * ancestors are pybind11-registered. Throws an exception if there are multiple bases--use - * `all_type_info` instead if you want to support multiple bases. - */ -PYBIND11_NOINLINE inline detail::type_info* get_type_info(PyTypeObject *type) { - auto &bases = all_type_info(type); - if (bases.size() == 0) - return nullptr; - if (bases.size() > 1) - pybind11_fail("pybind11::detail::get_type_info: type has multiple pybind11-registered bases"); - return bases.front(); -} - -PYBIND11_NOINLINE inline detail::type_info *get_type_info(const std::type_info &tp, - bool throw_if_missing = false) { - auto &types = get_internals().registered_types_cpp; - - auto it = types.find(std::type_index(tp)); - if (it != types.end()) - return (detail::type_info *) it->second; - if (throw_if_missing) { - std::string tname = tp.name(); - detail::clean_type_id(tname); - pybind11_fail("pybind11::detail::get_type_info: unable to find type info for \"" + tname + "\""); - } - return nullptr; -} - -PYBIND11_NOINLINE inline handle get_type_handle(const std::type_info &tp, bool throw_if_missing) { - detail::type_info *type_info = get_type_info(tp, throw_if_missing); - return handle(type_info ? ((PyObject *) type_info->type) : nullptr); -} - -struct value_and_holder { - instance *inst; - size_t index; - const detail::type_info *type; - void **vh; - - value_and_holder(instance *i, const detail::type_info *type, size_t vpos, size_t index) : - inst{i}, index{index}, type{type}, - vh{inst->simple_layout ? inst->simple_value_holder : &inst->nonsimple.values_and_holders[vpos]} - {} - - // Used for past-the-end iterator - value_and_holder(size_t index) : index{index} {} - - template V *&value_ptr() const { - return reinterpret_cast(vh[0]); - } - // True if this `value_and_holder` has a non-null value pointer - explicit operator bool() const { return value_ptr(); } - - template H &holder() const { - return reinterpret_cast(vh[1]); - } - bool holder_constructed() const { - return inst->simple_layout - ? inst->simple_holder_constructed - : inst->nonsimple.status[index] & instance::status_holder_constructed; - } - void set_holder_constructed() { - if (inst->simple_layout) - inst->simple_holder_constructed = true; - else - inst->nonsimple.status[index] |= instance::status_holder_constructed; - } - bool instance_registered() const { - return inst->simple_layout - ? 
inst->simple_instance_registered - : inst->nonsimple.status[index] & instance::status_instance_registered; - } - void set_instance_registered() { - if (inst->simple_layout) - inst->simple_instance_registered = true; - else - inst->nonsimple.status[index] |= instance::status_instance_registered; - } -}; - -// Container for accessing and iterating over an instance's values/holders -struct values_and_holders { -private: - instance *inst; - using type_vec = std::vector; - const type_vec &tinfo; - -public: - values_and_holders(instance *inst) : inst{inst}, tinfo(all_type_info(Py_TYPE(inst))) {} - - struct iterator { - private: - instance *inst; - const type_vec *types; - value_and_holder curr; - friend struct values_and_holders; - iterator(instance *inst, const type_vec *tinfo) - : inst{inst}, types{tinfo}, - curr(inst /* instance */, - types->empty() ? nullptr : (*types)[0] /* type info */, - 0, /* vpos: (non-simple types only): the first vptr comes first */ - 0 /* index */) - {} - // Past-the-end iterator: - iterator(size_t end) : curr(end) {} - public: - bool operator==(const iterator &other) { return curr.index == other.curr.index; } - bool operator!=(const iterator &other) { return curr.index != other.curr.index; } - iterator &operator++() { - if (!inst->simple_layout) - curr.vh += 1 + (*types)[curr.index]->holder_size_in_ptrs; - ++curr.index; - curr.type = curr.index < types->size() ? (*types)[curr.index] : nullptr; - return *this; - } - value_and_holder &operator*() { return curr; } - value_and_holder *operator->() { return &curr; } - }; - - iterator begin() { return iterator(inst, &tinfo); } - iterator end() { return iterator(tinfo.size()); } - - iterator find(const type_info *find_type) { - auto it = begin(), endit = end(); - while (it != endit && it->type != find_type) ++it; - return it; - } - - size_t size() { return tinfo.size(); } -}; - -/** - * Extracts C++ value and holder pointer references from an instance (which may contain multiple - * values/holders for python-side multiple inheritance) that match the given type. Throws an error - * if the given type (or ValueType, if omitted) is not a pybind11 base of the given instance. If - * `find_type` is omitted (or explicitly specified as nullptr) the first value/holder are returned, - * regardless of type (and the resulting .type will be nullptr). - * - * The returned object should be short-lived: in particular, it must not outlive the called-upon - * instance. 
- */ -PYBIND11_NOINLINE inline value_and_holder instance::get_value_and_holder(const type_info *find_type /*= nullptr default in common.h*/) { - // Optimize common case: - if (!find_type || Py_TYPE(this) == find_type->type) - return value_and_holder(this, find_type, 0, 0); - - detail::values_and_holders vhs(this); - auto it = vhs.find(find_type); - if (it != vhs.end()) - return *it; - -#if defined(NDEBUG) - pybind11_fail("pybind11::detail::instance::get_value_and_holder: " - "type is not a pybind11 base of the given instance " - "(compile in debug mode for type details)"); -#else - pybind11_fail("pybind11::detail::instance::get_value_and_holder: `" + - std::string(find_type->type->tp_name) + "' is not a pybind11 base of the given `" + - std::string(Py_TYPE(this)->tp_name) + "' instance"); -#endif -} - -PYBIND11_NOINLINE inline void instance::allocate_layout() { - auto &tinfo = all_type_info(Py_TYPE(this)); - - const size_t n_types = tinfo.size(); - - if (n_types == 0) - pybind11_fail("instance allocation failed: new instance has no pybind11-registered base types"); - - simple_layout = - n_types == 1 && tinfo.front()->holder_size_in_ptrs <= instance_simple_holder_in_ptrs(); - - // Simple path: no python-side multiple inheritance, and a small-enough holder - if (simple_layout) { - simple_value_holder[0] = nullptr; - simple_holder_constructed = false; - simple_instance_registered = false; - } - else { // multiple base types or a too-large holder - // Allocate space to hold: [v1*][h1][v2*][h2]...[bb...] where [vN*] is a value pointer, - // [hN] is the (uninitialized) holder instance for value N, and [bb...] is a set of bool - // values that tracks whether each associated holder has been initialized. Each [block] is - // padded, if necessary, to an integer multiple of sizeof(void *). - size_t space = 0; - for (auto t : tinfo) { - space += 1; // value pointer - space += t->holder_size_in_ptrs; // holder instance - } - size_t flags_at = space; - space += size_in_ptrs(n_types); // status bytes (holder_constructed and instance_registered) - - // Allocate space for flags, values, and holders, and initialize it to 0 (flags and values, - // in particular, need to be 0). Use Python's memory allocation functions: in Python 3.6 - // they default to using pymalloc, which is designed to be efficient for small allocations - // like the one we're doing here; in earlier versions (and for larger allocations) they are - // just wrappers around malloc. 
-#if PY_VERSION_HEX >= 0x03050000 - nonsimple.values_and_holders = (void **) PyMem_Calloc(space, sizeof(void *)); - if (!nonsimple.values_and_holders) throw std::bad_alloc(); -#else - nonsimple.values_and_holders = (void **) PyMem_New(void *, space); - if (!nonsimple.values_and_holders) throw std::bad_alloc(); - std::memset(nonsimple.values_and_holders, 0, space * sizeof(void *)); -#endif - nonsimple.status = reinterpret_cast(&nonsimple.values_and_holders[flags_at]); - } - owned = true; -} - -PYBIND11_NOINLINE inline void instance::deallocate_layout() { - if (!simple_layout) - PyMem_Free(nonsimple.values_and_holders); -} - -PYBIND11_NOINLINE inline bool isinstance_generic(handle obj, const std::type_info &tp) { - handle type = detail::get_type_handle(tp, false); - if (!type) - return false; - return isinstance(obj, type); -} - -PYBIND11_NOINLINE inline std::string error_string() { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_RuntimeError, "Unknown internal error occurred"); - return "Unknown internal error occurred"; - } - - error_scope scope; // Preserve error state - - std::string errorString; - if (scope.type) { - errorString += handle(scope.type).attr("__name__").cast(); - errorString += ": "; - } - if (scope.value) - errorString += (std::string) str(scope.value); - - PyErr_NormalizeException(&scope.type, &scope.value, &scope.trace); - -#if PY_MAJOR_VERSION >= 3 - if (scope.trace != nullptr) - PyException_SetTraceback(scope.value, scope.trace); -#endif - -#if !defined(PYPY_VERSION) - if (scope.trace) { - PyTracebackObject *trace = (PyTracebackObject *) scope.trace; - - /* Get the deepest trace possible */ - while (trace->tb_next) - trace = trace->tb_next; - - PyFrameObject *frame = trace->tb_frame; - errorString += "\n\nAt:\n"; - while (frame) { - int lineno = PyFrame_GetLineNumber(frame); - errorString += - " " + handle(frame->f_code->co_filename).cast() + - "(" + std::to_string(lineno) + "): " + - handle(frame->f_code->co_name).cast() + "\n"; - frame = frame->f_back; - } - trace = trace->tb_next; - } -#endif - - return errorString; -} - -PYBIND11_NOINLINE inline handle get_object_handle(const void *ptr, const detail::type_info *type ) { - auto &instances = get_internals().registered_instances; - auto range = instances.equal_range(ptr); - for (auto it = range.first; it != range.second; ++it) { - for (auto vh : values_and_holders(it->second)) { - if (vh.type == type) - return handle((PyObject *) it->second); - } - } - return handle(); -} - -inline PyThreadState *get_thread_state_unchecked() { -#if defined(PYPY_VERSION) - return PyThreadState_GET(); -#elif PY_VERSION_HEX < 0x03000000 - return _PyThreadState_Current; -#elif PY_VERSION_HEX < 0x03050000 - return (PyThreadState*) _Py_atomic_load_relaxed(&_PyThreadState_Current); -#elif PY_VERSION_HEX < 0x03050200 - return (PyThreadState*) _PyThreadState_Current.value; -#else - return _PyThreadState_UncheckedGet(); -#endif -} - -// Forward declarations -inline void keep_alive_impl(handle nurse, handle patient); -inline PyObject *make_new_instance(PyTypeObject *type, bool allocate_value = true); - -class type_caster_generic { -public: - PYBIND11_NOINLINE type_caster_generic(const std::type_info &type_info) - : typeinfo(get_type_info(type_info)) { } - - bool load(handle src, bool convert) { - return load_impl(src, convert); - } - - PYBIND11_NOINLINE static handle cast(const void *_src, return_value_policy policy, handle parent, - const detail::type_info *tinfo, - void *(*copy_constructor)(const void *), - void *(*move_constructor)(const 
void *), - const void *existing_holder = nullptr) { - if (!tinfo) // no type info: error will be set already - return handle(); - - void *src = const_cast(_src); - if (src == nullptr) - return none().release(); - - auto it_instances = get_internals().registered_instances.equal_range(src); - for (auto it_i = it_instances.first; it_i != it_instances.second; ++it_i) { - for (auto instance_type : detail::all_type_info(Py_TYPE(it_i->second))) { - if (instance_type && instance_type == tinfo) - return handle((PyObject *) it_i->second).inc_ref(); - } - } - - auto inst = reinterpret_steal(make_new_instance(tinfo->type, false /* don't allocate value */)); - auto wrapper = reinterpret_cast(inst.ptr()); - wrapper->owned = false; - void *&valueptr = values_and_holders(wrapper).begin()->value_ptr(); - - switch (policy) { - case return_value_policy::automatic: - case return_value_policy::take_ownership: - valueptr = src; - wrapper->owned = true; - break; - - case return_value_policy::automatic_reference: - case return_value_policy::reference: - valueptr = src; - wrapper->owned = false; - break; - - case return_value_policy::copy: - if (copy_constructor) - valueptr = copy_constructor(src); - else - throw cast_error("return_value_policy = copy, but the " - "object is non-copyable!"); - wrapper->owned = true; - break; - - case return_value_policy::move: - if (move_constructor) - valueptr = move_constructor(src); - else if (copy_constructor) - valueptr = copy_constructor(src); - else - throw cast_error("return_value_policy = move, but the " - "object is neither movable nor copyable!"); - wrapper->owned = true; - break; - - case return_value_policy::reference_internal: - valueptr = src; - wrapper->owned = false; - keep_alive_impl(inst, parent); - break; - - default: - throw cast_error("unhandled return_value_policy: should not happen!"); - } - - tinfo->init_instance(wrapper, existing_holder); - - return inst.release(); - } - -protected: - - // Base methods for generic caster; there are overridden in copyable_holder_caster - void load_value(const value_and_holder &v_h) { - value = v_h.value_ptr(); - } - bool try_implicit_casts(handle src, bool convert) { - for (auto &cast : typeinfo->implicit_casts) { - type_caster_generic sub_caster(*cast.first); - if (sub_caster.load(src, convert)) { - value = cast.second(sub_caster.value); - return true; - } - } - return false; - } - bool try_direct_conversions(handle src) { - for (auto &converter : *typeinfo->direct_conversions) { - if (converter(src.ptr(), value)) - return true; - } - return false; - } - void check_holder_compat() {} - - // Implementation of `load`; this takes the type of `this` so that it can dispatch the relevant - // bits of code between here and copyable_holder_caster where the two classes need different - // logic (without having to resort to virtual inheritance). 
- template - PYBIND11_NOINLINE bool load_impl(handle src, bool convert) { - if (!src || !typeinfo) - return false; - if (src.is_none()) { - // Defer accepting None to other overloads (if we aren't in convert mode): - if (!convert) return false; - value = nullptr; - return true; - } - - auto &this_ = static_cast(*this); - this_.check_holder_compat(); - - PyTypeObject *srctype = Py_TYPE(src.ptr()); - - // Case 1: If src is an exact type match for the target type then we can reinterpret_cast - // the instance's value pointer to the target type: - if (srctype == typeinfo->type) { - this_.load_value(reinterpret_cast(src.ptr())->get_value_and_holder()); - return true; - } - // Case 2: We have a derived class - else if (PyType_IsSubtype(srctype, typeinfo->type)) { - auto &bases = all_type_info(srctype); - bool no_cpp_mi = typeinfo->simple_type; - - // Case 2a: the python type is a Python-inherited derived class that inherits from just - // one simple (no MI) pybind11 class, or is an exact match, so the C++ instance is of - // the right type and we can use reinterpret_cast. - // (This is essentially the same as case 2b, but because not using multiple inheritance - // is extremely common, we handle it specially to avoid the loop iterator and type - // pointer lookup overhead) - if (bases.size() == 1 && (no_cpp_mi || bases.front()->type == typeinfo->type)) { - this_.load_value(reinterpret_cast(src.ptr())->get_value_and_holder()); - return true; - } - // Case 2b: the python type inherits from multiple C++ bases. Check the bases to see if - // we can find an exact match (or, for a simple C++ type, an inherited match); if so, we - // can safely reinterpret_cast to the relevant pointer. - else if (bases.size() > 1) { - for (auto base : bases) { - if (no_cpp_mi ? PyType_IsSubtype(base->type, typeinfo->type) : base->type == typeinfo->type) { - this_.load_value(reinterpret_cast(src.ptr())->get_value_and_holder(base)); - return true; - } - } - } - - // Case 2c: C++ multiple inheritance is involved and we couldn't find an exact type match - // in the registered bases, above, so try implicit casting (needed for proper C++ casting - // when MI is involved). - if (this_.try_implicit_casts(src, convert)) - return true; - } - - // Perform an implicit conversion - if (convert) { - for (auto &converter : typeinfo->implicit_conversions) { - auto temp = reinterpret_steal(converter(src.ptr(), typeinfo->type)); - if (load_impl(temp, false)) { - loader_life_support::add_patient(temp); - return true; - } - } - if (this_.try_direct_conversions(src)) - return true; - } - return false; - } - - - // Called to do type lookup and wrap the pointer and type in a pair when a dynamic_cast - // isn't needed or can't be used. If the type is unknown, sets the error and returns a pair - // with .second = nullptr. (p.first = nullptr is not an error: it becomes None). - PYBIND11_NOINLINE static std::pair src_and_type( - const void *src, const std::type_info &cast_type, const std::type_info *rtti_type = nullptr) { - auto &internals = get_internals(); - auto it = internals.registered_types_cpp.find(std::type_index(cast_type)); - if (it != internals.registered_types_cpp.end()) - return {src, (const type_info *) it->second}; - - // Not found, set error: - std::string tname = rtti_type ? 
rtti_type->name() : cast_type.name(); - detail::clean_type_id(tname); - std::string msg = "Unregistered type : " + tname; - PyErr_SetString(PyExc_TypeError, msg.c_str()); - return {nullptr, nullptr}; - } - - const type_info *typeinfo = nullptr; - void *value = nullptr; -}; - -/** - * Determine suitable casting operator for pointer-or-lvalue-casting type casters. The type caster - * needs to provide `operator T*()` and `operator T&()` operators. - * - * If the type supports moving the value away via an `operator T&&() &&` method, it should use - * `movable_cast_op_type` instead. - */ -template -using cast_op_type = - conditional_t>::value, - typename std::add_pointer>::type, - typename std::add_lvalue_reference>::type>; - -/** - * Determine suitable casting operator for a type caster with a movable value. Such a type caster - * needs to provide `operator T*()`, `operator T&()`, and `operator T&&() &&`. The latter will be - * called in appropriate contexts where the value can be moved rather than copied. - * - * These operator are automatically provided when using the PYBIND11_TYPE_CASTER macro. - */ -template -using movable_cast_op_type = - conditional_t::type>::value, - typename std::add_pointer>::type, - conditional_t::value, - typename std::add_rvalue_reference>::type, - typename std::add_lvalue_reference>::type>>; - -// std::is_copy_constructible isn't quite enough: it lets std::vector (and similar) through when -// T is non-copyable, but code containing such a copy constructor fails to actually compile. -template struct is_copy_constructible : std::is_copy_constructible {}; - -// Specialization for types that appear to be copy constructible but also look like stl containers -// (we specifically check for: has `value_type` and `reference` with `reference = value_type&`): if -// so, copy constructability depends on whether the value_type is copy constructible. -template struct is_copy_constructible, - std::is_same - >::value>> : is_copy_constructible {}; - -#if !defined(PYBIND11_CPP17) -// Likewise for std::pair before C++17 (which mandates that the copy constructor not exist when the -// two types aren't themselves copy constructible). -template struct is_copy_constructible> - : all_of, is_copy_constructible> {}; -#endif - -/// Generic type caster for objects stored on the heap -template class type_caster_base : public type_caster_generic { - using itype = intrinsic_t; -public: - static PYBIND11_DESCR name() { return type_descr(_()); } - - type_caster_base() : type_caster_base(typeid(type)) { } - explicit type_caster_base(const std::type_info &info) : type_caster_generic(info) { } - - static handle cast(const itype &src, return_value_policy policy, handle parent) { - if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference) - policy = return_value_policy::copy; - return cast(&src, policy, parent); - } - - static handle cast(itype &&src, return_value_policy, handle parent) { - return cast(&src, return_value_policy::move, parent); - } - - // Returns a (pointer, type_info) pair taking care of necessary RTTI type lookup for a - // polymorphic type. If the instance isn't derived, returns the non-RTTI base version. 
- template ::value, int> = 0> - static std::pair src_and_type(const itype *src) { - const void *vsrc = src; - auto &internals = get_internals(); - auto &cast_type = typeid(itype); - const std::type_info *instance_type = nullptr; - if (vsrc) { - instance_type = &typeid(*src); - if (!same_type(cast_type, *instance_type)) { - // This is a base pointer to a derived type; if it is a pybind11-registered type, we - // can get the correct derived pointer (which may be != base pointer) by a - // dynamic_cast to most derived type: - auto it = internals.registered_types_cpp.find(std::type_index(*instance_type)); - if (it != internals.registered_types_cpp.end()) - return {dynamic_cast(src), (const type_info *) it->second}; - } - } - // Otherwise we have either a nullptr, an `itype` pointer, or an unknown derived pointer, so - // don't do a cast - return type_caster_generic::src_and_type(vsrc, cast_type, instance_type); - } - - // Non-polymorphic type, so no dynamic casting; just call the generic version directly - template ::value, int> = 0> - static std::pair src_and_type(const itype *src) { - return type_caster_generic::src_and_type(src, typeid(itype)); - } - - static handle cast(const itype *src, return_value_policy policy, handle parent) { - auto st = src_and_type(src); - return type_caster_generic::cast( - st.first, policy, parent, st.second, - make_copy_constructor(src), make_move_constructor(src)); - } - - static handle cast_holder(const itype *src, const void *holder) { - auto st = src_and_type(src); - return type_caster_generic::cast( - st.first, return_value_policy::take_ownership, {}, st.second, - nullptr, nullptr, holder); - } - - template using cast_op_type = cast_op_type; - - operator itype*() { return (type *) value; } - operator itype&() { if (!value) throw reference_cast_error(); return *((itype *) value); } - -protected: - using Constructor = void *(*)(const void *); - - /* Only enabled when the types are {copy,move}-constructible *and* when the type - does not have a private operator new implementation. */ - template ::value>> - static auto make_copy_constructor(const T *x) -> decltype(new T(*x), Constructor{}) { - return [](const void *arg) -> void * { - return new T(*reinterpret_cast(arg)); - }; - } - - template ::value>> - static auto make_move_constructor(const T *x) -> decltype(new T(std::move(*const_cast(x))), Constructor{}) { - return [](const void *arg) -> void * { - return new T(std::move(*const_cast(reinterpret_cast(arg)))); - }; - } - - static Constructor make_copy_constructor(...) { return nullptr; } - static Constructor make_move_constructor(...) 
{ return nullptr; } -}; - -template class type_caster : public type_caster_base { }; -template using make_caster = type_caster>; - -// Shortcut for calling a caster's `cast_op_type` cast operator for casting a type_caster to a T -template typename make_caster::template cast_op_type cast_op(make_caster &caster) { - return caster.operator typename make_caster::template cast_op_type(); -} -template typename make_caster::template cast_op_type::type> -cast_op(make_caster &&caster) { - return std::move(caster).operator - typename make_caster::template cast_op_type::type>(); -} - -template class type_caster> { -private: - using caster_t = make_caster; - caster_t subcaster; - using subcaster_cast_op_type = typename caster_t::template cast_op_type; - static_assert(std::is_same::type &, subcaster_cast_op_type>::value, - "std::reference_wrapper caster requires T to have a caster with an `T &` operator"); -public: - bool load(handle src, bool convert) { return subcaster.load(src, convert); } - static PYBIND11_DESCR name() { return caster_t::name(); } - static handle cast(const std::reference_wrapper &src, return_value_policy policy, handle parent) { - // It is definitely wrong to take ownership of this pointer, so mask that rvp - if (policy == return_value_policy::take_ownership || policy == return_value_policy::automatic) - policy = return_value_policy::automatic_reference; - return caster_t::cast(&src.get(), policy, parent); - } - template using cast_op_type = std::reference_wrapper; - operator std::reference_wrapper() { return subcaster.operator subcaster_cast_op_type&(); } -}; - -#define PYBIND11_TYPE_CASTER(type, py_name) \ - protected: \ - type value; \ - public: \ - static PYBIND11_DESCR name() { return type_descr(py_name); } \ - template >::value, int> = 0> \ - static handle cast(T_ *src, return_value_policy policy, handle parent) { \ - if (!src) return none().release(); \ - if (policy == return_value_policy::take_ownership) { \ - auto h = cast(std::move(*src), policy, parent); delete src; return h; \ - } else { \ - return cast(*src, policy, parent); \ - } \ - } \ - operator type*() { return &value; } \ - operator type&() { return value; } \ - operator type&&() && { return std::move(value); } \ - template using cast_op_type = pybind11::detail::movable_cast_op_type - - -template using is_std_char_type = any_of< - std::is_same, /* std::string */ - std::is_same, /* std::u16string */ - std::is_same, /* std::u32string */ - std::is_same /* std::wstring */ ->; - -template -struct type_caster::value && !is_std_char_type::value>> { - using _py_type_0 = conditional_t; - using _py_type_1 = conditional_t::value, _py_type_0, typename std::make_unsigned<_py_type_0>::type>; - using py_type = conditional_t::value, double, _py_type_1>; -public: - - bool load(handle src, bool convert) { - py_type py_value; - - if (!src) - return false; - - if (std::is_floating_point::value) { - if (convert || PyFloat_Check(src.ptr())) - py_value = (py_type) PyFloat_AsDouble(src.ptr()); - else - return false; - } else if (PyFloat_Check(src.ptr())) { - return false; - } else if (std::is_unsigned::value) { - py_value = as_unsigned(src.ptr()); - } else { // signed integer: - py_value = sizeof(T) <= sizeof(long) - ? 
(py_type) PyLong_AsLong(src.ptr()) - : (py_type) PYBIND11_LONG_AS_LONGLONG(src.ptr()); - } - - bool py_err = py_value == (py_type) -1 && PyErr_Occurred(); - if (py_err || (std::is_integral::value && sizeof(py_type) != sizeof(T) && - (py_value < (py_type) std::numeric_limits::min() || - py_value > (py_type) std::numeric_limits::max()))) { - bool type_error = py_err && PyErr_ExceptionMatches( -#if PY_VERSION_HEX < 0x03000000 && !defined(PYPY_VERSION) - PyExc_SystemError -#else - PyExc_TypeError -#endif - ); - PyErr_Clear(); - if (type_error && convert && PyNumber_Check(src.ptr())) { - auto tmp = reinterpret_borrow(std::is_floating_point::value - ? PyNumber_Float(src.ptr()) - : PyNumber_Long(src.ptr())); - PyErr_Clear(); - return load(tmp, false); - } - return false; - } - - value = (T) py_value; - return true; - } - - static handle cast(T src, return_value_policy /* policy */, handle /* parent */) { - if (std::is_floating_point::value) { - return PyFloat_FromDouble((double) src); - } else if (sizeof(T) <= sizeof(long)) { - if (std::is_signed::value) - return PyLong_FromLong((long) src); - else - return PyLong_FromUnsignedLong((unsigned long) src); - } else { - if (std::is_signed::value) - return PyLong_FromLongLong((long long) src); - else - return PyLong_FromUnsignedLongLong((unsigned long long) src); - } - } - - PYBIND11_TYPE_CASTER(T, _::value>("int", "float")); -}; - -template struct void_caster { -public: - bool load(handle src, bool) { - if (src && src.is_none()) - return true; - return false; - } - static handle cast(T, return_value_policy /* policy */, handle /* parent */) { - return none().inc_ref(); - } - PYBIND11_TYPE_CASTER(T, _("None")); -}; - -template <> class type_caster : public void_caster {}; - -template <> class type_caster : public type_caster { -public: - using type_caster::cast; - - bool load(handle h, bool) { - if (!h) { - return false; - } else if (h.is_none()) { - value = nullptr; - return true; - } - - /* Check if this is a capsule */ - if (isinstance(h)) { - value = reinterpret_borrow(h); - return true; - } - - /* Check if this is a C++ type */ - auto &bases = all_type_info((PyTypeObject *) h.get_type().ptr()); - if (bases.size() == 1) { // Only allowing loading from a single-value type - value = values_and_holders(reinterpret_cast(h.ptr())).begin()->value_ptr(); - return true; - } - - /* Fail */ - return false; - } - - static handle cast(const void *ptr, return_value_policy /* policy */, handle /* parent */) { - if (ptr) - return capsule(ptr).release(); - else - return none().inc_ref(); - } - - template using cast_op_type = void*&; - operator void *&() { return value; } - static PYBIND11_DESCR name() { return type_descr(_("capsule")); } -private: - void *value = nullptr; -}; - -template <> class type_caster : public void_caster { }; - -template <> class type_caster { -public: - bool load(handle src, bool convert) { - if (!src) return false; - else if (src.ptr() == Py_True) { value = true; return true; } - else if (src.ptr() == Py_False) { value = false; return true; } - else if (convert || !strcmp("numpy.bool_", Py_TYPE(src.ptr())->tp_name)) { - // (allow non-implicit conversion for numpy booleans) - - Py_ssize_t res = -1; - if (src.is_none()) { - res = 0; // None is implicitly converted to False - } - #if defined(PYPY_VERSION) - // On PyPy, check that "__bool__" (or "__nonzero__" on Python 2.7) attr exists - else if (hasattr(src, PYBIND11_BOOL_ATTR)) { - res = PyObject_IsTrue(src.ptr()); - } - #else - // Alternate approach for CPython: this does the same as the 
above, but optimized - // using the CPython API so as to avoid an unneeded attribute lookup. - else if (auto tp_as_number = src.ptr()->ob_type->tp_as_number) { - if (PYBIND11_NB_BOOL(tp_as_number)) { - res = (*PYBIND11_NB_BOOL(tp_as_number))(src.ptr()); - } - } - #endif - if (res == 0 || res == 1) { - value = (bool) res; - return true; - } - } - return false; - } - static handle cast(bool src, return_value_policy /* policy */, handle /* parent */) { - return handle(src ? Py_True : Py_False).inc_ref(); - } - PYBIND11_TYPE_CASTER(bool, _("bool")); -}; - -// Helper class for UTF-{8,16,32} C++ stl strings: -template struct string_caster { - using CharT = typename StringType::value_type; - - // Simplify life by being able to assume standard char sizes (the standard only guarantees - // minimums, but Python requires exact sizes) - static_assert(!std::is_same::value || sizeof(CharT) == 1, "Unsupported char size != 1"); - static_assert(!std::is_same::value || sizeof(CharT) == 2, "Unsupported char16_t size != 2"); - static_assert(!std::is_same::value || sizeof(CharT) == 4, "Unsupported char32_t size != 4"); - // wchar_t can be either 16 bits (Windows) or 32 (everywhere else) - static_assert(!std::is_same::value || sizeof(CharT) == 2 || sizeof(CharT) == 4, - "Unsupported wchar_t size != 2/4"); - static constexpr size_t UTF_N = 8 * sizeof(CharT); - - bool load(handle src, bool) { -#if PY_MAJOR_VERSION < 3 - object temp; -#endif - handle load_src = src; - if (!src) { - return false; - } else if (!PyUnicode_Check(load_src.ptr())) { -#if PY_MAJOR_VERSION >= 3 - return load_bytes(load_src); -#else - if (sizeof(CharT) == 1) { - return load_bytes(load_src); - } - - // The below is a guaranteed failure in Python 3 when PyUnicode_Check returns false - if (!PYBIND11_BYTES_CHECK(load_src.ptr())) - return false; - - temp = reinterpret_steal(PyUnicode_FromObject(load_src.ptr())); - if (!temp) { PyErr_Clear(); return false; } - load_src = temp; -#endif - } - - object utfNbytes = reinterpret_steal(PyUnicode_AsEncodedString( - load_src.ptr(), UTF_N == 8 ? "utf-8" : UTF_N == 16 ? "utf-16" : "utf-32", nullptr)); - if (!utfNbytes) { PyErr_Clear(); return false; } - - const CharT *buffer = reinterpret_cast(PYBIND11_BYTES_AS_STRING(utfNbytes.ptr())); - size_t length = (size_t) PYBIND11_BYTES_SIZE(utfNbytes.ptr()) / sizeof(CharT); - if (UTF_N > 8) { buffer++; length--; } // Skip BOM for UTF-16/32 - value = StringType(buffer, length); - - // If we're loading a string_view we need to keep the encoded Python object alive: - if (IsView) - loader_life_support::add_patient(utfNbytes); - - return true; - } - - static handle cast(const StringType &src, return_value_policy /* policy */, handle /* parent */) { - const char *buffer = reinterpret_cast(src.data()); - ssize_t nbytes = ssize_t(src.size() * sizeof(CharT)); - handle s = decode_utfN(buffer, nbytes); - if (!s) throw error_already_set(); - return s; - } - - PYBIND11_TYPE_CASTER(StringType, _(PYBIND11_STRING_NAME)); - -private: - static handle decode_utfN(const char *buffer, ssize_t nbytes) { -#if !defined(PYPY_VERSION) - return - UTF_N == 8 ? PyUnicode_DecodeUTF8(buffer, nbytes, nullptr) : - UTF_N == 16 ? 
PyUnicode_DecodeUTF16(buffer, nbytes, nullptr, nullptr) : - PyUnicode_DecodeUTF32(buffer, nbytes, nullptr, nullptr); -#else - // PyPy seems to have multiple problems related to PyUnicode_UTF*: the UTF8 version - // sometimes segfaults for unknown reasons, while the UTF16 and 32 versions require a - // non-const char * arguments, which is also a nuissance, so bypass the whole thing by just - // passing the encoding as a string value, which works properly: - return PyUnicode_Decode(buffer, nbytes, UTF_N == 8 ? "utf-8" : UTF_N == 16 ? "utf-16" : "utf-32", nullptr); -#endif - } - - // When loading into a std::string or char*, accept a bytes object as-is (i.e. - // without any encoding/decoding attempt). For other C++ char sizes this is a no-op. - // which supports loading a unicode from a str, doesn't take this path. - template - bool load_bytes(enable_if_t src) { - if (PYBIND11_BYTES_CHECK(src.ptr())) { - // We were passed a Python 3 raw bytes; accept it into a std::string or char* - // without any encoding attempt. - const char *bytes = PYBIND11_BYTES_AS_STRING(src.ptr()); - if (bytes) { - value = StringType(bytes, (size_t) PYBIND11_BYTES_SIZE(src.ptr())); - return true; - } - } - - return false; - } - - template - bool load_bytes(enable_if_t) { return false; } -}; - -template -struct type_caster, enable_if_t::value>> - : string_caster> {}; - -#ifdef PYBIND11_HAS_STRING_VIEW -template -struct type_caster, enable_if_t::value>> - : string_caster, true> {}; -#endif - -// Type caster for C-style strings. We basically use a std::string type caster, but also add the -// ability to use None as a nullptr char* (which the string caster doesn't allow). -template struct type_caster::value>> { - using StringType = std::basic_string; - using StringCaster = type_caster; - StringCaster str_caster; - bool none = false; -public: - bool load(handle src, bool convert) { - if (!src) return false; - if (src.is_none()) { - // Defer accepting None to other overloads (if we aren't in convert mode): - if (!convert) return false; - none = true; - return true; - } - return str_caster.load(src, convert); - } - - static handle cast(const CharT *src, return_value_policy policy, handle parent) { - if (src == nullptr) return pybind11::none().inc_ref(); - return StringCaster::cast(StringType(src), policy, parent); - } - - static handle cast(CharT src, return_value_policy policy, handle parent) { - if (std::is_same::value) { - handle s = PyUnicode_DecodeLatin1((const char *) &src, 1, nullptr); - if (!s) throw error_already_set(); - return s; - } - return StringCaster::cast(StringType(1, src), policy, parent); - } - - operator CharT*() { return none ? nullptr : const_cast(static_cast(str_caster).c_str()); } - operator CharT() { - if (none) - throw value_error("Cannot convert None to a character"); - - auto &value = static_cast(str_caster); - size_t str_len = value.size(); - if (str_len == 0) - throw value_error("Cannot convert empty string to a character"); - - // If we're in UTF-8 mode, we have two possible failures: one for a unicode character that - // is too high, and one for multiple unicode characters (caught later), so we need to figure - // out how long the first encoded character is in bytes to distinguish between these two - // errors. We also allow want to allow unicode characters U+0080 through U+00FF, as those - // can fit into a single char value. - if (StringCaster::UTF_N == 8 && str_len > 1 && str_len <= 4) { - unsigned char v0 = static_cast(value[0]); - size_t char0_bytes = !(v0 & 0x80) ? 
1 : // low bits only: 0-127 - (v0 & 0xE0) == 0xC0 ? 2 : // 0b110xxxxx - start of 2-byte sequence - (v0 & 0xF0) == 0xE0 ? 3 : // 0b1110xxxx - start of 3-byte sequence - 4; // 0b11110xxx - start of 4-byte sequence - - if (char0_bytes == str_len) { - // If we have a 128-255 value, we can decode it into a single char: - if (char0_bytes == 2 && (v0 & 0xFC) == 0xC0) { // 0x110000xx 0x10xxxxxx - return static_cast(((v0 & 3) << 6) + (static_cast(value[1]) & 0x3F)); - } - // Otherwise we have a single character, but it's > U+00FF - throw value_error("Character code point not in range(0x100)"); - } - } - - // UTF-16 is much easier: we can only have a surrogate pair for values above U+FFFF, thus a - // surrogate pair with total length 2 instantly indicates a range error (but not a "your - // string was too long" error). - else if (StringCaster::UTF_N == 16 && str_len == 2) { - char16_t v0 = static_cast(value[0]); - if (v0 >= 0xD800 && v0 < 0xE000) - throw value_error("Character code point not in range(0x10000)"); - } - - if (str_len != 1) - throw value_error("Expected a character, but multi-character string found"); - - return value[0]; - } - - static PYBIND11_DESCR name() { return type_descr(_(PYBIND11_STRING_NAME)); } - template using cast_op_type = remove_reference_t>; -}; - -// Base implementation for std::tuple and std::pair -template class Tuple, typename... Ts> class tuple_caster { - using type = Tuple; - static constexpr auto size = sizeof...(Ts); - using indices = make_index_sequence; -public: - - bool load(handle src, bool convert) { - if (!isinstance(src)) - return false; - const auto seq = reinterpret_borrow(src); - if (seq.size() != size) - return false; - return load_impl(seq, convert, indices{}); - } - - template - static handle cast(T &&src, return_value_policy policy, handle parent) { - return cast_impl(std::forward(src), policy, parent, indices{}); - } - - static PYBIND11_DESCR name() { - return type_descr(_("Tuple[") + detail::concat(make_caster::name()...) + _("]")); - } - - template using cast_op_type = type; - - operator type() & { return implicit_cast(indices{}); } - operator type() && { return std::move(*this).implicit_cast(indices{}); } - -protected: - template - type implicit_cast(index_sequence) & { return type(cast_op(std::get(subcasters))...); } - template - type implicit_cast(index_sequence) && { return type(cast_op(std::move(std::get(subcasters)))...); } - - static constexpr bool load_impl(const sequence &, bool, index_sequence<>) { return true; } - - template - bool load_impl(const sequence &seq, bool convert, index_sequence) { - for (bool r : {std::get(subcasters).load(seq[Is], convert)...}) - if (!r) - return false; - return true; - } - - /* Implementation: Convert a C++ tuple into a Python tuple */ - template - static handle cast_impl(T &&src, return_value_policy policy, handle parent, index_sequence) { - std::array entries{{ - reinterpret_steal(make_caster::cast(std::get(std::forward(src)), policy, parent))... - }}; - for (const auto &entry: entries) - if (!entry) - return handle(); - tuple result(size); - int counter = 0; - for (auto & entry: entries) - PyTuple_SET_ITEM(result.ptr(), counter++, entry.release().ptr()); - return result.release(); - } - - Tuple...> subcasters; -}; - -template class type_caster> - : public tuple_caster {}; - -template class type_caster> - : public tuple_caster {}; - -/// Helper class which abstracts away certain actions. 
Users can provide specializations for -/// custom holders, but it's only necessary if the type has a non-standard interface. -template -struct holder_helper { - static auto get(const T &p) -> decltype(p.get()) { return p.get(); } -}; - -/// Type caster for holder types like std::shared_ptr, etc. -template -struct copyable_holder_caster : public type_caster_base { -public: - using base = type_caster_base; - static_assert(std::is_base_of>::value, - "Holder classes are only supported for custom types"); - using base::base; - using base::cast; - using base::typeinfo; - using base::value; - - bool load(handle src, bool convert) { - return base::template load_impl>(src, convert); - } - - explicit operator type*() { return this->value; } - explicit operator type&() { return *(this->value); } - explicit operator holder_type*() { return &holder; } - - // Workaround for Intel compiler bug - // see pybind11 issue 94 - #if defined(__ICC) || defined(__INTEL_COMPILER) - operator holder_type&() { return holder; } - #else - explicit operator holder_type&() { return holder; } - #endif - - static handle cast(const holder_type &src, return_value_policy, handle) { - const auto *ptr = holder_helper::get(src); - return type_caster_base::cast_holder(ptr, &src); - } - -protected: - friend class type_caster_generic; - void check_holder_compat() { - if (typeinfo->default_holder) - throw cast_error("Unable to load a custom holder type from a default-holder instance"); - } - - bool load_value(const value_and_holder &v_h) { - if (v_h.holder_constructed()) { - value = v_h.value_ptr(); - holder = v_h.holder(); - return true; - } else { - throw cast_error("Unable to cast from non-held to held instance (T& to Holder) " -#if defined(NDEBUG) - "(compile in debug mode for type information)"); -#else - "of type '" + type_id() + "''"); -#endif - } - } - - template ::value, int> = 0> - bool try_implicit_casts(handle, bool) { return false; } - - template ::value, int> = 0> - bool try_implicit_casts(handle src, bool convert) { - for (auto &cast : typeinfo->implicit_casts) { - copyable_holder_caster sub_caster(*cast.first); - if (sub_caster.load(src, convert)) { - value = cast.second(sub_caster.value); - holder = holder_type(sub_caster.holder, (type *) value); - return true; - } - } - return false; - } - - static bool try_direct_conversions(handle) { return false; } - - - holder_type holder; -}; - -/// Specialize for the common std::shared_ptr, so users don't need to -template -class type_caster> : public copyable_holder_caster> { }; - -template -struct move_only_holder_caster { - static_assert(std::is_base_of, type_caster>::value, - "Holder classes are only supported for custom types"); - - static handle cast(holder_type &&src, return_value_policy, handle) { - auto *ptr = holder_helper::get(src); - return type_caster_base::cast_holder(ptr, &src); - } - static PYBIND11_DESCR name() { return type_caster_base::name(); } -}; - -template -class type_caster> - : public move_only_holder_caster> { }; - -template -using type_caster_holder = conditional_t::value, - copyable_holder_caster, - move_only_holder_caster>; - -template struct always_construct_holder { static constexpr bool value = Value; }; - -/// Create a specialization for custom holder types (silently ignores std::shared_ptr) -#define PYBIND11_DECLARE_HOLDER_TYPE(type, holder_type, ...) 
\ - namespace pybind11 { namespace detail { \ - template \ - struct always_construct_holder : always_construct_holder { }; \ - template \ - class type_caster::value>> \ - : public type_caster_holder { }; \ - }} - -// PYBIND11_DECLARE_HOLDER_TYPE holder types: -template struct is_holder_type : - std::is_base_of, detail::type_caster> {}; -// Specialization for always-supported unique_ptr holders: -template struct is_holder_type> : - std::true_type {}; - -template struct handle_type_name { static PYBIND11_DESCR name() { return _(); } }; -template <> struct handle_type_name { static PYBIND11_DESCR name() { return _(PYBIND11_BYTES_NAME); } }; -template <> struct handle_type_name { static PYBIND11_DESCR name() { return _("*args"); } }; -template <> struct handle_type_name { static PYBIND11_DESCR name() { return _("**kwargs"); } }; - -template -struct pyobject_caster { - template ::value, int> = 0> - bool load(handle src, bool /* convert */) { value = src; return static_cast(value); } - - template ::value, int> = 0> - bool load(handle src, bool /* convert */) { - if (!isinstance(src)) - return false; - value = reinterpret_borrow(src); - return true; - } - - static handle cast(const handle &src, return_value_policy /* policy */, handle /* parent */) { - return src.inc_ref(); - } - PYBIND11_TYPE_CASTER(type, handle_type_name::name()); -}; - -template -class type_caster::value>> : public pyobject_caster { }; - -// Our conditions for enabling moving are quite restrictive: -// At compile time: -// - T needs to be a non-const, non-pointer, non-reference type -// - type_caster::operator T&() must exist -// - the type must be move constructible (obviously) -// At run-time: -// - if the type is non-copy-constructible, the object must be the sole owner of the type (i.e. it -// must have ref_count() == 1)h -// If any of the above are not satisfied, we fall back to copying. -template using move_is_plain_type = satisfies_none_of; -template struct move_always : std::false_type {}; -template struct move_always, - negation>, - std::is_move_constructible, - std::is_same>().operator T&()), T&> ->::value>> : std::true_type {}; -template struct move_if_unreferenced : std::false_type {}; -template struct move_if_unreferenced, - negation>, - std::is_move_constructible, - std::is_same>().operator T&()), T&> ->::value>> : std::true_type {}; -template using move_never = none_of, move_if_unreferenced>; - -// Detect whether returning a `type` from a cast on type's type_caster is going to result in a -// reference or pointer to a local variable of the type_caster. Basically, only -// non-reference/pointer `type`s and reference/pointers from a type_caster_generic are safe; -// everything else returns a reference/pointer to a local variable. -template using cast_is_temporary_value_reference = bool_constant< - (std::is_reference::value || std::is_pointer::value) && - !std::is_base_of>::value ->; - -// When a value returned from a C++ function is being cast back to Python, we almost always want to -// force `policy = move`, regardless of the return value policy the function/method was declared -// with. Some classes (most notably Eigen::Ref and related) need to avoid this, and so can do so by -// specializing this struct. -template struct return_value_policy_override { - static return_value_policy policy(return_value_policy p) { - return !std::is_lvalue_reference::value && !std::is_pointer::value - ? 
return_value_policy::move : p; - } -}; - -// Basic python -> C++ casting; throws if casting fails -template type_caster &load_type(type_caster &conv, const handle &handle) { - if (!conv.load(handle, true)) { -#if defined(NDEBUG) - throw cast_error("Unable to cast Python instance to C++ type (compile in debug mode for details)"); -#else - throw cast_error("Unable to cast Python instance of type " + - (std::string) str(handle.get_type()) + " to C++ type '" + type_id() + "''"); -#endif - } - return conv; -} -// Wrapper around the above that also constructs and returns a type_caster -template make_caster load_type(const handle &handle) { - make_caster conv; - load_type(conv, handle); - return conv; -} - -NAMESPACE_END(detail) - -// pytype -> C++ type -template ::value, int> = 0> -T cast(const handle &handle) { - using namespace detail; - static_assert(!cast_is_temporary_value_reference::value, - "Unable to cast type to reference: value is local to type caster"); - return cast_op(load_type(handle)); -} - -// pytype -> pytype (calls converting constructor) -template ::value, int> = 0> -T cast(const handle &handle) { return T(reinterpret_borrow(handle)); } - -// C++ type -> py::object -template ::value, int> = 0> -object cast(const T &value, return_value_policy policy = return_value_policy::automatic_reference, - handle parent = handle()) { - if (policy == return_value_policy::automatic) - policy = std::is_pointer::value ? return_value_policy::take_ownership : return_value_policy::copy; - else if (policy == return_value_policy::automatic_reference) - policy = std::is_pointer::value ? return_value_policy::reference : return_value_policy::copy; - return reinterpret_steal(detail::make_caster::cast(value, policy, parent)); -} - -template T handle::cast() const { return pybind11::cast(*this); } -template <> inline void handle::cast() const { return; } - -template -detail::enable_if_t::value, T> move(object &&obj) { - if (obj.ref_count() > 1) -#if defined(NDEBUG) - throw cast_error("Unable to cast Python instance to C++ rvalue: instance has multiple references" - " (compile in debug mode for details)"); -#else - throw cast_error("Unable to move from Python " + (std::string) str(obj.get_type()) + - " instance to C++ " + type_id() + " instance: instance has multiple references"); -#endif - - // Move into a temporary and return that, because the reference may be a local value of `conv` - T ret = std::move(detail::load_type(obj).operator T&()); - return ret; -} - -// Calling cast() on an rvalue calls pybind::cast with the object rvalue, which does: -// - If we have to move (because T has no copy constructor), do it. This will fail if the moved -// object has multiple references, but trying to copy will fail to compile. -// - If both movable and copyable, check ref count: if 1, move; otherwise copy -// - Otherwise (not movable), copy. 
-template detail::enable_if_t::value, T> cast(object &&object) { - return move(std::move(object)); -} -template detail::enable_if_t::value, T> cast(object &&object) { - if (object.ref_count() > 1) - return cast(object); - else - return move(std::move(object)); -} -template detail::enable_if_t::value, T> cast(object &&object) { - return cast(object); -} - -template T object::cast() const & { return pybind11::cast(*this); } -template T object::cast() && { return pybind11::cast(std::move(*this)); } -template <> inline void object::cast() const & { return; } -template <> inline void object::cast() && { return; } - -NAMESPACE_BEGIN(detail) - -// Declared in pytypes.h: -template ::value, int>> -object object_or_cast(T &&o) { return pybind11::cast(std::forward(o)); } - -struct overload_unused {}; // Placeholder type for the unneeded (and dead code) static variable in the OVERLOAD_INT macro -template using overload_caster_t = conditional_t< - cast_is_temporary_value_reference::value, make_caster, overload_unused>; - -// Trampoline use: for reference/pointer types to value-converted values, we do a value cast, then -// store the result in the given variable. For other types, this is a no-op. -template enable_if_t::value, T> cast_ref(object &&o, make_caster &caster) { - return cast_op(load_type(caster, o)); -} -template enable_if_t::value, T> cast_ref(object &&, overload_unused &) { - pybind11_fail("Internal error: cast_ref fallback invoked"); } - -// Trampoline use: Having a pybind11::cast with an invalid reference type is going to static_assert, even -// though if it's in dead code, so we provide a "trampoline" to pybind11::cast that only does anything in -// cases where pybind11::cast is valid. -template enable_if_t::value, T> cast_safe(object &&o) { - return pybind11::cast(std::move(o)); } -template enable_if_t::value, T> cast_safe(object &&) { - pybind11_fail("Internal error: cast_safe fallback invoked"); } -template <> inline void cast_safe(object &&) {} - -NAMESPACE_END(detail) - -template tuple make_tuple(Args&&... args_) { - constexpr size_t size = sizeof...(Args); - std::array args { - { reinterpret_steal(detail::make_caster::cast( - std::forward(args_), policy, nullptr))... } - }; - for (size_t i = 0; i < args.size(); i++) { - if (!args[i]) { -#if defined(NDEBUG) - throw cast_error("make_tuple(): unable to convert arguments to Python object (compile in debug mode for details)"); -#else - std::array argtypes { {type_id()...} }; - throw cast_error("make_tuple(): unable to convert argument of type '" + - argtypes[i] + "' to Python object"); -#endif - } - } - tuple result(size); - int counter = 0; - for (auto &arg_value : args) - PyTuple_SET_ITEM(result.ptr(), counter++, arg_value.release().ptr()); - return result; -} - -/// \ingroup annotations -/// Annotation for arguments -struct arg { - /// Constructs an argument with the name of the argument; if null or omitted, this is a positional argument. - constexpr explicit arg(const char *name = nullptr) : name(name), flag_noconvert(false), flag_none(true) { } - /// Assign a value to this argument - template arg_v operator=(T &&value) const; - /// Indicate that the type should not be converted in the type caster - arg &noconvert(bool flag = true) { flag_noconvert = flag; return *this; } - /// Indicates that the argument should/shouldn't allow None (e.g. 
for nullable pointer args) - arg &none(bool flag = true) { flag_none = flag; return *this; } - - const char *name; ///< If non-null, this is a named kwargs argument - bool flag_noconvert : 1; ///< If set, do not allow conversion (requires a supporting type caster!) - bool flag_none : 1; ///< If set (the default), allow None to be passed to this argument -}; - -/// \ingroup annotations -/// Annotation for arguments with values -struct arg_v : arg { -private: - template - arg_v(arg &&base, T &&x, const char *descr = nullptr) - : arg(base), - value(reinterpret_steal( - detail::make_caster::cast(x, return_value_policy::automatic, {}) - )), - descr(descr) -#if !defined(NDEBUG) - , type(type_id()) -#endif - { } - -public: - /// Direct construction with name, default, and description - template - arg_v(const char *name, T &&x, const char *descr = nullptr) - : arg_v(arg(name), std::forward(x), descr) { } - - /// Called internally when invoking `py::arg("a") = value` - template - arg_v(const arg &base, T &&x, const char *descr = nullptr) - : arg_v(arg(base), std::forward(x), descr) { } - - /// Same as `arg::noconvert()`, but returns *this as arg_v&, not arg& - arg_v &noconvert(bool flag = true) { arg::noconvert(flag); return *this; } - - /// Same as `arg::nonone()`, but returns *this as arg_v&, not arg& - arg_v &none(bool flag = true) { arg::none(flag); return *this; } - - /// The default value - object value; - /// The (optional) description of the default value - const char *descr; -#if !defined(NDEBUG) - /// The C++ type name of the default value (only available when compiled in debug mode) - std::string type; -#endif -}; - -template -arg_v arg::operator=(T &&value) const { return {std::move(*this), std::forward(value)}; } - -/// Alias for backward compatibility -- to be removed in version 2.0 -template using arg_t = arg_v; - -inline namespace literals { -/** \rst - String literal version of `arg` - \endrst */ -constexpr arg operator"" _a(const char *name, size_t) { return arg(name); } -} - -NAMESPACE_BEGIN(detail) - -// forward declaration (definition in attr.h) -struct function_record; - -/// Internal data associated with a single function call -struct function_call { - function_call(function_record &f, handle p); // Implementation in attr.h - - /// The function data: - const function_record &func; - - /// Arguments passed to the function: - std::vector args; - - /// The `convert` value the arguments should be loaded with - std::vector args_convert; - - /// The parent, if any - handle parent; -}; - - -/// Helper class which loads arguments for C++ functions called from Python -template -class argument_loader { - using indices = make_index_sequence; - - template using argument_is_args = std::is_same, args>; - template using argument_is_kwargs = std::is_same, kwargs>; - // Get args/kwargs argument positions relative to the end of the argument list: - static constexpr auto args_pos = constexpr_first() - (int) sizeof...(Args), - kwargs_pos = constexpr_first() - (int) sizeof...(Args); - - static constexpr bool args_kwargs_are_last = kwargs_pos >= - 1 && args_pos >= kwargs_pos - 1; - - static_assert(args_kwargs_are_last, "py::args/py::kwargs are only permitted as the last argument(s) of a function"); - -public: - static constexpr bool has_kwargs = kwargs_pos < 0; - static constexpr bool has_args = args_pos < 0; - - static PYBIND11_DESCR arg_names() { return detail::concat(make_caster::name()...); } - - bool load_args(function_call &call) { - return load_impl_sequence(call, indices{}); - } - - 
-/// Helper class which loads arguments for C++ functions called from Python
-template <typename... Args>
-class argument_loader {
-    using indices = make_index_sequence<sizeof...(Args)>;
-
-    template <typename Arg> using argument_is_args   = std::is_same<intrinsic_t<Arg>, args>;
-    template <typename Arg> using argument_is_kwargs = std::is_same<intrinsic_t<Arg>, kwargs>;
-    // Get args/kwargs argument positions relative to the end of the argument list:
-    static constexpr auto args_pos = constexpr_first<argument_is_args, Args...>() - (int) sizeof...(Args),
-                        kwargs_pos = constexpr_first<argument_is_kwargs, Args...>() - (int) sizeof...(Args);
-
-    static constexpr bool args_kwargs_are_last = kwargs_pos >= - 1 && args_pos >= kwargs_pos - 1;
-
-    static_assert(args_kwargs_are_last, "py::args/py::kwargs are only permitted as the last argument(s) of a function");
-
-public:
-    static constexpr bool has_kwargs = kwargs_pos < 0;
-    static constexpr bool has_args = args_pos < 0;
-
-    static PYBIND11_DESCR arg_names() { return detail::concat(make_caster<Args>::name()...); }
-
-    bool load_args(function_call &call) {
-        return load_impl_sequence(call, indices{});
-    }
-
-    template <typename Return, typename Guard, typename Func>
-    enable_if_t<!std::is_void<Return>::value, Return> call(Func &&f) && {
-        return std::move(*this).template call_impl<Return>(std::forward<Func>(f), indices{}, Guard{});
-    }
-
-    template <typename Return, typename Guard, typename Func>
-    enable_if_t<std::is_void<Return>::value, void_type> call(Func &&f) && {
-        std::move(*this).template call_impl<Return>(std::forward<Func>(f), indices{}, Guard{});
-        return void_type();
-    }
-
-private:
-
-    static bool load_impl_sequence(function_call &, index_sequence<>) { return true; }
-
-    template <size_t... Is>
-    bool load_impl_sequence(function_call &call, index_sequence<Is...>) {
-        for (bool r : {std::get<Is>(argcasters).load(call.args[Is], call.args_convert[Is])...})
-            if (!r)
-                return false;
-        return true;
-    }
-
-    template <typename Return, typename Func, size_t... Is, typename Guard>
-    Return call_impl(Func &&f, index_sequence<Is...>, Guard &&) {
-        return std::forward<Func>(f)(cast_op<Args>(std::move(std::get<Is>(argcasters)))...);
-    }
-
-    std::tuple<make_caster<Args>...> argcasters;
-};
-
-/// Helper class which collects only positional arguments for a Python function call.
-/// A fancier version below can collect any argument, but this one is optimal for simple calls.
-template <return_value_policy policy>
-class simple_collector {
-public:
-    template <typename... Ts>
-    explicit simple_collector(Ts &&...values)
-        : m_args(pybind11::make_tuple<policy>(std::forward<Ts>(values)...)) { }
-
-    const tuple &args() const & { return m_args; }
-    dict kwargs() const { return {}; }
-
-    tuple args() && { return std::move(m_args); }
-
-    /// Call a Python function and pass the collected arguments
-    object call(PyObject *ptr) const {
-        PyObject *result = PyObject_CallObject(ptr, m_args.ptr());
-        if (!result)
-            throw error_already_set();
-        return reinterpret_steal<object>(result);
-    }
-
-private:
-    tuple m_args;
-};
-
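`simple_collector` handles the fast path of a purely positional call; anything with keywords or unpacking falls through to the `unpacking_collector` defined just below. A hedged sketch of both paths from the caller's side (assumes a running Python 3 interpreter; not part of the deleted file):

    namespace py = pybind11;
    using namespace pybind11::literals;

    py::object sorted_fn = py::module::import("builtins").attr("sorted");
    // positional only -> simple_collector -> PyObject_CallObject:
    py::object a = sorted_fn(py::make_tuple(3, 1, 2));
    // keyword argument -> unpacking_collector -> PyObject_Call:
    py::object b = sorted_fn(py::make_tuple(3, 1, 2), "reverse"_a = true);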
-/// Helper class which collects positional, keyword, * and ** arguments for a Python function call
-template <return_value_policy policy>
-class unpacking_collector {
-public:
-    template <typename... Ts>
-    explicit unpacking_collector(Ts &&...values) {
-        // Tuples aren't (easily) resizable so a list is needed for collection,
-        // but the actual function call strictly requires a tuple.
-        auto args_list = list();
-        int _[] = { 0, (process(args_list, std::forward<Ts>(values)), 0)... };
-        ignore_unused(_);
-
-        m_args = std::move(args_list);
-    }
-
-    const tuple &args() const & { return m_args; }
-    const dict &kwargs() const & { return m_kwargs; }
-
-    tuple args() && { return std::move(m_args); }
-    dict kwargs() && { return std::move(m_kwargs); }
-
-    /// Call a Python function and pass the collected arguments
-    object call(PyObject *ptr) const {
-        PyObject *result = PyObject_Call(ptr, m_args.ptr(), m_kwargs.ptr());
-        if (!result)
-            throw error_already_set();
-        return reinterpret_steal<object>(result);
-    }
-
-private:
-    template <typename T>
-    void process(list &args_list, T &&x) {
-        auto o = reinterpret_steal<object>(detail::make_caster<T>::cast(std::forward<T>(x), policy, {}));
-        if (!o) {
-#if defined(NDEBUG)
-            argument_cast_error();
-#else
-            argument_cast_error(std::to_string(args_list.size()), type_id<T>());
-#endif
-        }
-        args_list.append(o);
-    }
-
-    void process(list &args_list, detail::args_proxy ap) {
-        for (const auto &a : ap)
-            args_list.append(a);
-    }
-
-    void process(list &/*args_list*/, arg_v a) {
-        if (!a.name)
-#if defined(NDEBUG)
-            nameless_argument_error();
-#else
-            nameless_argument_error(a.type);
-#endif
-
-        if (m_kwargs.contains(a.name)) {
-#if defined(NDEBUG)
-            multiple_values_error();
-#else
-            multiple_values_error(a.name);
-#endif
-        }
-        if (!a.value) {
-#if defined(NDEBUG)
-            argument_cast_error();
-#else
-            argument_cast_error(a.name, a.type);
-#endif
-        }
-        m_kwargs[a.name] = a.value;
-    }
-
-    void process(list &/*args_list*/, detail::kwargs_proxy kp) {
-        if (!kp)
-            return;
-        for (const auto &k : reinterpret_borrow<dict>(kp)) {
-            if (m_kwargs.contains(k.first)) {
-#if defined(NDEBUG)
-                multiple_values_error();
-#else
-                multiple_values_error(str(k.first));
-#endif
-            }
-            m_kwargs[k.first] = k.second;
-        }
-    }
-
-    [[noreturn]] static void nameless_argument_error() {
-        throw type_error("Got kwargs without a name; only named arguments "
-                         "may be passed via py::arg() to a python function call. "
-                         "(compile in debug mode for details)");
-    }
-    [[noreturn]] static void nameless_argument_error(std::string type) {
-        throw type_error("Got kwargs without a name of type '" + type + "'; only named "
-                         "arguments may be passed via py::arg() to a python function call. ");
-    }
-    [[noreturn]] static void multiple_values_error() {
-        throw type_error("Got multiple values for keyword argument "
-                         "(compile in debug mode for details)");
-    }
-
-    [[noreturn]] static void multiple_values_error(std::string name) {
-        throw type_error("Got multiple values for keyword argument '" + name + "'");
-    }
-
-    [[noreturn]] static void argument_cast_error() {
-        throw cast_error("Unable to convert call argument to Python object "
-                         "(compile in debug mode for details)");
-    }
-
-    [[noreturn]] static void argument_cast_error(std::string name, std::string type) {
-        throw cast_error("Unable to convert call argument '" + name
-                         + "' of type '" + type + "' to Python object");
-    }
-
-private:
-    tuple m_args;
-    dict m_kwargs;
-};
-
-/// Collect only positional arguments for a Python function call
-template <return_value_policy policy, typename... Args,
-          typename = enable_if_t<all_of<is_positional<Args>...>::value>>
-simple_collector<policy> collect_arguments(Args &&...args) {
-    return simple_collector<policy>(std::forward<Args>(args)...);
-}
-
-/// Collect all arguments, including keywords and unpacking (only instantiated when needed)
-template <return_value_policy policy, typename... Args,
-          typename = enable_if_t<!all_of<is_positional<Args>...>::value>>
-unpacking_collector<policy> collect_arguments(Args &&...args) {
-    // Following argument order rules for generalized unpacking according to PEP 448
-    static_assert(
-        constexpr_last<is_positional, Args...>() < constexpr_first<is_keyword_or_ds, Args...>()
-        && constexpr_last<is_s_unpacking, Args...>() < constexpr_first<is_ds_unpacking, Args...>(),
-        "Invalid function call: positional args must precede keywords and ** unpacking; "
-        "* unpacking must precede ** unpacking"
-    );
-    return unpacking_collector<policy>(std::forward<Args>(args)...);
-}
-
-template <typename Derived>
-template <return_value_policy policy, typename... Args>
-object object_api<Derived>::operator()(Args &&...args) const {
-    return detail::collect_arguments<policy>(std::forward<Args>(args)...).call(derived().ptr());
-}
-
-template <typename Derived>
-template <return_value_policy policy, typename... Args>
-object object_api<Derived>::call(Args &&...args) const {
-    return operator()<policy>(std::forward<Args>(args)...);
-}
-
-NAMESPACE_END(detail)
-
-#define PYBIND11_MAKE_OPAQUE(Type) \
-    namespace pybind11 { namespace detail { \
-        template<> class type_caster<Type> : public type_caster_base<Type> { }; \
-    }}
-
-NAMESPACE_END(pybind11)
diff --git a/lanms/include/pybind11/chrono.h b/lanms/include/pybind11/chrono.h
deleted file mode 100644
index 8a41d08b..00000000
--- a/lanms/include/pybind11/chrono.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
-    pybind11/chrono.h: Transparent conversion between std::chrono and python's datetime
-
-    Copyright (c) 2016 Trent Houliston <trent@houliston.me> and
-                       Wenzel Jakob <wenzel.jakob@epfl.ch>
-
-    All rights reserved. Use of this source code is governed by a
-    BSD-style license that can be found in the LICENSE file.
-*/
-
-#pragma once
-
-#include "pybind11.h"
-#include <cmath>
-#include <ctime>
-#include <chrono>
-#include <datetime.h>
-
-// Backport the PyDateTime_DELTA functions from Python3.3 if required
-#ifndef PyDateTime_DELTA_GET_DAYS
-#define PyDateTime_DELTA_GET_DAYS(o)         (((PyDateTime_Delta*)o)->days)
-#endif
-#ifndef PyDateTime_DELTA_GET_SECONDS
-#define PyDateTime_DELTA_GET_SECONDS(o)      (((PyDateTime_Delta*)o)->seconds)
-#endif
-#ifndef PyDateTime_DELTA_GET_MICROSECONDS
-#define PyDateTime_DELTA_GET_MICROSECONDS(o) (((PyDateTime_Delta*)o)->microseconds)
-#endif
-
-NAMESPACE_BEGIN(pybind11)
-NAMESPACE_BEGIN(detail)
-
-template <typename type> class duration_caster {
-public:
-    typedef typename type::rep rep;
-    typedef typename type::period period;
-
-    typedef std::chrono::duration<uint_fast32_t, std::ratio<86400>> days;
-
-    bool load(handle src, bool) {
-        using namespace std::chrono;
-
-        // Lazy initialise the PyDateTime import
-        if (!PyDateTimeAPI) { PyDateTime_IMPORT; }
-
-        if (!src) return false;
-        // If invoked with datetime.delta object
-        if (PyDelta_Check(src.ptr())) {
-            value = type(duration_cast<duration<rep, period>>(
-                  days(PyDateTime_DELTA_GET_DAYS(src.ptr()))
-                + seconds(PyDateTime_DELTA_GET_SECONDS(src.ptr()))
-                + microseconds(PyDateTime_DELTA_GET_MICROSECONDS(src.ptr()))));
-            return true;
-        }
-        // If invoked with a float we assume it is seconds and convert
-        else if (PyFloat_Check(src.ptr())) {
-            value = type(duration_cast<duration<rep, period>>(duration<double>(PyFloat_AsDouble(src.ptr()))));
-            return true;
-        }
-        else return false;
-    }
-
-    // If this is a duration just return it back
-    static const std::chrono::duration<rep, period>& get_duration(const std::chrono::duration<rep, period> &src) {
-        return src;
-    }
-
-    // If this is a time_point get the time_since_epoch
-    template <typename Clock> static std::chrono::duration<rep, period> get_duration(const std::chrono::time_point<Clock, std::chrono::duration<rep, period>> &src) {
-        return src.time_since_epoch();
-    }
-
-    static handle cast(const type &src, return_value_policy /* policy */, handle /* parent */) {
-        using namespace std::chrono;
-
-        // Use overloaded function to get our duration from our source
-        // Works out if it is a duration or time_point and get the duration
-        auto d = get_duration(src);
-
-        // Lazy initialise the PyDateTime import
-        if (!PyDateTimeAPI) { PyDateTime_IMPORT; }
-
-        // Declare these special duration types so the conversions happen with the correct primitive types (int)
-        using dd_t = duration<int, std::ratio<86400>>;
-        using ss_t = duration<int, std::ratio<1>>;
-        using us_t = duration<int, std::micro>;
-
-        auto dd = duration_cast<dd_t>(d);
-        auto subd = d - dd;
-        auto ss = duration_cast<ss_t>(subd);
-        auto us = duration_cast<us_t>(subd - ss);
-        return PyDelta_FromDSU(dd.count(), ss.count(), us.count());
-    }
-
-    PYBIND11_TYPE_CASTER(type, _("datetime.timedelta"));
-};
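With this header included, a C++ `std::chrono::duration` round-trips as `datetime.timedelta` with no explicit conversion code. A hedged sketch (module and function names invented for illustration):

    #include <pybind11/chrono.h>   // pulls in pybind11.h, as shown above
    namespace py = pybind11;

    PYBIND11_MODULE(example, m) {
        // Python: example.millis(datetime.timedelta(milliseconds=250)) -> 250
        m.def("millis", [](std::chrono::milliseconds d) { return d.count(); });
    }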
-
-// This is for casting times on the system clock into datetime.datetime instances
-template <typename Duration> class type_caster<std::chrono::time_point<std::chrono::system_clock, Duration>> {
-public:
-    typedef std::chrono::time_point<std::chrono::system_clock, Duration> type;
-    bool load(handle src, bool) {
-        using namespace std::chrono;
-
-        // Lazy initialise the PyDateTime import
-        if (!PyDateTimeAPI) { PyDateTime_IMPORT; }
-
-        if (!src) return false;
-        if (PyDateTime_Check(src.ptr())) {
-            std::tm cal;
-            cal.tm_sec   = PyDateTime_DATE_GET_SECOND(src.ptr());
-            cal.tm_min   = PyDateTime_DATE_GET_MINUTE(src.ptr());
-            cal.tm_hour  = PyDateTime_DATE_GET_HOUR(src.ptr());
-            cal.tm_mday  = PyDateTime_GET_DAY(src.ptr());
-            cal.tm_mon   = PyDateTime_GET_MONTH(src.ptr()) - 1;
-            cal.tm_year  = PyDateTime_GET_YEAR(src.ptr()) - 1900;
-            cal.tm_isdst = -1;
-
-            value = system_clock::from_time_t(std::mktime(&cal)) + microseconds(PyDateTime_DATE_GET_MICROSECOND(src.ptr()));
-            return true;
-        }
-        else return false;
-    }
-
-    static handle cast(const std::chrono::time_point<std::chrono::system_clock, Duration> &src, return_value_policy /* policy */, handle /* parent */) {
-        using namespace std::chrono;
-
-        // Lazy initialise the PyDateTime import
-        if (!PyDateTimeAPI) { PyDateTime_IMPORT; }
-
-        std::time_t tt = system_clock::to_time_t(src);
-        // this function uses static memory so it's best to copy it out asap just in case
-        // otherwise other code that is using localtime may break this (not just python code)
-        std::tm localtime = *std::localtime(&tt);
-
-        // Declare these special duration types so the conversions happen with the correct primitive types (int)
-        using us_t = duration<int, std::micro>;
-
-        return PyDateTime_FromDateAndTime(localtime.tm_year + 1900,
-                                          localtime.tm_mon + 1,
-                                          localtime.tm_mday,
-                                          localtime.tm_hour,
-                                          localtime.tm_min,
-                                          localtime.tm_sec,
-                                          (duration_cast<us_t>(src.time_since_epoch() % seconds(1))).count());
-    }
-    PYBIND11_TYPE_CASTER(type, _("datetime.datetime"));
-};
-
-// Other clocks that are not the system clock are not measured as datetime.datetime objects
-// since they are not measured on calendar time. So instead we just make them timedeltas
-// Or if they have passed us a time as a float we convert that
-template <typename Clock, typename Duration> class type_caster<std::chrono::time_point<Clock, Duration>>
-: public duration_caster<std::chrono::time_point<Clock, Duration>> {
-};
-
-template <typename Rep, typename Period> class type_caster<std::chrono::duration<Rep, Period>>
-: public duration_caster<std::chrono::duration<Rep, Period>> {
-};
-
-NAMESPACE_END(detail)
-NAMESPACE_END(pybind11)
diff --git a/lanms/include/pybind11/class_support.h b/lanms/include/pybind11/class_support.h
deleted file mode 100644
index 8e18c4c6..00000000
--- a/lanms/include/pybind11/class_support.h
+++ /dev/null
@@ -1,603 +0,0 @@
-/*
-    pybind11/class_support.h: Python C API implementation details for py::class_
-
-    Copyright (c) 2017 Wenzel Jakob <wenzel.jakob@epfl.ch>
-
-    All rights reserved. Use of this source code is governed by a
-    BSD-style license that can be found in the LICENSE file.
-*/
-
-#pragma once
-
-#include "attr.h"
-
-NAMESPACE_BEGIN(pybind11)
-NAMESPACE_BEGIN(detail)
-
-inline PyTypeObject *type_incref(PyTypeObject *type) {
-    Py_INCREF(type);
-    return type;
-}
-
-#if !defined(PYPY_VERSION)
-
-/// `pybind11_static_property.__get__()`: Always pass the class instead of the instance.
-extern "C" inline PyObject *pybind11_static_get(PyObject *self, PyObject * /*ob*/, PyObject *cls) {
-    return PyProperty_Type.tp_descr_get(self, cls, cls);
-}
-
-/// `pybind11_static_property.__set__()`: Just like the above `__get__()`.
-extern "C" inline int pybind11_static_set(PyObject *self, PyObject *obj, PyObject *value) {
-    PyObject *cls = PyType_Check(obj) ? obj : (PyObject *) Py_TYPE(obj);
-    return PyProperty_Type.tp_descr_set(self, cls, value);
-}
-
-/** A `static_property` is the same as a `property` but the `__get__()` and `__set__()`
-    methods are modified to always use the object type instead of a concrete instance.
-    Return value: New reference.
*/ -inline PyTypeObject *make_static_property_type() { - constexpr auto *name = "pybind11_static_property"; - auto name_obj = reinterpret_steal(PYBIND11_FROM_STRING(name)); - - /* Danger zone: from now (and until PyType_Ready), make sure to - issue no Python C API calls which could potentially invoke the - garbage collector (the GC will call type_traverse(), which will in - turn find the newly constructed type in an invalid state) */ - auto heap_type = (PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0); - if (!heap_type) - pybind11_fail("make_static_property_type(): error allocating type!"); - - heap_type->ht_name = name_obj.inc_ref().ptr(); -#if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 3 - heap_type->ht_qualname = name_obj.inc_ref().ptr(); -#endif - - auto type = &heap_type->ht_type; - type->tp_name = name; - type->tp_base = type_incref(&PyProperty_Type); - type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; - type->tp_descr_get = pybind11_static_get; - type->tp_descr_set = pybind11_static_set; - - if (PyType_Ready(type) < 0) - pybind11_fail("make_static_property_type(): failure in PyType_Ready()!"); - - setattr((PyObject *) type, "__module__", str("pybind11_builtins")); - - return type; -} - -#else // PYPY - -/** PyPy has some issues with the above C API, so we evaluate Python code instead. - This function will only be called once so performance isn't really a concern. - Return value: New reference. */ -inline PyTypeObject *make_static_property_type() { - auto d = dict(); - PyObject *result = PyRun_String(R"(\ - class pybind11_static_property(property): - def __get__(self, obj, cls): - return property.__get__(self, cls, cls) - - def __set__(self, obj, value): - cls = obj if isinstance(obj, type) else type(obj) - property.__set__(self, cls, value) - )", Py_file_input, d.ptr(), d.ptr() - ); - if (result == nullptr) - throw error_already_set(); - Py_DECREF(result); - return (PyTypeObject *) d["pybind11_static_property"].cast().release().ptr(); -} - -#endif // PYPY - -/** Types with static properties need to handle `Type.static_prop = x` in a specific way. - By default, Python replaces the `static_property` itself, but for wrapped C++ types - we need to call `static_property.__set__()` in order to propagate the new value to - the underlying C++ data structure. */ -extern "C" inline int pybind11_meta_setattro(PyObject* obj, PyObject* name, PyObject* value) { - // Use `_PyType_Lookup()` instead of `PyObject_GetAttr()` in order to get the raw - // descriptor (`property`) instead of calling `tp_descr_get` (`property.__get__()`). - PyObject *descr = _PyType_Lookup((PyTypeObject *) obj, name); - - // The following assignment combinations are possible: - // 1. `Type.static_prop = value` --> descr_set: `Type.static_prop.__set__(value)` - // 2. `Type.static_prop = other_static_prop` --> setattro: replace existing `static_prop` - // 3. `Type.regular_attribute = value` --> setattro: regular attribute assignment - const auto static_prop = (PyObject *) get_internals().static_property_type; - const auto call_descr_set = descr && PyObject_IsInstance(descr, static_prop) - && !PyObject_IsInstance(value, static_prop); - if (call_descr_set) { - // Call `static_property.__set__()` instead of replacing the `static_property`. 
-#if !defined(PYPY_VERSION) - return Py_TYPE(descr)->tp_descr_set(descr, obj, value); -#else - if (PyObject *result = PyObject_CallMethod(descr, "__set__", "OO", obj, value)) { - Py_DECREF(result); - return 0; - } else { - return -1; - } -#endif - } else { - // Replace existing attribute. - return PyType_Type.tp_setattro(obj, name, value); - } -} - -#if PY_MAJOR_VERSION >= 3 -/** - * Python 3's PyInstanceMethod_Type hides itself via its tp_descr_get, which prevents aliasing - * methods via cls.attr("m2") = cls.attr("m1"): instead the tp_descr_get returns a plain function, - * when called on a class, or a PyMethod, when called on an instance. Override that behaviour here - * to do a special case bypass for PyInstanceMethod_Types. - */ -extern "C" inline PyObject *pybind11_meta_getattro(PyObject *obj, PyObject *name) { - PyObject *descr = _PyType_Lookup((PyTypeObject *) obj, name); - if (descr && PyInstanceMethod_Check(descr)) { - Py_INCREF(descr); - return descr; - } - else { - return PyType_Type.tp_getattro(obj, name); - } -} -#endif - -/** This metaclass is assigned by default to all pybind11 types and is required in order - for static properties to function correctly. Users may override this using `py::metaclass`. - Return value: New reference. */ -inline PyTypeObject* make_default_metaclass() { - constexpr auto *name = "pybind11_type"; - auto name_obj = reinterpret_steal(PYBIND11_FROM_STRING(name)); - - /* Danger zone: from now (and until PyType_Ready), make sure to - issue no Python C API calls which could potentially invoke the - garbage collector (the GC will call type_traverse(), which will in - turn find the newly constructed type in an invalid state) */ - auto heap_type = (PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0); - if (!heap_type) - pybind11_fail("make_default_metaclass(): error allocating metaclass!"); - - heap_type->ht_name = name_obj.inc_ref().ptr(); -#if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 3 - heap_type->ht_qualname = name_obj.inc_ref().ptr(); -#endif - - auto type = &heap_type->ht_type; - type->tp_name = name; - type->tp_base = type_incref(&PyType_Type); - type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; - - type->tp_setattro = pybind11_meta_setattro; -#if PY_MAJOR_VERSION >= 3 - type->tp_getattro = pybind11_meta_getattro; -#endif - - if (PyType_Ready(type) < 0) - pybind11_fail("make_default_metaclass(): failure in PyType_Ready()!"); - - setattr((PyObject *) type, "__module__", str("pybind11_builtins")); - - return type; -} - -/// For multiple inheritance types we need to recursively register/deregister base pointers for any -/// base classes with pointers that are difference from the instance value pointer so that we can -/// correctly recognize an offset base class pointer. This calls a function with any offset base ptrs. 
-inline void traverse_offset_bases(void *valueptr, const detail::type_info *tinfo, instance *self, - bool (*f)(void * /*parentptr*/, instance * /*self*/)) { - for (handle h : reinterpret_borrow(tinfo->type->tp_bases)) { - if (auto parent_tinfo = get_type_info((PyTypeObject *) h.ptr())) { - for (auto &c : parent_tinfo->implicit_casts) { - if (c.first == tinfo->cpptype) { - auto *parentptr = c.second(valueptr); - if (parentptr != valueptr) - f(parentptr, self); - traverse_offset_bases(parentptr, parent_tinfo, self, f); - break; - } - } - } - } -} - -inline bool register_instance_impl(void *ptr, instance *self) { - get_internals().registered_instances.emplace(ptr, self); - return true; // unused, but gives the same signature as the deregister func -} -inline bool deregister_instance_impl(void *ptr, instance *self) { - auto ®istered_instances = get_internals().registered_instances; - auto range = registered_instances.equal_range(ptr); - for (auto it = range.first; it != range.second; ++it) { - if (Py_TYPE(self) == Py_TYPE(it->second)) { - registered_instances.erase(it); - return true; - } - } - return false; -} - -inline void register_instance(instance *self, void *valptr, const type_info *tinfo) { - register_instance_impl(valptr, self); - if (!tinfo->simple_ancestors) - traverse_offset_bases(valptr, tinfo, self, register_instance_impl); -} - -inline bool deregister_instance(instance *self, void *valptr, const type_info *tinfo) { - bool ret = deregister_instance_impl(valptr, self); - if (!tinfo->simple_ancestors) - traverse_offset_bases(valptr, tinfo, self, deregister_instance_impl); - return ret; -} - -/// Instance creation function for all pybind11 types. It only allocates space for the C++ object -/// (or multiple objects, for Python-side inheritance from multiple pybind11 types), but doesn't -/// call the constructor -- an `__init__` function must do that (followed by an `init_instance` -/// to set up the holder and register the instance). -inline PyObject *make_new_instance(PyTypeObject *type, bool allocate_value /*= true (in cast.h)*/) { -#if defined(PYPY_VERSION) - // PyPy gets tp_basicsize wrong (issue 2482) under multiple inheritance when the first inherited - // object is a a plain Python type (i.e. not derived from an extension type). Fix it. - ssize_t instance_size = static_cast(sizeof(instance)); - if (type->tp_basicsize < instance_size) { - type->tp_basicsize = instance_size; - } -#endif - PyObject *self = type->tp_alloc(type, 0); - auto inst = reinterpret_cast(self); - // Allocate the value/holder internals: - inst->allocate_layout(); - - inst->owned = true; - // Allocate (if requested) the value pointers; otherwise leave them as nullptr - if (allocate_value) { - for (auto &v_h : values_and_holders(inst)) { - void *&vptr = v_h.value_ptr(); - vptr = v_h.type->operator_new(v_h.type->type_size); - } - } - - return self; -} - -/// Instance creation function for all pybind11 types. It only allocates space for the -/// C++ object, but doesn't call the constructor -- an `__init__` function must do that. -extern "C" inline PyObject *pybind11_object_new(PyTypeObject *type, PyObject *, PyObject *) { - return make_new_instance(type); -} - -/// An `__init__` function constructs the C++ object. Users should provide at least one -/// of these using `py::init` or directly with `.def(__init__, ...)`. Otherwise, the -/// following default function will be used which simply throws an exception. 
-extern "C" inline int pybind11_object_init(PyObject *self, PyObject *, PyObject *) { - PyTypeObject *type = Py_TYPE(self); - std::string msg; -#if defined(PYPY_VERSION) - msg += handle((PyObject *) type).attr("__module__").cast() + "."; -#endif - msg += type->tp_name; - msg += ": No constructor defined!"; - PyErr_SetString(PyExc_TypeError, msg.c_str()); - return -1; -} - -inline void add_patient(PyObject *nurse, PyObject *patient) { - auto &internals = get_internals(); - auto instance = reinterpret_cast(nurse); - instance->has_patients = true; - Py_INCREF(patient); - internals.patients[nurse].push_back(patient); -} - -inline void clear_patients(PyObject *self) { - auto instance = reinterpret_cast(self); - auto &internals = get_internals(); - auto pos = internals.patients.find(self); - assert(pos != internals.patients.end()); - // Clearing the patients can cause more Python code to run, which - // can invalidate the iterator. Extract the vector of patients - // from the unordered_map first. - auto patients = std::move(pos->second); - internals.patients.erase(pos); - instance->has_patients = false; - for (PyObject *&patient : patients) - Py_CLEAR(patient); -} - -/// Clears all internal data from the instance and removes it from registered instances in -/// preparation for deallocation. -inline void clear_instance(PyObject *self) { - auto instance = reinterpret_cast(self); - - // Deallocate any values/holders, if present: - for (auto &v_h : values_and_holders(instance)) { - if (v_h) { - - // We have to deregister before we call dealloc because, for virtual MI types, we still - // need to be able to get the parent pointers. - if (v_h.instance_registered() && !deregister_instance(instance, v_h.value_ptr(), v_h.type)) - pybind11_fail("pybind11_object_dealloc(): Tried to deallocate unregistered instance!"); - - if (instance->owned || v_h.holder_constructed()) - v_h.type->dealloc(v_h); - } - } - // Deallocate the value/holder layout internals: - instance->deallocate_layout(); - - if (instance->weakrefs) - PyObject_ClearWeakRefs(self); - - PyObject **dict_ptr = _PyObject_GetDictPtr(self); - if (dict_ptr) - Py_CLEAR(*dict_ptr); - - if (instance->has_patients) - clear_patients(self); -} - -/// Instance destructor function for all pybind11 types. It calls `type_info.dealloc` -/// to destroy the C++ object itself, while the rest is Python bookkeeping. -extern "C" inline void pybind11_object_dealloc(PyObject *self) { - clear_instance(self); - Py_TYPE(self)->tp_free(self); -} - -/** Create the type which can be used as a common base for all classes. This is - needed in order to satisfy Python's requirements for multiple inheritance. - Return value: New reference. 
*/ -inline PyObject *make_object_base_type(PyTypeObject *metaclass) { - constexpr auto *name = "pybind11_object"; - auto name_obj = reinterpret_steal(PYBIND11_FROM_STRING(name)); - - /* Danger zone: from now (and until PyType_Ready), make sure to - issue no Python C API calls which could potentially invoke the - garbage collector (the GC will call type_traverse(), which will in - turn find the newly constructed type in an invalid state) */ - auto heap_type = (PyHeapTypeObject *) metaclass->tp_alloc(metaclass, 0); - if (!heap_type) - pybind11_fail("make_object_base_type(): error allocating type!"); - - heap_type->ht_name = name_obj.inc_ref().ptr(); -#if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 3 - heap_type->ht_qualname = name_obj.inc_ref().ptr(); -#endif - - auto type = &heap_type->ht_type; - type->tp_name = name; - type->tp_base = type_incref(&PyBaseObject_Type); - type->tp_basicsize = static_cast(sizeof(instance)); - type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; - - type->tp_new = pybind11_object_new; - type->tp_init = pybind11_object_init; - type->tp_dealloc = pybind11_object_dealloc; - - /* Support weak references (needed for the keep_alive feature) */ - type->tp_weaklistoffset = offsetof(instance, weakrefs); - - if (PyType_Ready(type) < 0) - pybind11_fail("PyType_Ready failed in make_object_base_type():" + error_string()); - - setattr((PyObject *) type, "__module__", str("pybind11_builtins")); - - assert(!PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC)); - return (PyObject *) heap_type; -} - -/// dynamic_attr: Support for `d = instance.__dict__`. -extern "C" inline PyObject *pybind11_get_dict(PyObject *self, void *) { - PyObject *&dict = *_PyObject_GetDictPtr(self); - if (!dict) - dict = PyDict_New(); - Py_XINCREF(dict); - return dict; -} - -/// dynamic_attr: Support for `instance.__dict__ = dict()`. -extern "C" inline int pybind11_set_dict(PyObject *self, PyObject *new_dict, void *) { - if (!PyDict_Check(new_dict)) { - PyErr_Format(PyExc_TypeError, "__dict__ must be set to a dictionary, not a '%.200s'", - Py_TYPE(new_dict)->tp_name); - return -1; - } - PyObject *&dict = *_PyObject_GetDictPtr(self); - Py_INCREF(new_dict); - Py_CLEAR(dict); - dict = new_dict; - return 0; -} - -/// dynamic_attr: Allow the garbage collector to traverse the internal instance `__dict__`. -extern "C" inline int pybind11_traverse(PyObject *self, visitproc visit, void *arg) { - PyObject *&dict = *_PyObject_GetDictPtr(self); - Py_VISIT(dict); - return 0; -} - -/// dynamic_attr: Allow the GC to clear the dictionary. -extern "C" inline int pybind11_clear(PyObject *self) { - PyObject *&dict = *_PyObject_GetDictPtr(self); - Py_CLEAR(dict); - return 0; -} - -/// Give instances of this type a `__dict__` and opt into garbage collection. 
-inline void enable_dynamic_attributes(PyHeapTypeObject *heap_type) { - auto type = &heap_type->ht_type; -#if defined(PYPY_VERSION) - pybind11_fail(std::string(type->tp_name) + ": dynamic attributes are " - "currently not supported in " - "conjunction with PyPy!"); -#endif - type->tp_flags |= Py_TPFLAGS_HAVE_GC; - type->tp_dictoffset = type->tp_basicsize; // place dict at the end - type->tp_basicsize += (ssize_t)sizeof(PyObject *); // and allocate enough space for it - type->tp_traverse = pybind11_traverse; - type->tp_clear = pybind11_clear; - - static PyGetSetDef getset[] = { - {const_cast("__dict__"), pybind11_get_dict, pybind11_set_dict, nullptr, nullptr}, - {nullptr, nullptr, nullptr, nullptr, nullptr} - }; - type->tp_getset = getset; -} - -/// buffer_protocol: Fill in the view as specified by flags. -extern "C" inline int pybind11_getbuffer(PyObject *obj, Py_buffer *view, int flags) { - // Look for a `get_buffer` implementation in this type's info or any bases (following MRO). - type_info *tinfo = nullptr; - for (auto type : reinterpret_borrow(Py_TYPE(obj)->tp_mro)) { - tinfo = get_type_info((PyTypeObject *) type.ptr()); - if (tinfo && tinfo->get_buffer) - break; - } - if (view == nullptr || obj == nullptr || !tinfo || !tinfo->get_buffer) { - if (view) - view->obj = nullptr; - PyErr_SetString(PyExc_BufferError, "pybind11_getbuffer(): Internal error"); - return -1; - } - std::memset(view, 0, sizeof(Py_buffer)); - buffer_info *info = tinfo->get_buffer(obj, tinfo->get_buffer_data); - view->obj = obj; - view->ndim = 1; - view->internal = info; - view->buf = info->ptr; - view->itemsize = info->itemsize; - view->len = view->itemsize; - for (auto s : info->shape) - view->len *= s; - if ((flags & PyBUF_FORMAT) == PyBUF_FORMAT) - view->format = const_cast(info->format.c_str()); - if ((flags & PyBUF_STRIDES) == PyBUF_STRIDES) { - view->ndim = (int) info->ndim; - view->strides = &info->strides[0]; - view->shape = &info->shape[0]; - } - Py_INCREF(view->obj); - return 0; -} - -/// buffer_protocol: Release the resources of the buffer. -extern "C" inline void pybind11_releasebuffer(PyObject *, Py_buffer *view) { - delete (buffer_info *) view->internal; -} - -/// Give this type a buffer interface. -inline void enable_buffer_protocol(PyHeapTypeObject *heap_type) { - heap_type->ht_type.tp_as_buffer = &heap_type->as_buffer; -#if PY_MAJOR_VERSION < 3 - heap_type->ht_type.tp_flags |= Py_TPFLAGS_HAVE_NEWBUFFER; -#endif - - heap_type->as_buffer.bf_getbuffer = pybind11_getbuffer; - heap_type->as_buffer.bf_releasebuffer = pybind11_releasebuffer; -} - -/** Create a brand new Python type according to the `type_record` specification. - Return value: New reference. */ -inline PyObject* make_new_python_type(const type_record &rec) { - auto name = reinterpret_steal(PYBIND11_FROM_STRING(rec.name)); - -#if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 3 - auto ht_qualname = name; - if (rec.scope && hasattr(rec.scope, "__qualname__")) { - ht_qualname = reinterpret_steal( - PyUnicode_FromFormat("%U.%U", rec.scope.attr("__qualname__").ptr(), name.ptr())); - } -#endif - - object module; - if (rec.scope) { - if (hasattr(rec.scope, "__module__")) - module = rec.scope.attr("__module__"); - else if (hasattr(rec.scope, "__name__")) - module = rec.scope.attr("__name__"); - } - -#if !defined(PYPY_VERSION) - const auto full_name = module ? str(module).cast() + "." 
+ rec.name - : std::string(rec.name); -#else - const auto full_name = std::string(rec.name); -#endif - - char *tp_doc = nullptr; - if (rec.doc && options::show_user_defined_docstrings()) { - /* Allocate memory for docstring (using PyObject_MALLOC, since - Python will free this later on) */ - size_t size = strlen(rec.doc) + 1; - tp_doc = (char *) PyObject_MALLOC(size); - memcpy((void *) tp_doc, rec.doc, size); - } - - auto &internals = get_internals(); - auto bases = tuple(rec.bases); - auto base = (bases.size() == 0) ? internals.instance_base - : bases[0].ptr(); - - /* Danger zone: from now (and until PyType_Ready), make sure to - issue no Python C API calls which could potentially invoke the - garbage collector (the GC will call type_traverse(), which will in - turn find the newly constructed type in an invalid state) */ - auto metaclass = rec.metaclass.ptr() ? (PyTypeObject *) rec.metaclass.ptr() - : internals.default_metaclass; - - auto heap_type = (PyHeapTypeObject *) metaclass->tp_alloc(metaclass, 0); - if (!heap_type) - pybind11_fail(std::string(rec.name) + ": Unable to create type object!"); - - heap_type->ht_name = name.release().ptr(); -#if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 3 - heap_type->ht_qualname = ht_qualname.release().ptr(); -#endif - - auto type = &heap_type->ht_type; - type->tp_name = strdup(full_name.c_str()); - type->tp_doc = tp_doc; - type->tp_base = type_incref((PyTypeObject *)base); - type->tp_basicsize = static_cast(sizeof(instance)); - if (bases.size() > 0) - type->tp_bases = bases.release().ptr(); - - /* Don't inherit base __init__ */ - type->tp_init = pybind11_object_init; - - /* Supported protocols */ - type->tp_as_number = &heap_type->as_number; - type->tp_as_sequence = &heap_type->as_sequence; - type->tp_as_mapping = &heap_type->as_mapping; - - /* Flags */ - type->tp_flags |= Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; -#if PY_MAJOR_VERSION < 3 - type->tp_flags |= Py_TPFLAGS_CHECKTYPES; -#endif - - if (rec.dynamic_attr) - enable_dynamic_attributes(heap_type); - - if (rec.buffer_protocol) - enable_buffer_protocol(heap_type); - - if (PyType_Ready(type) < 0) - pybind11_fail(std::string(rec.name) + ": PyType_Ready failed (" + error_string() + ")!"); - - assert(rec.dynamic_attr ? PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC) - : !PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC)); - - /* Register type with the parent scope */ - if (rec.scope) - setattr(rec.scope, rec.name, (PyObject *) type); - - if (module) // Needed by pydoc - setattr((PyObject *) type, "__module__", module); - - return (PyObject *) type; -} - -NAMESPACE_END(detail) -NAMESPACE_END(pybind11) diff --git a/lanms/include/pybind11/common.h b/lanms/include/pybind11/common.h deleted file mode 100644 index 240f6d8e..00000000 --- a/lanms/include/pybind11/common.h +++ /dev/null @@ -1,857 +0,0 @@ -/* - pybind11/common.h -- Basic macros - - Copyright (c) 2016 Wenzel Jakob - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. 
-*/ - -#pragma once - -#if !defined(NAMESPACE_BEGIN) -# define NAMESPACE_BEGIN(name) namespace name { -#endif -#if !defined(NAMESPACE_END) -# define NAMESPACE_END(name) } -#endif - -#if !defined(_MSC_VER) && !defined(__INTEL_COMPILER) -# if __cplusplus >= 201402L -# define PYBIND11_CPP14 -# if __cplusplus > 201402L /* Temporary: should be updated to >= the final C++17 value once known */ -# define PYBIND11_CPP17 -# endif -# endif -#elif defined(_MSC_VER) -// MSVC sets _MSVC_LANG rather than __cplusplus (supposedly until the standard is fully implemented) -# if _MSVC_LANG >= 201402L -# define PYBIND11_CPP14 -# if _MSVC_LANG > 201402L && _MSC_VER >= 1910 -# define PYBIND11_CPP17 -# endif -# endif -#endif - -// Compiler version assertions -#if defined(__INTEL_COMPILER) -# if __INTEL_COMPILER < 1500 -# error pybind11 requires Intel C++ compiler v15 or newer -# endif -#elif defined(__clang__) && !defined(__apple_build_version__) -# if __clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ < 3) -# error pybind11 requires clang 3.3 or newer -# endif -#elif defined(__clang__) -// Apple changes clang version macros to its Xcode version; the first Xcode release based on -// (upstream) clang 3.3 was Xcode 5: -# if __clang_major__ < 5 -# error pybind11 requires Xcode/clang 5.0 or newer -# endif -#elif defined(__GNUG__) -# if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8) -# error pybind11 requires gcc 4.8 or newer -# endif -#elif defined(_MSC_VER) -// Pybind hits various compiler bugs in 2015u2 and earlier, and also makes use of some stl features -// (e.g. std::negation) added in 2015u3: -# if _MSC_FULL_VER < 190024210 -# error pybind11 requires MSVC 2015 update 3 or newer -# endif -#endif - -#if !defined(PYBIND11_EXPORT) -# if defined(WIN32) || defined(_WIN32) -# define PYBIND11_EXPORT __declspec(dllexport) -# else -# define PYBIND11_EXPORT __attribute__ ((visibility("default"))) -# endif -#endif - -#if defined(_MSC_VER) -# define PYBIND11_NOINLINE __declspec(noinline) -#else -# define PYBIND11_NOINLINE __attribute__ ((noinline)) -#endif - -#if defined(PYBIND11_CPP14) -# define PYBIND11_DEPRECATED(reason) [[deprecated(reason)]] -#else -# define PYBIND11_DEPRECATED(reason) __attribute__((deprecated(reason))) -#endif - -#define PYBIND11_VERSION_MAJOR 2 -#define PYBIND11_VERSION_MINOR 2 -#define PYBIND11_VERSION_PATCH dev0 - -/// Include Python header, disable linking to pythonX_d.lib on Windows in debug mode -#if defined(_MSC_VER) -# if (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 4) -# define HAVE_ROUND 1 -# endif -# pragma warning(push) -# pragma warning(disable: 4510 4610 4512 4005) -# if defined(_DEBUG) -# define PYBIND11_DEBUG_MARKER -# undef _DEBUG -# endif -#endif - -#include -#include -#include - -#if defined(_WIN32) && (defined(min) || defined(max)) -# error Macro clash with min and max -- define NOMINMAX when compiling your program on Windows -#endif - -#if defined(isalnum) -# undef isalnum -# undef isalpha -# undef islower -# undef isspace -# undef isupper -# undef tolower -# undef toupper -#endif - -#if defined(_MSC_VER) -# if defined(PYBIND11_DEBUG_MARKER) -# define _DEBUG -# undef PYBIND11_DEBUG_MARKER -# endif -# pragma warning(pop) -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if PY_MAJOR_VERSION >= 3 /// Compatibility macros for various Python versions -#define PYBIND11_INSTANCE_METHOD_NEW(ptr, class_) PyInstanceMethod_New(ptr) -#define PYBIND11_INSTANCE_METHOD_CHECK PyInstanceMethod_Check 
-#define PYBIND11_INSTANCE_METHOD_GET_FUNCTION PyInstanceMethod_GET_FUNCTION
-#define PYBIND11_BYTES_CHECK PyBytes_Check
-#define PYBIND11_BYTES_FROM_STRING PyBytes_FromString
-#define PYBIND11_BYTES_FROM_STRING_AND_SIZE PyBytes_FromStringAndSize
-#define PYBIND11_BYTES_AS_STRING_AND_SIZE PyBytes_AsStringAndSize
-#define PYBIND11_BYTES_AS_STRING PyBytes_AsString
-#define PYBIND11_BYTES_SIZE PyBytes_Size
-#define PYBIND11_LONG_CHECK(o) PyLong_Check(o)
-#define PYBIND11_LONG_AS_LONGLONG(o) PyLong_AsLongLong(o)
-#define PYBIND11_BYTES_NAME "bytes"
-#define PYBIND11_STRING_NAME "str"
-#define PYBIND11_SLICE_OBJECT PyObject
-#define PYBIND11_FROM_STRING PyUnicode_FromString
-#define PYBIND11_STR_TYPE ::pybind11::str
-#define PYBIND11_BOOL_ATTR "__bool__"
-#define PYBIND11_NB_BOOL(ptr) ((ptr)->nb_bool)
-#define PYBIND11_PLUGIN_IMPL(name) \
-    extern "C" PYBIND11_EXPORT PyObject *PyInit_##name()
-
-#else
-#define PYBIND11_INSTANCE_METHOD_NEW(ptr, class_) PyMethod_New(ptr, nullptr, class_)
-#define PYBIND11_INSTANCE_METHOD_CHECK PyMethod_Check
-#define PYBIND11_INSTANCE_METHOD_GET_FUNCTION PyMethod_GET_FUNCTION
-#define PYBIND11_BYTES_CHECK PyString_Check
-#define PYBIND11_BYTES_FROM_STRING PyString_FromString
-#define PYBIND11_BYTES_FROM_STRING_AND_SIZE PyString_FromStringAndSize
-#define PYBIND11_BYTES_AS_STRING_AND_SIZE PyString_AsStringAndSize
-#define PYBIND11_BYTES_AS_STRING PyString_AsString
-#define PYBIND11_BYTES_SIZE PyString_Size
-#define PYBIND11_LONG_CHECK(o) (PyInt_Check(o) || PyLong_Check(o))
-#define PYBIND11_LONG_AS_LONGLONG(o) (PyInt_Check(o) ? (long long) PyLong_AsLong(o) : PyLong_AsLongLong(o))
-#define PYBIND11_BYTES_NAME "str"
-#define PYBIND11_STRING_NAME "unicode"
-#define PYBIND11_SLICE_OBJECT PySliceObject
-#define PYBIND11_FROM_STRING PyString_FromString
-#define PYBIND11_STR_TYPE ::pybind11::bytes
-#define PYBIND11_BOOL_ATTR "__nonzero__"
-#define PYBIND11_NB_BOOL(ptr) ((ptr)->nb_nonzero)
-#define PYBIND11_PLUGIN_IMPL(name) \
-    static PyObject *pybind11_init_wrapper();      \
-    extern "C" PYBIND11_EXPORT void init##name() { \
-        (void)pybind11_init_wrapper();             \
-    }                                              \
-    PyObject *pybind11_init_wrapper()
-#endif
-
-#if PY_VERSION_HEX >= 0x03050000 && PY_VERSION_HEX < 0x03050200
-extern "C" {
-    struct _Py_atomic_address { void *value; };
-    PyAPI_DATA(_Py_atomic_address) _PyThreadState_Current;
-}
-#endif
-
-#define PYBIND11_TRY_NEXT_OVERLOAD ((PyObject *) 1) // special failure return code
-#define PYBIND11_STRINGIFY(x) #x
-#define PYBIND11_TOSTRING(x) PYBIND11_STRINGIFY(x)
-#define PYBIND11_INTERNALS_ID "__pybind11_" \
-    PYBIND11_TOSTRING(PYBIND11_VERSION_MAJOR) "_" PYBIND11_TOSTRING(PYBIND11_VERSION_MINOR) "__"
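These paired definitions let the rest of the codebase use one spelling per concept regardless of the interpreter's major version. For example (an illustrative fragment, not part of the deleted file; `obj` stands for some `py::bytes` already in hand):

    // Expands to PyBytes_AsString under Python 3 and PyString_AsString under Python 2:
    const char *p = PYBIND11_BYTES_AS_STRING(obj.ptr());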
-
-/** \rst
-    ***Deprecated in favor of PYBIND11_MODULE***
-
-    This macro creates the entry point that will be invoked when the Python interpreter
-    imports a plugin library. Please create a `module` in the function body and return
-    the pointer to its underlying Python object at the end.
-
-    .. code-block:: cpp
-
-        PYBIND11_PLUGIN(example) {
-            pybind11::module m("example", "pybind11 example plugin");
-            /// Set up bindings here
-            return m.ptr();
-        }
-\endrst */
-#define PYBIND11_PLUGIN(name)                                                  \
-    PYBIND11_DEPRECATED("PYBIND11_PLUGIN is deprecated, use PYBIND11_MODULE")  \
-    static PyObject *pybind11_init();                                          \
-    PYBIND11_PLUGIN_IMPL(name) {                                               \
-        int major, minor;                                                      \
-        if (sscanf(Py_GetVersion(), "%i.%i", &major, &minor) != 2) {           \
-            PyErr_SetString(PyExc_ImportError, "Can't parse Python version."); \
-            return nullptr;                                                    \
-        } else if (major != PY_MAJOR_VERSION || minor != PY_MINOR_VERSION) {   \
-            PyErr_Format(PyExc_ImportError,                                    \
-                         "Python version mismatch: module was compiled for "   \
-                         "version %i.%i, while the interpreter is running "    \
-                         "version %i.%i.", PY_MAJOR_VERSION, PY_MINOR_VERSION, \
-                         major, minor);                                        \
-            return nullptr;                                                    \
-        }                                                                      \
-        try {                                                                  \
-            return pybind11_init();                                            \
-        } catch (pybind11::error_already_set &e) {                             \
-            PyErr_SetString(PyExc_ImportError, e.what());                      \
-            return nullptr;                                                    \
-        } catch (const std::exception &e) {                                    \
-            PyErr_SetString(PyExc_ImportError, e.what());                      \
-            return nullptr;                                                    \
-        }                                                                      \
-    }                                                                          \
-    PyObject *pybind11_init()
-
-/** \rst
-    This macro creates the entry point that will be invoked when the Python interpreter
-    imports an extension module. The module name is given as the first argument and it
-    should not be in quotes. The second macro argument defines a variable of type
-    `py::module` which can be used to initialize the module.
-
-    .. code-block:: cpp
-
-        PYBIND11_MODULE(example, m) {
-            m.doc() = "pybind11 example module";
-
-            // Add bindings here
-            m.def("foo", []() {
-                return "Hello, World!";
-            });
-        }
-\endrst */
-#define PYBIND11_MODULE(name, variable)                                        \
-    static void pybind11_init_##name(pybind11::module &);                      \
-    PYBIND11_PLUGIN_IMPL(name) {                                               \
-        int major, minor;                                                      \
-        if (sscanf(Py_GetVersion(), "%i.%i", &major, &minor) != 2) {           \
-            PyErr_SetString(PyExc_ImportError, "Can't parse Python version."); \
-            return nullptr;                                                    \
-        } else if (major != PY_MAJOR_VERSION || minor != PY_MINOR_VERSION) {   \
-            PyErr_Format(PyExc_ImportError,                                    \
-                         "Python version mismatch: module was compiled for "   \
-                         "version %i.%i, while the interpreter is running "    \
-                         "version %i.%i.", PY_MAJOR_VERSION, PY_MINOR_VERSION, \
-                         major, minor);                                        \
-            return nullptr;                                                    \
-        }                                                                      \
-        auto m = pybind11::module(#name);                                      \
-        try {                                                                  \
-            pybind11_init_##name(m);                                           \
-            return m.ptr();                                                    \
-        } catch (pybind11::error_already_set &e) {                             \
-            PyErr_SetString(PyExc_ImportError, e.what());                      \
-            return nullptr;                                                    \
-        } catch (const std::exception &e) {                                    \
-            PyErr_SetString(PyExc_ImportError, e.what());                      \
-            return nullptr;                                                    \
-        }                                                                      \
-    }                                                                          \
-    void pybind11_init_##name(pybind11::module &variable)
-
-
-NAMESPACE_BEGIN(pybind11)
-
-using ssize_t = Py_ssize_t;
-using size_t  = std::size_t;
-
-/// Approach used to cast a previously unknown C++ instance into a Python object
-enum class return_value_policy : uint8_t {
-    /** This is the default return value policy, which falls back to the policy
-        return_value_policy::take_ownership when the return value is a pointer.
-        Otherwise, it uses return_value_policy::move or return_value_policy::copy
-        for rvalue and lvalue references, respectively. See below for a description
-        of what all of these different policies do. */
-    automatic = 0,
-
-    /** As above, but use policy return_value_policy::reference when the return
-        value is a pointer. This is the default conversion policy for function
-        arguments when calling Python functions manually from C++ code (i.e. via
-        handle::operator()).
You probably won't need to use this. */ - automatic_reference, - - /** Reference an existing object (i.e. do not create a new copy) and take - ownership. Python will call the destructor and delete operator when the - object’s reference count reaches zero. Undefined behavior ensues when - the C++ side does the same.. */ - take_ownership, - - /** Create a new copy of the returned object, which will be owned by - Python. This policy is comparably safe because the lifetimes of the two - instances are decoupled. */ - copy, - - /** Use std::move to move the return value contents into a new instance - that will be owned by Python. This policy is comparably safe because the - lifetimes of the two instances (move source and destination) are - decoupled. */ - move, - - /** Reference an existing object, but do not take ownership. The C++ side - is responsible for managing the object’s lifetime and deallocating it - when it is no longer used. Warning: undefined behavior will ensue when - the C++ side deletes an object that is still referenced and used by - Python. */ - reference, - - /** This policy only applies to methods and properties. It references the - object without taking ownership similar to the above - return_value_policy::reference policy. In contrast to that policy, the - function or property’s implicit this argument (called the parent) is - considered to be the the owner of the return value (the child). - pybind11 then couples the lifetime of the parent to the child via a - reference relationship that ensures that the parent cannot be garbage - collected while Python is still using the child. More advanced - variations of this scheme are also possible using combinations of - return_value_policy::reference and the keep_alive call policy */ - reference_internal -}; - -NAMESPACE_BEGIN(detail) - -inline static constexpr int log2(size_t n, int k = 0) { return (n <= 1) ? k : log2(n >> 1, k + 1); } - -// Returns the size as a multiple of sizeof(void *), rounded up. -inline static constexpr size_t size_in_ptrs(size_t s) { return 1 + ((s - 1) >> log2(sizeof(void *))); } - -/** - * The space to allocate for simple layout instance holders (see below) in multiple of the size of - * a pointer (e.g. 2 means 16 bytes on 64-bit architectures). The default is the minimum required - * to holder either a std::unique_ptr or std::shared_ptr (which is almost always - * sizeof(std::shared_ptr)). - */ -constexpr size_t instance_simple_holder_in_ptrs() { - static_assert(sizeof(std::shared_ptr) >= sizeof(std::unique_ptr), - "pybind assumes std::shared_ptrs are at least as big as std::unique_ptrs"); - return size_in_ptrs(sizeof(std::shared_ptr)); -} - -// Forward declarations -struct type_info; -struct value_and_holder; - -/// The 'instance' type which needs to be standard layout (need to be able to use 'offsetof') -struct instance { - PyObject_HEAD - /// Storage for pointers and holder; see simple_layout, below, for a description - union { - void *simple_value_holder[1 + instance_simple_holder_in_ptrs()]; - struct { - void **values_and_holders; - uint8_t *status; - } nonsimple; - }; - /// Weak references (needed for keep alive): - PyObject *weakrefs; - /// If true, the pointer is owned which means we're free to manage it with a holder. - bool owned : 1; - /** - * An instance has two possible value/holder layouts. - * - * Simple layout (when this flag is true), means the `simple_value_holder` is set with a pointer - * and the holder object governing that pointer, i.e. [val1*][holder]. 
This layout is applied - * whenever there is no python-side multiple inheritance of bound C++ types *and* the type's - * holder will fit in the default space (which is large enough to hold either a std::unique_ptr - * or std::shared_ptr). - * - * Non-simple layout applies when using custom holders that require more space than `shared_ptr` - * (which is typically the size of two pointers), or when multiple inheritance is used on the - * python side. Non-simple layout allocates the required amount of memory to have multiple - * bound C++ classes as parents. Under this layout, `nonsimple.values_and_holders` is set to a - * pointer to allocated space of the required space to hold a a sequence of value pointers and - * holders followed `status`, a set of bit flags (1 byte each), i.e. - * [val1*][holder1][val2*][holder2]...[bb...] where each [block] is rounded up to a multiple of - * `sizeof(void *)`. `nonsimple.holder_constructed` is, for convenience, a pointer to the - * beginning of the [bb...] block (but not independently allocated). - * - * Status bits indicate whether the associated holder is constructed (& - * status_holder_constructed) and whether the value pointer is registered (& - * status_instance_registered) in `registered_instances`. - */ - bool simple_layout : 1; - /// For simple layout, tracks whether the holder has been constructed - bool simple_holder_constructed : 1; - /// For simple layout, tracks whether the instance is registered in `registered_instances` - bool simple_instance_registered : 1; - /// If true, get_internals().patients has an entry for this object - bool has_patients : 1; - - /// Initializes all of the above type/values/holders data - void allocate_layout(); - - /// Destroys/deallocates all of the above - void deallocate_layout(); - - /// Returns the value_and_holder wrapper for the given type (or the first, if `find_type` - /// omitted) - value_and_holder get_value_and_holder(const type_info *find_type = nullptr); - - /// Bit values for the non-simple status flags - static constexpr uint8_t status_holder_constructed = 1; - static constexpr uint8_t status_instance_registered = 2; -}; - -static_assert(std::is_standard_layout::value, "Internal error: `pybind11::detail::instance` is not standard layout!"); - -struct overload_hash { - inline size_t operator()(const std::pair& v) const { - size_t value = std::hash()(v.first); - value ^= std::hash()(v.second) + 0x9e3779b9 + (value<<6) + (value>>2); - return value; - } -}; - -// Python loads modules by default with dlopen with the RTLD_LOCAL flag; under libc++ and possibly -// other stls, this means `typeid(A)` from one module won't equal `typeid(A)` from another module -// even when `A` is the same, non-hidden-visibility type (e.g. from a common include). Under -// stdlibc++, this doesn't happen: equality and the type_index hash are based on the type name, -// which works. If not under a known-good stl, provide our own name-based hasher and equality -// functions that use the type name. 
-#if defined(__GLIBCXX__) -inline bool same_type(const std::type_info &lhs, const std::type_info &rhs) { return lhs == rhs; } -using type_hash = std::hash; -using type_equal_to = std::equal_to; -#else -inline bool same_type(const std::type_info &lhs, const std::type_info &rhs) { - return lhs.name() == rhs.name() || - std::strcmp(lhs.name(), rhs.name()) == 0; -} -struct type_hash { - size_t operator()(const std::type_index &t) const { - size_t hash = 5381; - const char *ptr = t.name(); - while (auto c = static_cast(*ptr++)) - hash = (hash * 33) ^ c; - return hash; - } -}; -struct type_equal_to { - bool operator()(const std::type_index &lhs, const std::type_index &rhs) const { - return lhs.name() == rhs.name() || - std::strcmp(lhs.name(), rhs.name()) == 0; - } -}; -#endif - -template -using type_map = std::unordered_map; - -/// Internal data structure used to track registered instances and types -struct internals { - type_map registered_types_cpp; // std::type_index -> type_info - std::unordered_map> registered_types_py; // PyTypeObject* -> base type_info(s) - std::unordered_multimap registered_instances; // void * -> instance* - std::unordered_set, overload_hash> inactive_overload_cache; - type_map> direct_conversions; - std::unordered_map> patients; - std::forward_list registered_exception_translators; - std::unordered_map shared_data; // Custom data to be shared across extensions - std::vector loader_patient_stack; // Used by `loader_life_support` - PyTypeObject *static_property_type; - PyTypeObject *default_metaclass; - PyObject *instance_base; -#if defined(WITH_THREAD) - decltype(PyThread_create_key()) tstate = 0; // Usually an int but a long on Cygwin64 with Python 3.x - PyInterpreterState *istate = nullptr; -#endif -}; - -/// Return a reference to the current 'internals' information -inline internals &get_internals(); - -/// from __cpp_future__ import (convenient aliases from C++14/17) -#if defined(PYBIND11_CPP14) && (!defined(_MSC_VER) || _MSC_VER >= 1910) -using std::enable_if_t; -using std::conditional_t; -using std::remove_cv_t; -using std::remove_reference_t; -#else -template using enable_if_t = typename std::enable_if::type; -template using conditional_t = typename std::conditional::type; -template using remove_cv_t = typename std::remove_cv::type; -template using remove_reference_t = typename std::remove_reference::type; -#endif - -/// Index sequences -#if defined(PYBIND11_CPP14) -using std::index_sequence; -using std::make_index_sequence; -#else -template struct index_sequence { }; -template struct make_index_sequence_impl : make_index_sequence_impl { }; -template struct make_index_sequence_impl <0, S...> { typedef index_sequence type; }; -template using make_index_sequence = typename make_index_sequence_impl::type; -#endif - -/// Make an index sequence of the indices of true arguments -template struct select_indices_impl { using type = ISeq; }; -template struct select_indices_impl, I, B, Bs...> - : select_indices_impl, index_sequence>, I + 1, Bs...> {}; -template using select_indices = typename select_indices_impl, 0, Bs...>::type; - -/// Backports of std::bool_constant and std::negation to accomodate older compilers -template using bool_constant = std::integral_constant; -template struct negation : bool_constant { }; - -template struct void_t_impl { using type = void; }; -template using void_t = typename void_t_impl::type; - -/// Compile-time all/any/none of that check the boolean value of all template types -#ifdef __cpp_fold_expressions -template using all_of = 
bool_constant<(Ts::value && ...)>; -template using any_of = bool_constant<(Ts::value || ...)>; -#elif !defined(_MSC_VER) -template struct bools {}; -template using all_of = std::is_same< - bools, - bools>; -template using any_of = negation...>>; -#else -// MSVC has trouble with the above, but supports std::conjunction, which we can use instead (albeit -// at a slight loss of compilation efficiency). -template using all_of = std::conjunction; -template using any_of = std::disjunction; -#endif -template using none_of = negation>; - -template class... Predicates> using satisfies_all_of = all_of...>; -template class... Predicates> using satisfies_any_of = any_of...>; -template class... Predicates> using satisfies_none_of = none_of...>; - -/// Strip the class from a method type -template struct remove_class { }; -template struct remove_class { typedef R type(A...); }; -template struct remove_class { typedef R type(A...); }; - -/// Helper template to strip away type modifiers -template struct intrinsic_type { typedef T type; }; -template struct intrinsic_type { typedef typename intrinsic_type::type type; }; -template struct intrinsic_type { typedef typename intrinsic_type::type type; }; -template struct intrinsic_type { typedef typename intrinsic_type::type type; }; -template struct intrinsic_type { typedef typename intrinsic_type::type type; }; -template struct intrinsic_type { typedef typename intrinsic_type::type type; }; -template struct intrinsic_type { typedef typename intrinsic_type::type type; }; -template using intrinsic_t = typename intrinsic_type::type; - -/// Helper type to replace 'void' in some expressions -struct void_type { }; - -/// Helper template which holds a list of types -template struct type_list { }; - -/// Compile-time integer sum -#ifdef __cpp_fold_expressions -template constexpr size_t constexpr_sum(Ts... ns) { return (0 + ... + size_t{ns}); } -#else -constexpr size_t constexpr_sum() { return 0; } -template -constexpr size_t constexpr_sum(T n, Ts... ns) { return size_t{n} + constexpr_sum(ns...); } -#endif - -NAMESPACE_BEGIN(constexpr_impl) -/// Implementation details for constexpr functions -constexpr int first(int i) { return i; } -template -constexpr int first(int i, T v, Ts... vs) { return v ? i : first(i + 1, vs...); } - -constexpr int last(int /*i*/, int result) { return result; } -template -constexpr int last(int i, int result, T v, Ts... vs) { return last(i + 1, v ? i : result, vs...); } -NAMESPACE_END(constexpr_impl) - -/// Return the index of the first type in Ts which satisfies Predicate. Returns sizeof...(Ts) if -/// none match. -template class Predicate, typename... Ts> -constexpr int constexpr_first() { return constexpr_impl::first(0, Predicate::value...); } - -/// Return the index of the last type in Ts which satisfies Predicate, or -1 if none match. -template class Predicate, typename... Ts> -constexpr int constexpr_last() { return constexpr_impl::last(0, -1, Predicate::value...); } - -/// Return the Nth element from the parameter pack -template -struct pack_element { using type = typename pack_element::type; }; -template -struct pack_element<0, T, Ts...> { using type = T; }; - -/// Return the one and only type which matches the predicate, or Default if none match. -/// If more than one type matches the predicate, fail at compile-time. -template class Predicate, typename Default, typename... 
Ts> -struct exactly_one { - static constexpr auto found = constexpr_sum(Predicate::value...); - static_assert(found <= 1, "Found more than one type matching the predicate"); - - static constexpr auto index = found ? constexpr_first() : 0; - using type = conditional_t::type, Default>; -}; -template class P, typename Default> -struct exactly_one { using type = Default; }; - -template class Predicate, typename Default, typename... Ts> -using exactly_one_t = typename exactly_one::type; - -/// Defer the evaluation of type T until types Us are instantiated -template struct deferred_type { using type = T; }; -template using deferred_t = typename deferred_type::type; - -/// Like is_base_of, but requires a strict base (i.e. `is_strict_base_of::value == false`, -/// unlike `std::is_base_of`) -template using is_strict_base_of = bool_constant< - std::is_base_of::value && !std::is_same::value>; - -template class Base> -struct is_template_base_of_impl { - template static std::true_type check(Base *); - static std::false_type check(...); -}; - -/// Check if a template is the base of a type. For example: -/// `is_template_base_of` is true if `struct T : Base {}` where U can be anything -template class Base, typename T> -#if !defined(_MSC_VER) -using is_template_base_of = decltype(is_template_base_of_impl::check((remove_cv_t*)nullptr)); -#else // MSVC2015 has trouble with decltype in template aliases -struct is_template_base_of : decltype(is_template_base_of_impl::check((remove_cv_t*)nullptr)) { }; -#endif - -/// Check if T is an instantiation of the template `Class`. For example: -/// `is_instantiation` is true if `T == shared_ptr` where U can be anything. -template class Class, typename T> -struct is_instantiation : std::false_type { }; -template class Class, typename... Us> -struct is_instantiation> : std::true_type { }; - -/// Check if T is std::shared_ptr where U can be anything -template using is_shared_ptr = is_instantiation; - -/// Check if T looks like an input iterator -template struct is_input_iterator : std::false_type {}; -template -struct is_input_iterator()), decltype(++std::declval())>> - : std::true_type {}; - -/// Ignore that a variable is unused in compiler warnings -inline void ignore_unused(const int *) { } - -/// Apply a function over each element of a parameter pack -#ifdef __cpp_fold_expressions -#define PYBIND11_EXPAND_SIDE_EFFECTS(PATTERN) (((PATTERN), void()), ...) -#else -using expand_side_effects = bool[]; -#define PYBIND11_EXPAND_SIDE_EFFECTS(PATTERN) pybind11::detail::expand_side_effects{ ((PATTERN), void(), false)..., false } -#endif - -NAMESPACE_END(detail) - -/// Returns a named pointer that is shared among all extension modules (using the same -/// pybind11 version) running in the current interpreter. Names starting with underscores -/// are reserved for internal usage. Returns `nullptr` if no matching entry was found. -inline PYBIND11_NOINLINE void* get_shared_data(const std::string& name) { - auto& internals = detail::get_internals(); - auto it = internals.shared_data.find(name); - return it != internals.shared_data.end() ? it->second : nullptr; -} - -/// Set the shared data that can be later recovered by `get_shared_data()`. -inline PYBIND11_NOINLINE void *set_shared_data(const std::string& name, void *data) { - detail::get_internals().shared_data[name] = data; - return data; -} - -/// Returns a typed reference to a shared data entry (by using `get_shared_data()`) if -/// such entry exists. 
Otherwise, a new object of default-constructible type `T` is -/// added to the shared data under the given name and a reference to it is returned. -template T& get_or_create_shared_data(const std::string& name) { - auto& internals = detail::get_internals(); - auto it = internals.shared_data.find(name); - T* ptr = (T*) (it != internals.shared_data.end() ? it->second : nullptr); - if (!ptr) { - ptr = new T(); - internals.shared_data[name] = ptr; - } - return *ptr; -} - -/// C++ bindings of builtin Python exceptions -class builtin_exception : public std::runtime_error { -public: - using std::runtime_error::runtime_error; - /// Set the error using the Python C API - virtual void set_error() const = 0; -}; - -#define PYBIND11_RUNTIME_EXCEPTION(name, type) \ - class name : public builtin_exception { public: \ - using builtin_exception::builtin_exception; \ - name() : name("") { } \ - void set_error() const override { PyErr_SetString(type, what()); } \ - }; - -PYBIND11_RUNTIME_EXCEPTION(stop_iteration, PyExc_StopIteration) -PYBIND11_RUNTIME_EXCEPTION(index_error, PyExc_IndexError) -PYBIND11_RUNTIME_EXCEPTION(key_error, PyExc_KeyError) -PYBIND11_RUNTIME_EXCEPTION(value_error, PyExc_ValueError) -PYBIND11_RUNTIME_EXCEPTION(type_error, PyExc_TypeError) -PYBIND11_RUNTIME_EXCEPTION(cast_error, PyExc_RuntimeError) /// Thrown when pybind11::cast or handle::call fail due to a type casting error -PYBIND11_RUNTIME_EXCEPTION(reference_cast_error, PyExc_RuntimeError) /// Used internally - -[[noreturn]] PYBIND11_NOINLINE inline void pybind11_fail(const char *reason) { throw std::runtime_error(reason); } -[[noreturn]] PYBIND11_NOINLINE inline void pybind11_fail(const std::string &reason) { throw std::runtime_error(reason); } - -template struct format_descriptor { }; - -NAMESPACE_BEGIN(detail) -// Returns the index of the given type in the type char array below, and in the list in numpy.h -// The order here is: bool; 8 ints ((signed,unsigned)x(8,16,32,64)bits); float,double,long double; -// complex float,double,long double. Note that the long double types only participate when long -// double is actually longer than double (it isn't under MSVC). -// NB: not only the string below but also complex.h and numpy.h rely on this order. -template struct is_fmt_numeric { static constexpr bool value = false; }; -template struct is_fmt_numeric::value>> { - static constexpr bool value = true; - static constexpr int index = std::is_same::value ? 0 : 1 + ( - std::is_integral::value ? detail::log2(sizeof(T))*2 + std::is_unsigned::value : 8 + ( - std::is_same::value ? 1 : std::is_same::value ? 
2 : 0)); -}; -NAMESPACE_END(detail) - -template struct format_descriptor::value>> { - static constexpr const char c = "?bBhHiIqQfdg"[detail::is_fmt_numeric::index]; - static constexpr const char value[2] = { c, '\0' }; - static std::string format() { return std::string(1, c); } -}; - -template constexpr const char format_descriptor< - T, detail::enable_if_t::value>>::value[2]; - -/// RAII wrapper that temporarily clears any Python error state -struct error_scope { - PyObject *type, *value, *trace; - error_scope() { PyErr_Fetch(&type, &value, &trace); } - ~error_scope() { PyErr_Restore(type, value, trace); } -}; - -/// Dummy destructor wrapper that can be used to expose classes with a private destructor -struct nodelete { template void operator()(T*) { } }; - -// overload_cast requires variable templates: C++14 -#if defined(PYBIND11_CPP14) -#define PYBIND11_OVERLOAD_CAST 1 - -NAMESPACE_BEGIN(detail) -template -struct overload_cast_impl { - template - constexpr auto operator()(Return (*pf)(Args...)) const noexcept - -> decltype(pf) { return pf; } - - template - constexpr auto operator()(Return (Class::*pmf)(Args...), std::false_type = {}) const noexcept - -> decltype(pmf) { return pmf; } - - template - constexpr auto operator()(Return (Class::*pmf)(Args...) const, std::true_type) const noexcept - -> decltype(pmf) { return pmf; } -}; -NAMESPACE_END(detail) - -/// Syntax sugar for resolving overloaded function pointers: -/// - regular: static_cast(&Class::func) -/// - sweet: overload_cast(&Class::func) -template -static constexpr detail::overload_cast_impl overload_cast = {}; -// MSVC 2015 only accepts this particular initialization syntax for this variable template. - -/// Const member function selector for overload_cast -/// - regular: static_cast(&Class::func) -/// - sweet: overload_cast(&Class::func, const_) -static constexpr auto const_ = std::true_type{}; - -#else // no overload_cast: providing something that static_assert-fails: -template struct overload_cast { - static_assert(detail::deferred_t::value, - "pybind11::overload_cast<...> requires compiling in C++14 mode"); -}; -#endif // overload_cast - -NAMESPACE_BEGIN(detail) - -// Adaptor for converting arbitrary container arguments into a vector; implicitly convertible from -// any standard container (or C-style array) supporting std::begin/std::end, any singleton -// arithmetic type (if T is arithmetic), or explicitly constructible from an iterator pair. -template -class any_container { - std::vector v; -public: - any_container() = default; - - // Can construct from a pair of iterators - template ::value>> - any_container(It first, It last) : v(first, last) { } - - // Implicit conversion constructor from any arbitrary container type with values convertible to T - template ())), T>::value>> - any_container(const Container &c) : any_container(std::begin(c), std::end(c)) { } - - // initializer_list's aren't deducible, so don't get matched by the above template; we need this - // to explicitly allow implicit conversion from one: - template ::value>> - any_container(const std::initializer_list &c) : any_container(c.begin(), c.end()) { } - - // Avoid copying if given an rvalue vector of the correct type. 
- any_container(std::vector &&v) : v(std::move(v)) { } - - // Moves the vector out of an rvalue any_container - operator std::vector &&() && { return std::move(v); } - - // Dereferencing obtains a reference to the underlying vector - std::vector &operator*() { return v; } - const std::vector &operator*() const { return v; } - - // -> lets you call methods on the underlying vector - std::vector *operator->() { return &v; } - const std::vector *operator->() const { return &v; } -}; - -NAMESPACE_END(detail) - - - -NAMESPACE_END(pybind11) diff --git a/lanms/include/pybind11/complex.h b/lanms/include/pybind11/complex.h deleted file mode 100644 index 7d422e20..00000000 --- a/lanms/include/pybind11/complex.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - pybind11/complex.h: Complex number support - - Copyright (c) 2016 Wenzel Jakob - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. -*/ - -#pragma once - -#include "pybind11.h" -#include - -/// glibc defines I as a macro which breaks things, e.g., boost template names -#ifdef I -# undef I -#endif - -NAMESPACE_BEGIN(pybind11) - -template struct format_descriptor, detail::enable_if_t::value>> { - static constexpr const char c = format_descriptor::c; - static constexpr const char value[3] = { 'Z', c, '\0' }; - static std::string format() { return std::string(value); } -}; - -template constexpr const char format_descriptor< - std::complex, detail::enable_if_t::value>>::value[3]; - -NAMESPACE_BEGIN(detail) - -template struct is_fmt_numeric, detail::enable_if_t::value>> { - static constexpr bool value = true; - static constexpr int index = is_fmt_numeric::index + 3; -}; - -template class type_caster> { -public: - bool load(handle src, bool convert) { - if (!src) - return false; - if (!convert && !PyComplex_Check(src.ptr())) - return false; - Py_complex result = PyComplex_AsCComplex(src.ptr()); - if (result.real == -1.0 && PyErr_Occurred()) { - PyErr_Clear(); - return false; - } - value = std::complex((T) result.real, (T) result.imag); - return true; - } - - static handle cast(const std::complex &src, return_value_policy /* policy */, handle /* parent */) { - return PyComplex_FromDoubles((double) src.real(), (double) src.imag()); - } - - PYBIND11_TYPE_CASTER(std::complex, _("complex")); -}; -NAMESPACE_END(detail) -NAMESPACE_END(pybind11) diff --git a/lanms/include/pybind11/descr.h b/lanms/include/pybind11/descr.h deleted file mode 100644 index 23a099cf..00000000 --- a/lanms/include/pybind11/descr.h +++ /dev/null @@ -1,185 +0,0 @@ -/* - pybind11/descr.h: Helper type for concatenating type signatures - either at runtime (C++11) or compile time (C++14) - - Copyright (c) 2016 Wenzel Jakob - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. 
-*/ - -#pragma once - -#include "common.h" - -NAMESPACE_BEGIN(pybind11) -NAMESPACE_BEGIN(detail) - -/* Concatenate type signatures at compile time using C++14 */ -#if defined(PYBIND11_CPP14) && !defined(_MSC_VER) -#define PYBIND11_CONSTEXPR_DESCR - -template class descr { - template friend class descr; -public: - constexpr descr(char const (&text) [Size1+1], const std::type_info * const (&types)[Size2+1]) - : descr(text, types, - make_index_sequence(), - make_index_sequence()) { } - - constexpr const char *text() const { return m_text; } - constexpr const std::type_info * const * types() const { return m_types; } - - template - constexpr descr operator+(const descr &other) const { - return concat(other, - make_index_sequence(), - make_index_sequence(), - make_index_sequence(), - make_index_sequence()); - } - -protected: - template - constexpr descr( - char const (&text) [Size1+1], - const std::type_info * const (&types) [Size2+1], - index_sequence, index_sequence) - : m_text{text[Indices1]..., '\0'}, - m_types{types[Indices2]..., nullptr } {} - - template - constexpr descr - concat(const descr &other, - index_sequence, index_sequence, - index_sequence, index_sequence) const { - return descr( - { m_text[Indices1]..., other.m_text[OtherIndices1]..., '\0' }, - { m_types[Indices2]..., other.m_types[OtherIndices2]..., nullptr } - ); - } - -protected: - char m_text[Size1 + 1]; - const std::type_info * m_types[Size2 + 1]; -}; - -template constexpr descr _(char const(&text)[Size]) { - return descr(text, { nullptr }); -} - -template struct int_to_str : int_to_str { }; -template struct int_to_str<0, Digits...> { - static constexpr auto digits = descr({ ('0' + Digits)..., '\0' }, { nullptr }); -}; - -// Ternary description (like std::conditional) -template -constexpr enable_if_t> _(char const(&text1)[Size1], char const(&)[Size2]) { - return _(text1); -} -template -constexpr enable_if_t> _(char const(&)[Size1], char const(&text2)[Size2]) { - return _(text2); -} -template -constexpr enable_if_t> _(descr d, descr) { return d; } -template -constexpr enable_if_t> _(descr, descr d) { return d; } - -template auto constexpr _() -> decltype(int_to_str::digits) { - return int_to_str::digits; -} - -template constexpr descr<1, 1> _() { - return descr<1, 1>({ '%', '\0' }, { &typeid(Type), nullptr }); -} - -inline constexpr descr<0, 0> concat() { return _(""); } -template auto constexpr concat(descr descr) { return descr; } -template auto constexpr concat(descr descr, Args&&... 
args) { return descr + _(", ") + concat(args...); } -template auto constexpr type_descr(descr descr) { return _("{") + descr + _("}"); } - -#define PYBIND11_DESCR constexpr auto - -#else /* Simpler C++11 implementation based on run-time memory allocation and copying */ - -class descr { -public: - PYBIND11_NOINLINE descr(const char *text, const std::type_info * const * types) { - size_t nChars = len(text), nTypes = len(types); - m_text = new char[nChars]; - m_types = new const std::type_info *[nTypes]; - memcpy(m_text, text, nChars * sizeof(char)); - memcpy(m_types, types, nTypes * sizeof(const std::type_info *)); - } - - PYBIND11_NOINLINE descr operator+(descr &&d2) && { - descr r; - - size_t nChars1 = len(m_text), nTypes1 = len(m_types); - size_t nChars2 = len(d2.m_text), nTypes2 = len(d2.m_types); - - r.m_text = new char[nChars1 + nChars2 - 1]; - r.m_types = new const std::type_info *[nTypes1 + nTypes2 - 1]; - memcpy(r.m_text, m_text, (nChars1-1) * sizeof(char)); - memcpy(r.m_text + nChars1 - 1, d2.m_text, nChars2 * sizeof(char)); - memcpy(r.m_types, m_types, (nTypes1-1) * sizeof(std::type_info *)); - memcpy(r.m_types + nTypes1 - 1, d2.m_types, nTypes2 * sizeof(std::type_info *)); - - delete[] m_text; delete[] m_types; - delete[] d2.m_text; delete[] d2.m_types; - - return r; - } - - char *text() { return m_text; } - const std::type_info * * types() { return m_types; } - -protected: - PYBIND11_NOINLINE descr() { } - - template static size_t len(const T *ptr) { // return length including null termination - const T *it = ptr; - while (*it++ != (T) 0) - ; - return static_cast(it - ptr); - } - - const std::type_info **m_types = nullptr; - char *m_text = nullptr; -}; - -/* The 'PYBIND11_NOINLINE inline' combinations below are intentional to get the desired linkage while producing as little object code as possible */ - -PYBIND11_NOINLINE inline descr _(const char *text) { - const std::type_info *types[1] = { nullptr }; - return descr(text, types); -} - -template PYBIND11_NOINLINE enable_if_t _(const char *text1, const char *) { return _(text1); } -template PYBIND11_NOINLINE enable_if_t _(char const *, const char *text2) { return _(text2); } -template PYBIND11_NOINLINE enable_if_t _(descr d, descr) { return d; } -template PYBIND11_NOINLINE enable_if_t _(descr, descr d) { return d; } - -template PYBIND11_NOINLINE descr _() { - const std::type_info *types[2] = { &typeid(Type), nullptr }; - return descr("%", types); -} - -template PYBIND11_NOINLINE descr _() { - const std::type_info *types[1] = { nullptr }; - return descr(std::to_string(Size).c_str(), types); -} - -PYBIND11_NOINLINE inline descr concat() { return _(""); } -PYBIND11_NOINLINE inline descr concat(descr &&d) { return d; } -template PYBIND11_NOINLINE descr concat(descr &&d, Args&&... args) { return std::move(d) + _(", ") + concat(std::forward(args)...); } -PYBIND11_NOINLINE inline descr type_descr(descr&& d) { return _("{") + std::move(d) + _("}"); } - -#define PYBIND11_DESCR ::pybind11::detail::descr -#endif - -NAMESPACE_END(detail) -NAMESPACE_END(pybind11) diff --git a/lanms/include/pybind11/eigen.h b/lanms/include/pybind11/eigen.h deleted file mode 100644 index fc070516..00000000 --- a/lanms/include/pybind11/eigen.h +++ /dev/null @@ -1,610 +0,0 @@ -/* - pybind11/eigen.h: Transparent conversion for dense and sparse Eigen matrices - - Copyright (c) 2016 Wenzel Jakob - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. 
-*/ - -#pragma once - -#include "numpy.h" - -#if defined(__INTEL_COMPILER) -# pragma warning(disable: 1682) // implicit conversion of a 64-bit integral type to a smaller integral type (potential portability problem) -#elif defined(__GNUG__) || defined(__clang__) -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wconversion" -# pragma GCC diagnostic ignored "-Wdeprecated-declarations" -# if __GNUC__ >= 7 -# pragma GCC diagnostic ignored "-Wint-in-bool-context" -# endif -#endif - -#include -#include - -#if defined(_MSC_VER) -# pragma warning(push) -# pragma warning(disable: 4127) // warning C4127: Conditional expression is constant -#endif - -// Eigen prior to 3.2.7 doesn't have proper move constructors--but worse, some classes get implicit -// move constructors that break things. We could detect this an explicitly copy, but an extra copy -// of matrices seems highly undesirable. -static_assert(EIGEN_VERSION_AT_LEAST(3,2,7), "Eigen support in pybind11 requires Eigen >= 3.2.7"); - -NAMESPACE_BEGIN(pybind11) - -// Provide a convenience alias for easier pass-by-ref usage with fully dynamic strides: -using EigenDStride = Eigen::Stride; -template using EigenDRef = Eigen::Ref; -template using EigenDMap = Eigen::Map; - -NAMESPACE_BEGIN(detail) - -#if EIGEN_VERSION_AT_LEAST(3,3,0) -using EigenIndex = Eigen::Index; -#else -using EigenIndex = EIGEN_DEFAULT_DENSE_INDEX_TYPE; -#endif - -// Matches Eigen::Map, Eigen::Ref, blocks, etc: -template using is_eigen_dense_map = all_of, std::is_base_of, T>>; -template using is_eigen_mutable_map = std::is_base_of, T>; -template using is_eigen_dense_plain = all_of>, is_template_base_of>; -template using is_eigen_sparse = is_template_base_of; -// Test for objects inheriting from EigenBase that aren't captured by the above. This -// basically covers anything that can be assigned to a dense matrix but that don't have a typical -// matrix data layout that can be copied from their .data(). For example, DiagonalMatrix and -// SelfAdjointView fall into this category. -template using is_eigen_other = all_of< - is_template_base_of, - negation, is_eigen_dense_plain, is_eigen_sparse>> ->; - -// Captures numpy/eigen conformability status (returned by EigenProps::conformable()): -template struct EigenConformable { - bool conformable = false; - EigenIndex rows = 0, cols = 0; - EigenDStride stride{0, 0}; // Only valid if negativestrides is false! - bool negativestrides = false; // If true, do not use stride! - - EigenConformable(bool fits = false) : conformable{fits} {} - // Matrix type: - EigenConformable(EigenIndex r, EigenIndex c, - EigenIndex rstride, EigenIndex cstride) : - conformable{true}, rows{r}, cols{c} { - // TODO: when Eigen bug #747 is fixed, remove the tests for non-negativity. http://eigen.tuxfamily.org/bz/show_bug.cgi?id=747 - if (rstride < 0 || cstride < 0) { - negativestrides = true; - } else { - stride = {EigenRowMajor ? rstride : cstride /* outer stride */, - EigenRowMajor ? cstride : rstride /* inner stride */ }; - } - } - // Vector type: - EigenConformable(EigenIndex r, EigenIndex c, EigenIndex stride) - : EigenConformable(r, c, r == 1 ? c*stride : stride, c == 1 ? 
r : r*stride) {} - - template bool stride_compatible() const { - // To have compatible strides, we need (on both dimensions) one of fully dynamic strides, - // matching strides, or a dimension size of 1 (in which case the stride value is irrelevant) - return - !negativestrides && - (props::inner_stride == Eigen::Dynamic || props::inner_stride == stride.inner() || - (EigenRowMajor ? cols : rows) == 1) && - (props::outer_stride == Eigen::Dynamic || props::outer_stride == stride.outer() || - (EigenRowMajor ? rows : cols) == 1); - } - operator bool() const { return conformable; } -}; - -template struct eigen_extract_stride { using type = Type; }; -template -struct eigen_extract_stride> { using type = StrideType; }; -template -struct eigen_extract_stride> { using type = StrideType; }; - -// Helper struct for extracting information from an Eigen type -template struct EigenProps { - using Type = Type_; - using Scalar = typename Type::Scalar; - using StrideType = typename eigen_extract_stride::type; - static constexpr EigenIndex - rows = Type::RowsAtCompileTime, - cols = Type::ColsAtCompileTime, - size = Type::SizeAtCompileTime; - static constexpr bool - row_major = Type::IsRowMajor, - vector = Type::IsVectorAtCompileTime, // At least one dimension has fixed size 1 - fixed_rows = rows != Eigen::Dynamic, - fixed_cols = cols != Eigen::Dynamic, - fixed = size != Eigen::Dynamic, // Fully-fixed size - dynamic = !fixed_rows && !fixed_cols; // Fully-dynamic size - - template using if_zero = std::integral_constant; - static constexpr EigenIndex inner_stride = if_zero::value, - outer_stride = if_zero::value; - static constexpr bool dynamic_stride = inner_stride == Eigen::Dynamic && outer_stride == Eigen::Dynamic; - static constexpr bool requires_row_major = !dynamic_stride && !vector && (row_major ? inner_stride : outer_stride) == 1; - static constexpr bool requires_col_major = !dynamic_stride && !vector && (row_major ? outer_stride : inner_stride) == 1; - - // Takes an input array and determines whether we can make it fit into the Eigen type. If - // the array is a vector, we attempt to fit it into either an Eigen 1xN or Nx1 vector - // (preferring the latter if it will fit in either, i.e. for a fully dynamic matrix type). - static EigenConformable conformable(const array &a) { - const auto dims = a.ndim(); - if (dims < 1 || dims > 2) - return false; - - if (dims == 2) { // Matrix type: require exact match (or dynamic) - - EigenIndex - np_rows = a.shape(0), - np_cols = a.shape(1), - np_rstride = a.strides(0) / static_cast(sizeof(Scalar)), - np_cstride = a.strides(1) / static_cast(sizeof(Scalar)); - if ((fixed_rows && np_rows != rows) || (fixed_cols && np_cols != cols)) - return false; - - return {np_rows, np_cols, np_rstride, np_cstride}; - } - - // Otherwise we're storing an n-vector. Only one of the strides will be used, but whichever - // is used, we want the (single) numpy stride value. - const EigenIndex n = a.shape(0), - stride = a.strides(0) / static_cast(sizeof(Scalar)); - - if (vector) { // Eigen type is a compile-time vector - if (fixed && size != n) - return false; // Vector size mismatch - return {rows == 1 ? 1 : n, cols == 1 ? 1 : n, stride}; - } - else if (fixed) { - // The type has a fixed size, but is not a vector: abort - return false; - } - else if (fixed_cols) { - // Since this isn't a vector, cols must be != 1. We allow this only if it exactly - // equals the number of elements (rows is Dynamic, and so 1 row is allowed). 
- if (cols != n) return false; - return {1, n, stride}; - } - else { - // Otherwise it's either fully dynamic, or column dynamic; both become a column vector - if (fixed_rows && rows != n) return false; - return {n, 1, stride}; - } - } - - static PYBIND11_DESCR descriptor() { - constexpr bool show_writeable = is_eigen_dense_map::value && is_eigen_mutable_map::value; - constexpr bool show_order = is_eigen_dense_map::value; - constexpr bool show_c_contiguous = show_order && requires_row_major; - constexpr bool show_f_contiguous = !show_c_contiguous && show_order && requires_col_major; - - return type_descr(_("numpy.ndarray[") + npy_format_descriptor::name() + - _("[") + _(_<(size_t) rows>(), _("m")) + - _(", ") + _(_<(size_t) cols>(), _("n")) + - _("]") + - // For a reference type (e.g. Ref) we have other constraints that might need to be - // satisfied: writeable=True (for a mutable reference), and, depending on the map's stride - // options, possibly f_contiguous or c_contiguous. We include them in the descriptor output - // to provide some hint as to why a TypeError is occurring (otherwise it can be confusing to - // see that a function accepts a 'numpy.ndarray[float64[3,2]]' and an error message that you - // *gave* a numpy.ndarray of the right type and dimensions. - _(", flags.writeable", "") + - _(", flags.c_contiguous", "") + - _(", flags.f_contiguous", "") + - _("]") - ); - } -}; - -// Casts an Eigen type to numpy array. If given a base, the numpy array references the src data, -// otherwise it'll make a copy. writeable lets you turn off the writeable flag for the array. -template handle eigen_array_cast(typename props::Type const &src, handle base = handle(), bool writeable = true) { - constexpr ssize_t elem_size = sizeof(typename props::Scalar); - array a; - if (props::vector) - a = array({ src.size() }, { elem_size * src.innerStride() }, src.data(), base); - else - a = array({ src.rows(), src.cols() }, { elem_size * src.rowStride(), elem_size * src.colStride() }, - src.data(), base); - - if (!writeable) - array_proxy(a.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_; - - return a.release(); -} - -// Takes an lvalue ref to some Eigen type and a (python) base object, creating a numpy array that -// reference the Eigen object's data with `base` as the python-registered base class (if omitted, -// the base will be set to None, and lifetime management is up to the caller). The numpy array is -// non-writeable if the given type is const. -template -handle eigen_ref_array(Type &src, handle parent = none()) { - // none here is to get past array's should-we-copy detection, which currently always - // copies when there is no base. Setting the base to None should be harmless. - return eigen_array_cast(src, parent, !std::is_const::value); -} - -// Takes a pointer to some dense, plain Eigen type, builds a capsule around it, then returns a numpy -// array that references the encapsulated data with a python-side reference to the capsule to tie -// its destruction to that of any dependent python objects. Const-ness is determined by whether or -// not the Type of the pointer given is const. -template ::value>> -handle eigen_encapsulate(Type *src) { - capsule base(src, [](void *o) { delete static_cast(o); }); - return eigen_ref_array(*src, base); -} - -// Type caster for regular, dense matrix types (e.g. MatrixXd), but not maps/refs/etc. of dense -// types. 
-template -struct type_caster::value>> { - using Scalar = typename Type::Scalar; - using props = EigenProps; - - bool load(handle src, bool convert) { - // If we're in no-convert mode, only load if given an array of the correct type - if (!convert && !isinstance>(src)) - return false; - - // Coerce into an array, but don't do type conversion yet; the copy below handles it. - auto buf = array::ensure(src); - - if (!buf) - return false; - - auto dims = buf.ndim(); - if (dims < 1 || dims > 2) - return false; - - auto fits = props::conformable(buf); - if (!fits) - return false; - - // Allocate the new type, then build a numpy reference into it - value = Type(fits.rows, fits.cols); - auto ref = reinterpret_steal(eigen_ref_array(value)); - if (dims == 1) ref = ref.squeeze(); - - int result = detail::npy_api::get().PyArray_CopyInto_(ref.ptr(), buf.ptr()); - - if (result < 0) { // Copy failed! - PyErr_Clear(); - return false; - } - - return true; - } - -private: - - // Cast implementation - template - static handle cast_impl(CType *src, return_value_policy policy, handle parent) { - switch (policy) { - case return_value_policy::take_ownership: - case return_value_policy::automatic: - return eigen_encapsulate(src); - case return_value_policy::move: - return eigen_encapsulate(new CType(std::move(*src))); - case return_value_policy::copy: - return eigen_array_cast(*src); - case return_value_policy::reference: - case return_value_policy::automatic_reference: - return eigen_ref_array(*src); - case return_value_policy::reference_internal: - return eigen_ref_array(*src, parent); - default: - throw cast_error("unhandled return_value_policy: should not happen!"); - }; - } - -public: - - // Normal returned non-reference, non-const value: - static handle cast(Type &&src, return_value_policy /* policy */, handle parent) { - return cast_impl(&src, return_value_policy::move, parent); - } - // If you return a non-reference const, we mark the numpy array readonly: - static handle cast(const Type &&src, return_value_policy /* policy */, handle parent) { - return cast_impl(&src, return_value_policy::move, parent); - } - // lvalue reference return; default (automatic) becomes copy - static handle cast(Type &src, return_value_policy policy, handle parent) { - if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference) - policy = return_value_policy::copy; - return cast_impl(&src, policy, parent); - } - // const lvalue reference return; default (automatic) becomes copy - static handle cast(const Type &src, return_value_policy policy, handle parent) { - if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference) - policy = return_value_policy::copy; - return cast(&src, policy, parent); - } - // non-const pointer return - static handle cast(Type *src, return_value_policy policy, handle parent) { - return cast_impl(src, policy, parent); - } - // const pointer return - static handle cast(const Type *src, return_value_policy policy, handle parent) { - return cast_impl(src, policy, parent); - } - - static PYBIND11_DESCR name() { return props::descriptor(); } - - operator Type*() { return &value; } - operator Type&() { return value; } - operator Type&&() && { return std::move(value); } - template using cast_op_type = movable_cast_op_type; - -private: - Type value; -}; - -// Eigen Ref/Map classes have slightly different policy requirements, meaning we don't want to force -// `move` when a Ref/Map rvalue is returned; we treat Ref<> sort of like a 
pointer (we care about -// the underlying data, not the outer shell). -template -struct return_value_policy_override::value>> { - static return_value_policy policy(return_value_policy p) { return p; } -}; - -// Base class for casting reference/map/block/etc. objects back to python. -template struct eigen_map_caster { -private: - using props = EigenProps; - -public: - - // Directly referencing a ref/map's data is a bit dangerous (whatever the map/ref points to has - // to stay around), but we'll allow it under the assumption that you know what you're doing (and - // have an appropriate keep_alive in place). We return a numpy array pointing directly at the - // ref's data (The numpy array ends up read-only if the ref was to a const matrix type.) Note - // that this means you need to ensure you don't destroy the object in some other way (e.g. with - // an appropriate keep_alive, or with a reference to a statically allocated matrix). - static handle cast(const MapType &src, return_value_policy policy, handle parent) { - switch (policy) { - case return_value_policy::copy: - return eigen_array_cast(src); - case return_value_policy::reference_internal: - return eigen_array_cast(src, parent, is_eigen_mutable_map::value); - case return_value_policy::reference: - case return_value_policy::automatic: - case return_value_policy::automatic_reference: - return eigen_array_cast(src, none(), is_eigen_mutable_map::value); - default: - // move, take_ownership don't make any sense for a ref/map: - pybind11_fail("Invalid return_value_policy for Eigen Map/Ref/Block type"); - } - } - - static PYBIND11_DESCR name() { return props::descriptor(); } - - // Explicitly delete these: support python -> C++ conversion on these (i.e. these can be return - // types but not bound arguments). We still provide them (with an explicitly delete) so that - // you end up here if you try anyway. - bool load(handle, bool) = delete; - operator MapType() = delete; - template using cast_op_type = MapType; -}; - -// We can return any map-like object (but can only load Refs, specialized next): -template struct type_caster::value>> - : eigen_map_caster {}; - -// Loader for Ref<...> arguments. See the documentation for info on how to make this work without -// copying (it requires some extra effort in many cases). -template -struct type_caster< - Eigen::Ref, - enable_if_t>::value> -> : public eigen_map_caster> { -private: - using Type = Eigen::Ref; - using props = EigenProps; - using Scalar = typename props::Scalar; - using MapType = Eigen::Map; - using Array = array_t; - static constexpr bool need_writeable = is_eigen_mutable_map::value; - // Delay construction (these have no default constructor) - std::unique_ptr map; - std::unique_ptr ref; - // Our array. When possible, this is just a numpy array pointing to the source data, but - // sometimes we can't avoid copying (e.g. input is not a numpy array at all, has an incompatible - // layout, or is an array of a type that needs to be converted). Using a numpy temporary - // (rather than an Eigen temporary) saves an extra copy when we need both type conversion and - // storage order conversion. (Note that we refuse to use this temporary copy when loading an - // argument for a Ref with M non-const, i.e. a read-write reference). - Array copy_or_ref; -public: - bool load(handle src, bool convert) { - // First check whether what we have is already an array of the right type. If not, we can't - // avoid a copy (because the copy is also going to do type conversion). 
- bool need_copy = !isinstance(src); - - EigenConformable fits; - if (!need_copy) { - // We don't need a converting copy, but we also need to check whether the strides are - // compatible with the Ref's stride requirements - Array aref = reinterpret_borrow(src); - - if (aref && (!need_writeable || aref.writeable())) { - fits = props::conformable(aref); - if (!fits) return false; // Incompatible dimensions - if (!fits.template stride_compatible()) - need_copy = true; - else - copy_or_ref = std::move(aref); - } - else { - need_copy = true; - } - } - - if (need_copy) { - // We need to copy: If we need a mutable reference, or we're not supposed to convert - // (either because we're in the no-convert overload pass, or because we're explicitly - // instructed not to copy (via `py::arg().noconvert()`) we have to fail loading. - if (!convert || need_writeable) return false; - - Array copy = Array::ensure(src); - if (!copy) return false; - fits = props::conformable(copy); - if (!fits || !fits.template stride_compatible()) - return false; - copy_or_ref = std::move(copy); - loader_life_support::add_patient(copy_or_ref); - } - - ref.reset(); - map.reset(new MapType(data(copy_or_ref), fits.rows, fits.cols, make_stride(fits.stride.outer(), fits.stride.inner()))); - ref.reset(new Type(*map)); - - return true; - } - - operator Type*() { return ref.get(); } - operator Type&() { return *ref; } - template using cast_op_type = pybind11::detail::cast_op_type<_T>; - -private: - template ::value, int> = 0> - Scalar *data(Array &a) { return a.mutable_data(); } - - template ::value, int> = 0> - const Scalar *data(Array &a) { return a.data(); } - - // Attempt to figure out a constructor of `Stride` that will work. - // If both strides are fixed, use a default constructor: - template using stride_ctor_default = bool_constant< - S::InnerStrideAtCompileTime != Eigen::Dynamic && S::OuterStrideAtCompileTime != Eigen::Dynamic && - std::is_default_constructible::value>; - // Otherwise, if there is a two-index constructor, assume it is (outer,inner) like - // Eigen::Stride, and use it: - template using stride_ctor_dual = bool_constant< - !stride_ctor_default::value && std::is_constructible::value>; - // Otherwise, if there is a one-index constructor, and just one of the strides is dynamic, use - // it (passing whichever stride is dynamic). - template using stride_ctor_outer = bool_constant< - !any_of, stride_ctor_dual>::value && - S::OuterStrideAtCompileTime == Eigen::Dynamic && S::InnerStrideAtCompileTime != Eigen::Dynamic && - std::is_constructible::value>; - template using stride_ctor_inner = bool_constant< - !any_of, stride_ctor_dual>::value && - S::InnerStrideAtCompileTime == Eigen::Dynamic && S::OuterStrideAtCompileTime != Eigen::Dynamic && - std::is_constructible::value>; - - template ::value, int> = 0> - static S make_stride(EigenIndex, EigenIndex) { return S(); } - template ::value, int> = 0> - static S make_stride(EigenIndex outer, EigenIndex inner) { return S(outer, inner); } - template ::value, int> = 0> - static S make_stride(EigenIndex outer, EigenIndex) { return S(outer); } - template ::value, int> = 0> - static S make_stride(EigenIndex, EigenIndex inner) { return S(inner); } - -}; - -// type_caster for special matrix types (e.g. DiagonalMatrix), which are EigenBase, but not -// EigenDense (i.e. they don't have a data(), at least not with the usual matrix layout). -// load() is not supported, but we can cast them into the python domain by first copying to a -// regular Eigen::Matrix, then casting that. 
-template -struct type_caster::value>> { -protected: - using Matrix = Eigen::Matrix; - using props = EigenProps; -public: - static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) { - handle h = eigen_encapsulate(new Matrix(src)); - return h; - } - static handle cast(const Type *src, return_value_policy policy, handle parent) { return cast(*src, policy, parent); } - - static PYBIND11_DESCR name() { return props::descriptor(); } - - // Explicitly delete these: support python -> C++ conversion on these (i.e. these can be return - // types but not bound arguments). We still provide them (with an explicitly delete) so that - // you end up here if you try anyway. - bool load(handle, bool) = delete; - operator Type() = delete; - template using cast_op_type = Type; -}; - -template -struct type_caster::value>> { - typedef typename Type::Scalar Scalar; - typedef remove_reference_t().outerIndexPtr())> StorageIndex; - typedef typename Type::Index Index; - static constexpr bool rowMajor = Type::IsRowMajor; - - bool load(handle src, bool) { - if (!src) - return false; - - auto obj = reinterpret_borrow(src); - object sparse_module = module::import("scipy.sparse"); - object matrix_type = sparse_module.attr( - rowMajor ? "csr_matrix" : "csc_matrix"); - - if (!obj.get_type().is(matrix_type)) { - try { - obj = matrix_type(obj); - } catch (const error_already_set &) { - return false; - } - } - - auto values = array_t((object) obj.attr("data")); - auto innerIndices = array_t((object) obj.attr("indices")); - auto outerIndices = array_t((object) obj.attr("indptr")); - auto shape = pybind11::tuple((pybind11::object) obj.attr("shape")); - auto nnz = obj.attr("nnz").cast(); - - if (!values || !innerIndices || !outerIndices) - return false; - - value = Eigen::MappedSparseMatrix( - shape[0].cast(), shape[1].cast(), nnz, - outerIndices.mutable_data(), innerIndices.mutable_data(), values.mutable_data()); - - return true; - } - - static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) { - const_cast(src).makeCompressed(); - - object matrix_type = module::import("scipy.sparse").attr( - rowMajor ? "csr_matrix" : "csc_matrix"); - - array data(src.nonZeros(), src.valuePtr()); - array outerIndices((rowMajor ? src.rows() : src.cols()) + 1, src.outerIndexPtr()); - array innerIndices(src.nonZeros(), src.innerIndexPtr()); - - return matrix_type( - std::make_tuple(data, innerIndices, outerIndices), - std::make_pair(src.rows(), src.cols()) - ).release(); - } - - PYBIND11_TYPE_CASTER(Type, _<(Type::IsRowMajor) != 0>("scipy.sparse.csr_matrix[", "scipy.sparse.csc_matrix[") - + npy_format_descriptor::name() + _("]")); -}; - -NAMESPACE_END(detail) -NAMESPACE_END(pybind11) - -#if defined(__GNUG__) || defined(__clang__) -# pragma GCC diagnostic pop -#elif defined(_MSC_VER) -# pragma warning(pop) -#endif diff --git a/lanms/include/pybind11/embed.h b/lanms/include/pybind11/embed.h deleted file mode 100644 index 0eb656b0..00000000 --- a/lanms/include/pybind11/embed.h +++ /dev/null @@ -1,194 +0,0 @@ -/* - pybind11/embed.h: Support for embedding the interpreter - - Copyright (c) 2017 Wenzel Jakob - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. 
-*/ - -#pragma once - -#include "pybind11.h" -#include "eval.h" - -#if defined(PYPY_VERSION) -# error Embedding the interpreter is not supported with PyPy -#endif - -#if PY_MAJOR_VERSION >= 3 -# define PYBIND11_EMBEDDED_MODULE_IMPL(name) \ - extern "C" PyObject *pybind11_init_impl_##name() { \ - return pybind11_init_wrapper_##name(); \ - } -#else -# define PYBIND11_EMBEDDED_MODULE_IMPL(name) \ - extern "C" void pybind11_init_impl_##name() { \ - pybind11_init_wrapper_##name(); \ - } -#endif - -/** \rst - Add a new module to the table of builtins for the interpreter. Must be - defined in global scope. The first macro parameter is the name of the - module (without quotes). The second parameter is the variable which will - be used as the interface to add functions and classes to the module. - - .. code-block:: cpp - - PYBIND11_EMBEDDED_MODULE(example, m) { - // ... initialize functions and classes here - m.def("foo", []() { - return "Hello, World!"; - }); - } - \endrst */ -#define PYBIND11_EMBEDDED_MODULE(name, variable) \ - static void pybind11_init_##name(pybind11::module &); \ - static PyObject *pybind11_init_wrapper_##name() { \ - auto m = pybind11::module(#name); \ - try { \ - pybind11_init_##name(m); \ - return m.ptr(); \ - } catch (pybind11::error_already_set &e) { \ - PyErr_SetString(PyExc_ImportError, e.what()); \ - return nullptr; \ - } catch (const std::exception &e) { \ - PyErr_SetString(PyExc_ImportError, e.what()); \ - return nullptr; \ - } \ - } \ - PYBIND11_EMBEDDED_MODULE_IMPL(name) \ - pybind11::detail::embedded_module name(#name, pybind11_init_impl_##name); \ - void pybind11_init_##name(pybind11::module &variable) - - -NAMESPACE_BEGIN(pybind11) -NAMESPACE_BEGIN(detail) - -/// Python 2.7/3.x compatible version of `PyImport_AppendInittab` and error checks. -struct embedded_module { -#if PY_MAJOR_VERSION >= 3 - using init_t = PyObject *(*)(); -#else - using init_t = void (*)(); -#endif - embedded_module(const char *name, init_t init) { - if (Py_IsInitialized()) - pybind11_fail("Can't add new modules after the interpreter has been initialized"); - - auto result = PyImport_AppendInittab(name, init); - if (result == -1) - pybind11_fail("Insufficient memory to add a new module"); - } -}; - -NAMESPACE_END(detail) - -/** \rst - Initialize the Python interpreter. No other pybind11 or CPython API functions can be - called before this is done; with the exception of `PYBIND11_EMBEDDED_MODULE`. The - optional parameter can be used to skip the registration of signal handlers (see the - Python documentation for details). Calling this function again after the interpreter - has already been initialized is a fatal error. - \endrst */ -inline void initialize_interpreter(bool init_signal_handlers = true) { - if (Py_IsInitialized()) - pybind11_fail("The interpreter is already running"); - - Py_InitializeEx(init_signal_handlers ? 1 : 0); - - // Make .py files in the working directory available by default - auto sys_path = reinterpret_borrow(module::import("sys").attr("path")); - sys_path.append("."); -} - -/** \rst - Shut down the Python interpreter. No pybind11 or CPython API functions can be called - after this. In addition, pybind11 objects must not outlive the interpreter: - - .. 
code-block:: cpp - - { // BAD - py::initialize_interpreter(); - auto hello = py::str("Hello, World!"); - py::finalize_interpreter(); - } // <-- BOOM, hello's destructor is called after interpreter shutdown - - { // GOOD - py::initialize_interpreter(); - { // scoped - auto hello = py::str("Hello, World!"); - } // <-- OK, hello is cleaned up properly - py::finalize_interpreter(); - } - - { // BETTER - py::scoped_interpreter guard{}; - auto hello = py::str("Hello, World!"); - } - - .. warning:: - - The interpreter can be restarted by calling `initialize_interpreter` again. - Modules created using pybind11 can be safely re-initialized. However, Python - itself cannot completely unload binary extension modules and there are several - caveats with regard to interpreter restarting. All the details can be found - in the CPython documentation. In short, not all interpreter memory may be - freed, either due to reference cycles or user-created global data. - - \endrst */ -inline void finalize_interpreter() { - handle builtins(PyEval_GetBuiltins()); - const char *id = PYBIND11_INTERNALS_ID; - - // Get the internals pointer (without creating it if it doesn't exist). It's possible for the - // internals to be created during Py_Finalize() (e.g. if a py::capsule calls `get_internals()` - // during destruction), so we get the pointer-pointer here and check it after Py_Finalize(). - detail::internals **internals_ptr_ptr = &detail::get_internals_ptr(); - // It could also be stashed in builtins, so look there too: - if (builtins.contains(id) && isinstance(builtins[id])) - internals_ptr_ptr = capsule(builtins[id]); - - Py_Finalize(); - - if (internals_ptr_ptr) { - delete *internals_ptr_ptr; - *internals_ptr_ptr = nullptr; - } -} - -/** \rst - Scope guard version of `initialize_interpreter` and `finalize_interpreter`. - This a move-only guard and only a single instance can exist. - - .. code-block:: cpp - - #include - - int main() { - py::scoped_interpreter guard{}; - py::print(Hello, World!); - } // <-- interpreter shutdown - \endrst */ -class scoped_interpreter { -public: - scoped_interpreter(bool init_signal_handlers = true) { - initialize_interpreter(init_signal_handlers); - } - - scoped_interpreter(const scoped_interpreter &) = delete; - scoped_interpreter(scoped_interpreter &&other) noexcept { other.is_valid = false; } - scoped_interpreter &operator=(const scoped_interpreter &) = delete; - scoped_interpreter &operator=(scoped_interpreter &&) = delete; - - ~scoped_interpreter() { - if (is_valid) - finalize_interpreter(); - } - -private: - bool is_valid = true; -}; - -NAMESPACE_END(pybind11) diff --git a/lanms/include/pybind11/eval.h b/lanms/include/pybind11/eval.h deleted file mode 100644 index 165003b8..00000000 --- a/lanms/include/pybind11/eval.h +++ /dev/null @@ -1,117 +0,0 @@ -/* - pybind11/exec.h: Support for evaluating Python expressions and statements - from strings and files - - Copyright (c) 2016 Klemens Morgenstern and - Wenzel Jakob - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. -*/ - -#pragma once - -#include "pybind11.h" - -NAMESPACE_BEGIN(pybind11) - -enum eval_mode { - /// Evaluate a string containing an isolated expression - eval_expr, - - /// Evaluate a string containing a single statement. Returns \c none - eval_single_statement, - - /// Evaluate a string containing a sequence of statement. 
Returns \c none - eval_statements -}; - -template -object eval(str expr, object global = globals(), object local = object()) { - if (!local) - local = global; - - /* PyRun_String does not accept a PyObject / encoding specifier, - this seems to be the only alternative */ - std::string buffer = "# -*- coding: utf-8 -*-\n" + (std::string) expr; - - int start; - switch (mode) { - case eval_expr: start = Py_eval_input; break; - case eval_single_statement: start = Py_single_input; break; - case eval_statements: start = Py_file_input; break; - default: pybind11_fail("invalid evaluation mode"); - } - - PyObject *result = PyRun_String(buffer.c_str(), start, global.ptr(), local.ptr()); - if (!result) - throw error_already_set(); - return reinterpret_steal(result); -} - -template -object eval(const char (&s)[N], object global = globals(), object local = object()) { - /* Support raw string literals by removing common leading whitespace */ - auto expr = (s[0] == '\n') ? str(module::import("textwrap").attr("dedent")(s)) - : str(s); - return eval(expr, global, local); -} - -inline void exec(str expr, object global = globals(), object local = object()) { - eval(expr, global, local); -} - -template -void exec(const char (&s)[N], object global = globals(), object local = object()) { - eval(s, global, local); -} - -template -object eval_file(str fname, object global = globals(), object local = object()) { - if (!local) - local = global; - - int start; - switch (mode) { - case eval_expr: start = Py_eval_input; break; - case eval_single_statement: start = Py_single_input; break; - case eval_statements: start = Py_file_input; break; - default: pybind11_fail("invalid evaluation mode"); - } - - int closeFile = 1; - std::string fname_str = (std::string) fname; -#if PY_VERSION_HEX >= 0x03040000 - FILE *f = _Py_fopen_obj(fname.ptr(), "r"); -#elif PY_VERSION_HEX >= 0x03000000 - FILE *f = _Py_fopen(fname.ptr(), "r"); -#else - /* No unicode support in open() :( */ - auto fobj = reinterpret_steal(PyFile_FromString( - const_cast(fname_str.c_str()), - const_cast("r"))); - FILE *f = nullptr; - if (fobj) - f = PyFile_AsFile(fobj.ptr()); - closeFile = 0; -#endif - if (!f) { - PyErr_Clear(); - pybind11_fail("File \"" + fname_str + "\" could not be opened!"); - } - -#if PY_VERSION_HEX < 0x03000000 && defined(PYPY_VERSION) - PyObject *result = PyRun_File(f, fname_str.c_str(), start, global.ptr(), - local.ptr()); - (void) closeFile; -#else - PyObject *result = PyRun_FileEx(f, fname_str.c_str(), start, global.ptr(), - local.ptr(), closeFile); -#endif - - if (!result) - throw error_already_set(); - return reinterpret_steal(result); -} - -NAMESPACE_END(pybind11) diff --git a/lanms/include/pybind11/functional.h b/lanms/include/pybind11/functional.h deleted file mode 100644 index fdb6b330..00000000 --- a/lanms/include/pybind11/functional.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - pybind11/functional.h: std::function<> support - - Copyright (c) 2016 Wenzel Jakob - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. 
-*/ - -#pragma once - -#include "pybind11.h" -#include - -NAMESPACE_BEGIN(pybind11) -NAMESPACE_BEGIN(detail) - -template -struct type_caster> { - using type = std::function; - using retval_type = conditional_t::value, void_type, Return>; - using function_type = Return (*) (Args...); - -public: - bool load(handle src, bool convert) { - if (src.is_none()) { - // Defer accepting None to other overloads (if we aren't in convert mode): - if (!convert) return false; - return true; - } - - if (!isinstance(src)) - return false; - - auto func = reinterpret_borrow(src); - - /* - When passing a C++ function as an argument to another C++ - function via Python, every function call would normally involve - a full C++ -> Python -> C++ roundtrip, which can be prohibitive. - Here, we try to at least detect the case where the function is - stateless (i.e. function pointer or lambda function without - captured variables), in which case the roundtrip can be avoided. - */ - if (auto cfunc = func.cpp_function()) { - auto c = reinterpret_borrow(PyCFunction_GET_SELF(cfunc.ptr())); - auto rec = (function_record *) c; - - if (rec && rec->is_stateless && - same_type(typeid(function_type), *reinterpret_cast(rec->data[1]))) { - struct capture { function_type f; }; - value = ((capture *) &rec->data)->f; - return true; - } - } - - value = [func](Args... args) -> Return { - gil_scoped_acquire acq; - object retval(func(std::forward(args)...)); - /* Visual studio 2015 parser issue: need parentheses around this expression */ - return (retval.template cast()); - }; - return true; - } - - template - static handle cast(Func &&f_, return_value_policy policy, handle /* parent */) { - if (!f_) - return none().inc_ref(); - - auto result = f_.template target(); - if (result) - return cpp_function(*result, policy).release(); - else - return cpp_function(std::forward(f_), policy).release(); - } - - PYBIND11_TYPE_CASTER(type, _("Callable[[") + - argument_loader::arg_names() + _("], ") + - make_caster::name() + - _("]")); -}; - -NAMESPACE_END(detail) -NAMESPACE_END(pybind11) diff --git a/lanms/include/pybind11/numpy.h b/lanms/include/pybind11/numpy.h deleted file mode 100644 index 388e2122..00000000 --- a/lanms/include/pybind11/numpy.h +++ /dev/null @@ -1,1598 +0,0 @@ -/* - pybind11/numpy.h: Basic NumPy support, vectorize() wrapper - - Copyright (c) 2016 Wenzel Jakob - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. -*/ - -#pragma once - -#include "pybind11.h" -#include "complex.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if defined(_MSC_VER) -# pragma warning(push) -# pragma warning(disable: 4127) // warning C4127: Conditional expression is constant -#endif - -/* This will be true on all flat address space platforms and allows us to reduce the - whole npy_intp / ssize_t / Py_intptr_t business down to just ssize_t for all size - and dimension types (e.g. shape, strides, indexing), instead of inflicting this - upon the library user. 
diff --git a/lanms/include/pybind11/numpy.h b/lanms/include/pybind11/numpy.h
deleted file mode 100644
index 388e2122..00000000
--- a/lanms/include/pybind11/numpy.h
+++ /dev/null
@@ -1,1598 +0,0 @@
-/*
-    pybind11/numpy.h: Basic NumPy support, vectorize() wrapper
-
-    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
-
-    All rights reserved. Use of this source code is governed by a
-    BSD-style license that can be found in the LICENSE file.
-*/
[... remaining deleted lines omitted: the NumPy C-API lookup table, py::dtype, py::array / py::array_t, unchecked access proxies, structured-dtype registration, and the py::vectorize broadcasting machinery ...]
diff --git a/lanms/include/pybind11/operators.h b/lanms/include/pybind11/operators.h
deleted file mode 100644
index 562987b8..00000000
--- a/lanms/include/pybind11/operators.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
-    pybind11/operator.h: Metatemplates for operator overloading
-
-    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
-
-    All rights reserved. Use of this source code is governed by a
-    BSD-style license that can be found in the LICENSE file.
-*/
[... remaining deleted lines omitted: the op_id/op_type enums, py::self, and the PYBIND11_BINARY/INPLACE/UNARY_OPERATOR macro family ...]
diff --git a/lanms/include/pybind11/options.h b/lanms/include/pybind11/options.h
deleted file mode 100644
index 3105551d..00000000
--- a/lanms/include/pybind11/options.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
-    pybind11/options.h: global settings that are configurable at runtime.
-
-    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
-
-    All rights reserved. Use of this source code is governed by a
-    BSD-style license that can be found in the LICENSE file.
-*/
-
-#pragma once
-
-#include "common.h"
-
-NAMESPACE_BEGIN(pybind11)
-
-class options {
-public:
-
-    // Default RAII constructor, which leaves settings as they currently are.
-    options() : previous_state(global_state()) {}
-
-    // Class is non-copyable.
-    options(const options&) = delete;
-    options& operator=(const options&) = delete;
-
-    // Destructor, which restores settings that were in effect before.
-    ~options() {
-        global_state() = previous_state;
-    }
-
-    // Setter methods (affect the global state):
-
-    options& disable_user_defined_docstrings() & { global_state().show_user_defined_docstrings = false; return *this; }
-
-    options& enable_user_defined_docstrings() & { global_state().show_user_defined_docstrings = true; return *this; }
-
-    options& disable_function_signatures() & { global_state().show_function_signatures = false; return *this; }
-
-    options& enable_function_signatures() & { global_state().show_function_signatures = true; return *this; }
-
-    // Getter methods (return the global state):
-
-    static bool show_user_defined_docstrings() { return global_state().show_user_defined_docstrings; }
-
-    static bool show_function_signatures() { return global_state().show_function_signatures; }
-
-    // This type is not meant to be allocated on the heap.
-    void* operator new(size_t) = delete;
-
-private:
-
-    struct state {
-        bool show_user_defined_docstrings = true; //< Include user-supplied texts in docstrings.
-        bool show_function_signatures = true;     //< Include auto-generated function signatures in docstrings.
-    };
-
-    static state &global_state() {
-        static state instance;
-        return instance;
-    }
-
-    state previous_state;
-};
-
-NAMESPACE_END(pybind11)
diff --git a/lanms/include/pybind11/pybind11.h b/lanms/include/pybind11/pybind11.h
deleted file mode 100644
index d3f34ee6..00000000
--- a/lanms/include/pybind11/pybind11.h
+++ /dev/null
@@ -1,1869 +0,0 @@
-/*
-    pybind11/pybind11.h: Main header file of the C++11 python
-    binding generator library
-
-    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
-
-    All rights reserved. Use of this source code is governed by a
-    BSD-style license that can be found in the LICENSE file.
-*/
-
-#pragma once
-
-#if defined(_MSC_VER)
-# pragma warning(push)
-# pragma warning(disable: 4100) // warning C4100: Unreferenced formal parameter
-# pragma warning(disable: 4127) // warning C4127: Conditional expression is constant
-# pragma warning(disable: 4512) // warning C4512: Assignment operator was implicitly defined as deleted
-# pragma warning(disable: 4800) // warning C4800: 'int': forcing value to bool 'true' or 'false' (performance warning)
-# pragma warning(disable: 4996) // warning C4996: The POSIX name for this item is deprecated. Instead, use the ISO C and C++ conformant name
Instead, use the ISO C and C++ conformant name -# pragma warning(disable: 4702) // warning C4702: unreachable code -# pragma warning(disable: 4522) // warning C4522: multiple assignment operators specified -#elif defined(__INTEL_COMPILER) -# pragma warning(push) -# pragma warning(disable: 68) // integer conversion resulted in a change of sign -# pragma warning(disable: 186) // pointless comparison of unsigned integer with zero -# pragma warning(disable: 878) // incompatible exception specifications -# pragma warning(disable: 1334) // the "template" keyword used for syntactic disambiguation may only be used within a template -# pragma warning(disable: 1682) // implicit conversion of a 64-bit integral type to a smaller integral type (potential portability problem) -# pragma warning(disable: 1875) // offsetof applied to non-POD (Plain Old Data) types is nonstandard -# pragma warning(disable: 2196) // warning #2196: routine is both "inline" and "noinline" -#elif defined(__GNUG__) && !defined(__clang__) -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wunused-but-set-parameter" -# pragma GCC diagnostic ignored "-Wunused-but-set-variable" -# pragma GCC diagnostic ignored "-Wmissing-field-initializers" -# pragma GCC diagnostic ignored "-Wstrict-aliasing" -# pragma GCC diagnostic ignored "-Wattributes" -# if __GNUC__ >= 7 -# pragma GCC diagnostic ignored "-Wnoexcept-type" -# endif -#endif - -#include "attr.h" -#include "options.h" -#include "class_support.h" - -NAMESPACE_BEGIN(pybind11) - -/// Wraps an arbitrary C++ function/method/lambda function/.. into a callable Python object -class cpp_function : public function { -public: - cpp_function() { } - - /// Construct a cpp_function from a vanilla function pointer - template - cpp_function(Return (*f)(Args...), const Extra&... extra) { - initialize(f, f, extra...); - } - - /// Construct a cpp_function from a lambda function (possibly with internal state) - template , - std::is_function, std::is_pointer, std::is_member_pointer - >::value> - > - cpp_function(Func &&f, const Extra&... extra) { - using FuncType = typename detail::remove_class::operator())>::type; - initialize(std::forward(f), - (FuncType *) nullptr, extra...); - } - - /// Construct a cpp_function from a class method (non-const) - template - cpp_function(Return (Class::*f)(Arg...), const Extra&... extra) { - initialize([f](Class *c, Arg... args) -> Return { return (c->*f)(args...); }, - (Return (*) (Class *, Arg...)) nullptr, extra...); - } - - /// Construct a cpp_function from a class method (const) - template - cpp_function(Return (Class::*f)(Arg...) const, const Extra&... extra) { - initialize([f](const Class *c, Arg... args) -> Return { return (c->*f)(args...); }, - (Return (*)(const Class *, Arg ...)) nullptr, extra...); - } - - /// Return the function name - object name() const { return attr("__name__"); } - -protected: - /// Space optimization: don't inline this frequently instantiated fragment - PYBIND11_NOINLINE detail::function_record *make_function_record() { - return new detail::function_record(); - } - - /// Special internal constructor for functors, lambda functions, etc. - template - void initialize(Func &&f, Return (*)(Args...), const Extra&... extra) { - - struct capture { detail::remove_reference_t f; }; - - /* Store the function including any extra state it might have (e.g. 
a lambda capture object) */ - auto rec = make_function_record(); - - /* Store the capture object directly in the function record if there is enough space */ - if (sizeof(capture) <= sizeof(rec->data)) { - /* Without these pragmas, GCC warns that there might not be - enough space to use the placement new operator. However, the - 'if' statement above ensures that this is the case. */ -#if defined(__GNUG__) && !defined(__clang__) && __GNUC__ >= 6 -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wplacement-new" -#endif - new ((capture *) &rec->data) capture { std::forward(f) }; -#if defined(__GNUG__) && !defined(__clang__) && __GNUC__ >= 6 -# pragma GCC diagnostic pop -#endif - if (!std::is_trivially_destructible::value) - rec->free_data = [](detail::function_record *r) { ((capture *) &r->data)->~capture(); }; - } else { - rec->data[0] = new capture { std::forward(f) }; - rec->free_data = [](detail::function_record *r) { delete ((capture *) r->data[0]); }; - } - - /* Type casters for the function arguments and return value */ - using cast_in = detail::argument_loader; - using cast_out = detail::make_caster< - detail::conditional_t::value, detail::void_type, Return> - >; - - static_assert(detail::expected_num_args(sizeof...(Args), cast_in::has_args, cast_in::has_kwargs), - "The number of argument annotations does not match the number of function arguments"); - - /* Dispatch code which converts function arguments and performs the actual function call */ - rec->impl = [](detail::function_call &call) -> handle { - cast_in args_converter; - - /* Try to cast the function arguments into the C++ domain */ - if (!args_converter.load_args(call)) - return PYBIND11_TRY_NEXT_OVERLOAD; - - /* Invoke call policy pre-call hook */ - detail::process_attributes::precall(call); - - /* Get a pointer to the capture object */ - auto data = (sizeof(capture) <= sizeof(call.func.data) - ? 
&call.func.data : call.func.data[0]); - capture *cap = const_cast(reinterpret_cast(data)); - - /* Override policy for rvalues -- usually to enforce rvp::move on an rvalue */ - const auto policy = detail::return_value_policy_override::policy(call.func.policy); - - /* Function scope guard -- defaults to the compile-to-nothing `void_type` */ - using Guard = detail::extract_guard_t; - - /* Perform the function call */ - handle result = cast_out::cast( - std::move(args_converter).template call(cap->f), policy, call.parent); - - /* Invoke call policy post-call hook */ - detail::process_attributes::postcall(call, result); - - return result; - }; - - /* Process any user-provided function attributes */ - detail::process_attributes::init(extra..., rec); - - /* Generate a readable signature describing the function's arguments and return value types */ - using detail::descr; using detail::_; - PYBIND11_DESCR signature = _("(") + cast_in::arg_names() + _(") -> ") + cast_out::name(); - - /* Register the function with Python from generic (non-templated) code */ - initialize_generic(rec, signature.text(), signature.types(), sizeof...(Args)); - - if (cast_in::has_args) rec->has_args = true; - if (cast_in::has_kwargs) rec->has_kwargs = true; - - /* Stash some additional information used by an important optimization in 'functional.h' */ - using FunctionType = Return (*)(Args...); - constexpr bool is_function_ptr = - std::is_convertible::value && - sizeof(capture) == sizeof(void *); - if (is_function_ptr) { - rec->is_stateless = true; - rec->data[1] = const_cast(reinterpret_cast(&typeid(FunctionType))); - } - } - - /// Register a function call with Python (generic non-templated code goes here) - void initialize_generic(detail::function_record *rec, const char *text, - const std::type_info *const *types, size_t args) { - - /* Create copies of all referenced C-style strings */ - rec->name = strdup(rec->name ? rec->name : ""); - if (rec->doc) rec->doc = strdup(rec->doc); - for (auto &a: rec->args) { - if (a.name) - a.name = strdup(a.name); - if (a.descr) - a.descr = strdup(a.descr); - else if (a.value) - a.descr = strdup(a.value.attr("__repr__")().cast().c_str()); - } - - /* Generate a proper function signature */ - std::string signature; - size_t type_depth = 0, char_index = 0, type_index = 0, arg_index = 0; - while (true) { - char c = text[char_index++]; - if (c == '\0') - break; - - if (c == '{') { - // Write arg name for everything except *args, **kwargs and return type. - if (type_depth == 0 && text[char_index] != '*' && arg_index < args) { - if (!rec->args.empty() && rec->args[arg_index].name) { - signature += rec->args[arg_index].name; - } else if (arg_index == 0 && rec->is_method) { - signature += "self"; - } else { - signature += "arg" + std::to_string(arg_index - (rec->is_method ? 
1 : 0)); - } - signature += ": "; - } - ++type_depth; - } else if (c == '}') { - --type_depth; - if (type_depth == 0) { - if (arg_index < rec->args.size() && rec->args[arg_index].descr) { - signature += "="; - signature += rec->args[arg_index].descr; - } - arg_index++; - } - } else if (c == '%') { - const std::type_info *t = types[type_index++]; - if (!t) - pybind11_fail("Internal error while parsing type signature (1)"); - if (auto tinfo = detail::get_type_info(*t)) { -#if defined(PYPY_VERSION) - signature += handle((PyObject *) tinfo->type) - .attr("__module__") - .cast() + "."; -#endif - signature += tinfo->type->tp_name; - } else { - std::string tname(t->name()); - detail::clean_type_id(tname); - signature += tname; - } - } else { - signature += c; - } - } - if (type_depth != 0 || types[type_index] != nullptr) - pybind11_fail("Internal error while parsing type signature (2)"); - - #if !defined(PYBIND11_CONSTEXPR_DESCR) - delete[] types; - delete[] text; - #endif - -#if PY_MAJOR_VERSION < 3 - if (strcmp(rec->name, "__next__") == 0) { - std::free(rec->name); - rec->name = strdup("next"); - } else if (strcmp(rec->name, "__bool__") == 0) { - std::free(rec->name); - rec->name = strdup("__nonzero__"); - } -#endif - rec->signature = strdup(signature.c_str()); - rec->args.shrink_to_fit(); - rec->is_constructor = !strcmp(rec->name, "__init__") || !strcmp(rec->name, "__setstate__"); - rec->nargs = (std::uint16_t) args; - - if (rec->sibling && PYBIND11_INSTANCE_METHOD_CHECK(rec->sibling.ptr())) - rec->sibling = PYBIND11_INSTANCE_METHOD_GET_FUNCTION(rec->sibling.ptr()); - - detail::function_record *chain = nullptr, *chain_start = rec; - if (rec->sibling) { - if (PyCFunction_Check(rec->sibling.ptr())) { - auto rec_capsule = reinterpret_borrow(PyCFunction_GET_SELF(rec->sibling.ptr())); - chain = (detail::function_record *) rec_capsule; - /* Never append a method to an overload chain of a parent class; - instead, hide the parent's overloads in this case */ - if (!chain->scope.is(rec->scope)) - chain = nullptr; - } - // Don't trigger for things like the default __init__, which are wrapper_descriptors that we are intentionally replacing - else if (!rec->sibling.is_none() && rec->name[0] != '_') - pybind11_fail("Cannot overload existing non-function object \"" + std::string(rec->name) + - "\" with a function of the same name"); - } - - if (!chain) { - /* No existing overload was found, create a new function object */ - rec->def = new PyMethodDef(); - std::memset(rec->def, 0, sizeof(PyMethodDef)); - rec->def->ml_name = rec->name; - rec->def->ml_meth = reinterpret_cast(*dispatcher); - rec->def->ml_flags = METH_VARARGS | METH_KEYWORDS; - - capsule rec_capsule(rec, [](void *ptr) { - destruct((detail::function_record *) ptr); - }); - - object scope_module; - if (rec->scope) { - if (hasattr(rec->scope, "__module__")) { - scope_module = rec->scope.attr("__module__"); - } else if (hasattr(rec->scope, "__name__")) { - scope_module = rec->scope.attr("__name__"); - } - } - - m_ptr = PyCFunction_NewEx(rec->def, rec_capsule.ptr(), scope_module.ptr()); - if (!m_ptr) - pybind11_fail("cpp_function::cpp_function(): Could not allocate function object"); - } else { - /* Append at the end of the overload chain */ - m_ptr = rec->sibling.ptr(); - inc_ref(); - chain_start = chain; - if (chain->is_method != rec->is_method) - pybind11_fail("overloading a method with both static and instance methods is not supported; " - #if defined(NDEBUG) - "compile in debug mode for more details" - #else - "error while attempting to bind " + 
std::string(rec->is_method ? "instance" : "static") + " method " + - std::string(pybind11::str(rec->scope.attr("__name__"))) + "." + std::string(rec->name) + signature - #endif - ); - while (chain->next) - chain = chain->next; - chain->next = rec; - } - - std::string signatures; - int index = 0; - /* Create a nice pydoc rec including all signatures and - docstrings of the functions in the overload chain */ - if (chain && options::show_function_signatures()) { - // First a generic signature - signatures += rec->name; - signatures += "(*args, **kwargs)\n"; - signatures += "Overloaded function.\n\n"; - } - // Then specific overload signatures - bool first_user_def = true; - for (auto it = chain_start; it != nullptr; it = it->next) { - if (options::show_function_signatures()) { - if (index > 0) signatures += "\n"; - if (chain) - signatures += std::to_string(++index) + ". "; - signatures += rec->name; - signatures += it->signature; - signatures += "\n"; - } - if (it->doc && strlen(it->doc) > 0 && options::show_user_defined_docstrings()) { - // If we're appending another docstring, and aren't printing function signatures, we - // need to append a newline first: - if (!options::show_function_signatures()) { - if (first_user_def) first_user_def = false; - else signatures += "\n"; - } - if (options::show_function_signatures()) signatures += "\n"; - signatures += it->doc; - if (options::show_function_signatures()) signatures += "\n"; - } - } - - /* Install docstring */ - PyCFunctionObject *func = (PyCFunctionObject *) m_ptr; - if (func->m_ml->ml_doc) - std::free(const_cast(func->m_ml->ml_doc)); - func->m_ml->ml_doc = strdup(signatures.c_str()); - - if (rec->is_method) { - m_ptr = PYBIND11_INSTANCE_METHOD_NEW(m_ptr, rec->scope.ptr()); - if (!m_ptr) - pybind11_fail("cpp_function::cpp_function(): Could not allocate instance method object"); - Py_DECREF(func); - } - } - - /// When a cpp_function is GCed, release any memory allocated by pybind11 - static void destruct(detail::function_record *rec) { - while (rec) { - detail::function_record *next = rec->next; - if (rec->free_data) - rec->free_data(rec); - std::free((char *) rec->name); - std::free((char *) rec->doc); - std::free((char *) rec->signature); - for (auto &arg: rec->args) { - std::free(const_cast(arg.name)); - std::free(const_cast(arg.descr)); - arg.value.dec_ref(); - } - if (rec->def) { - std::free(const_cast(rec->def->ml_doc)); - delete rec->def; - } - delete rec; - rec = next; - } - } - - /// Main dispatch logic for calls to functions bound using pybind11 - static PyObject *dispatcher(PyObject *self, PyObject *args_in, PyObject *kwargs_in) { - using namespace detail; - - /* Iterator over the list of potentially admissible overloads */ - function_record *overloads = (function_record *) PyCapsule_GetPointer(self, nullptr), - *it = overloads; - - /* Need to know how many arguments + keyword arguments there are to pick the right overload */ - const size_t n_args_in = (size_t) PyTuple_GET_SIZE(args_in); - - handle parent = n_args_in > 0 ? PyTuple_GET_ITEM(args_in, 0) : nullptr, - result = PYBIND11_TRY_NEXT_OVERLOAD; - - try { - // We do this in two passes: in the first pass, we load arguments with `convert=false`; - // in the second, we allow conversion (except for arguments with an explicit - // py::arg().noconvert()). This lets us prefer calls without conversion, with - // conversion as a fallback. 
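// --- annotation (a usage sketch, not part of the deleted file) ------------
// The two-pass scheme described above means an overload that loads all of
// its arguments without implicit conversion always beats one that needs a
// conversion. How this surfaces at the binding level -- the module and
// function names here are hypothetical, but py::arg(...).noconvert() is the
// annotation whose flag this dispatcher checks:

#include <pybind11/pybind11.h>
namespace py = pybind11;

PYBIND11_MODULE(dispatch_demo, m) {
    // Overload 1: refuses implicit conversions, so it only matches a real int.
    m.def("f", [](int) { return "int"; }, py::arg("i").noconvert());
    // Overload 2: accepts anything convertible to double (including an int).
    m.def("f", [](double) { return "double"; });
    // In Python: f(1) -> "int" (matched in the first, no-convert pass);
    // f(1.5) -> "double" (the int overload cannot load a float).
}
// ---------------------------------------------------------------------------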
- std::vector second_pass; - - // However, if there are no overloads, we can just skip the no-convert pass entirely - const bool overloaded = it != nullptr && it->next != nullptr; - - for (; it != nullptr; it = it->next) { - - /* For each overload: - 1. Copy all positional arguments we were given, also checking to make sure that - named positional arguments weren't *also* specified via kwarg. - 2. If we weren't given enough, try to make up the omitted ones by checking - whether they were provided by a kwarg matching the `py::arg("name")` name. If - so, use it (and remove it from kwargs; if not, see if the function binding - provided a default that we can use. - 3. Ensure that either all keyword arguments were "consumed", or that the function - takes a kwargs argument to accept unconsumed kwargs. - 4. Any positional arguments still left get put into a tuple (for args), and any - leftover kwargs get put into a dict. - 5. Pack everything into a vector; if we have py::args or py::kwargs, they are an - extra tuple or dict at the end of the positional arguments. - 6. Call the function call dispatcher (function_record::impl) - - If one of these fail, move on to the next overload and keep trying until we get a - result other than PYBIND11_TRY_NEXT_OVERLOAD. - */ - - function_record &func = *it; - size_t pos_args = func.nargs; // Number of positional arguments that we need - if (func.has_args) --pos_args; // (but don't count py::args - if (func.has_kwargs) --pos_args; // or py::kwargs) - - if (!func.has_args && n_args_in > pos_args) - continue; // Too many arguments for this overload - - if (n_args_in < pos_args && func.args.size() < pos_args) - continue; // Not enough arguments given, and not enough defaults to fill in the blanks - - function_call call(func, parent); - - size_t args_to_copy = std::min(pos_args, n_args_in); - size_t args_copied = 0; - - // 1. Copy any position arguments given. - bool bad_arg = false; - for (; args_copied < args_to_copy; ++args_copied) { - argument_record *arg_rec = args_copied < func.args.size() ? &func.args[args_copied] : nullptr; - if (kwargs_in && arg_rec && arg_rec->name && PyDict_GetItemString(kwargs_in, arg_rec->name)) { - bad_arg = true; - break; - } - - handle arg(PyTuple_GET_ITEM(args_in, args_copied)); - if (arg_rec && !arg_rec->none && arg.is_none()) { - bad_arg = true; - break; - } - call.args.push_back(arg); - call.args_convert.push_back(arg_rec ? arg_rec->convert : true); - } - if (bad_arg) - continue; // Maybe it was meant for another overload (issue #688) - - // We'll need to copy this if we steal some kwargs for defaults - dict kwargs = reinterpret_borrow(kwargs_in); - - // 2. Check kwargs and, failing that, defaults that may help complete the list - if (args_copied < pos_args) { - bool copied_kwargs = false; - - for (; args_copied < pos_args; ++args_copied) { - const auto &arg = func.args[args_copied]; - - handle value; - if (kwargs_in && arg.name) - value = PyDict_GetItemString(kwargs.ptr(), arg.name); - - if (value) { - // Consume a kwargs value - if (!copied_kwargs) { - kwargs = reinterpret_steal(PyDict_Copy(kwargs.ptr())); - copied_kwargs = true; - } - PyDict_DelItemString(kwargs.ptr(), arg.name); - } else if (arg.value) { - value = arg.value; - } - - if (value) { - call.args.push_back(value); - call.args_convert.push_back(arg.convert); - } - else - break; - } - - if (args_copied < pos_args) - continue; // Not enough arguments, defaults, or kwargs to fill the positional arguments - } - - // 3. 
Check everything was consumed (unless we have a kwargs arg) - if (kwargs && kwargs.size() > 0 && !func.has_kwargs) - continue; // Unconsumed kwargs, but no py::kwargs argument to accept them - - // 4a. If we have a py::args argument, create a new tuple with leftovers - tuple extra_args; - if (func.has_args) { - if (args_to_copy == 0) { - // We didn't copy out any position arguments from the args_in tuple, so we - // can reuse it directly without copying: - extra_args = reinterpret_borrow(args_in); - } else if (args_copied >= n_args_in) { - extra_args = tuple(0); - } else { - size_t args_size = n_args_in - args_copied; - extra_args = tuple(args_size); - for (size_t i = 0; i < args_size; ++i) { - handle item = PyTuple_GET_ITEM(args_in, args_copied + i); - extra_args[i] = item.inc_ref().ptr(); - } - } - call.args.push_back(extra_args); - call.args_convert.push_back(false); - } - - // 4b. If we have a py::kwargs, pass on any remaining kwargs - if (func.has_kwargs) { - if (!kwargs.ptr()) - kwargs = dict(); // If we didn't get one, send an empty one - call.args.push_back(kwargs); - call.args_convert.push_back(false); - } - - // 5. Put everything in a vector. Not technically step 5, we've been building it - // in `call.args` all along. - #if !defined(NDEBUG) - if (call.args.size() != func.nargs || call.args_convert.size() != func.nargs) - pybind11_fail("Internal error: function call dispatcher inserted wrong number of arguments!"); - #endif - - std::vector second_pass_convert; - if (overloaded) { - // We're in the first no-convert pass, so swap out the conversion flags for a - // set of all-false flags. If the call fails, we'll swap the flags back in for - // the conversion-allowed call below. - second_pass_convert.resize(func.nargs, false); - call.args_convert.swap(second_pass_convert); - } - - // 6. Call the function. - try { - loader_life_support guard{}; - result = func.impl(call); - } catch (reference_cast_error &) { - result = PYBIND11_TRY_NEXT_OVERLOAD; - } - - if (result.ptr() != PYBIND11_TRY_NEXT_OVERLOAD) - break; - - if (overloaded) { - // The (overloaded) call failed; if the call has at least one argument that - // permits conversion (i.e. it hasn't been explicitly specified `.noconvert()`) - // then add this call to the list of second pass overloads to try. - for (size_t i = func.is_method ? 1 : 0; i < pos_args; i++) { - if (second_pass_convert[i]) { - // Found one: swap the converting flags back in and store the call for - // the second pass. - call.args_convert.swap(second_pass_convert); - second_pass.push_back(std::move(call)); - break; - } - } - } - } - - if (overloaded && !second_pass.empty() && result.ptr() == PYBIND11_TRY_NEXT_OVERLOAD) { - // The no-conversion pass finished without success, try again with conversion allowed - for (auto &call : second_pass) { - try { - loader_life_support guard{}; - result = call.func.impl(call); - } catch (reference_cast_error &) { - result = PYBIND11_TRY_NEXT_OVERLOAD; - } - - if (result.ptr() != PYBIND11_TRY_NEXT_OVERLOAD) - break; - } - } - } catch (error_already_set &e) { - e.restore(); - return nullptr; - } catch (...) { - /* When an exception is caught, give each registered exception - translator a chance to translate it to a Python exception - in reverse order of registration. 
- - A translator may choose to do one of the following: - - - catch the exception and call PyErr_SetString or PyErr_SetObject - to set a standard (or custom) Python exception, or - - do nothing and let the exception fall through to the next translator, or - - delegate translation to the next translator by throwing a new type of exception. */ - - auto last_exception = std::current_exception(); - auto ®istered_exception_translators = get_internals().registered_exception_translators; - for (auto& translator : registered_exception_translators) { - try { - translator(last_exception); - } catch (...) { - last_exception = std::current_exception(); - continue; - } - return nullptr; - } - PyErr_SetString(PyExc_SystemError, "Exception escaped from default exception translator!"); - return nullptr; - } - - if (result.ptr() == PYBIND11_TRY_NEXT_OVERLOAD) { - if (overloads->is_operator) - return handle(Py_NotImplemented).inc_ref().ptr(); - - std::string msg = std::string(overloads->name) + "(): incompatible " + - std::string(overloads->is_constructor ? "constructor" : "function") + - " arguments. The following argument types are supported:\n"; - - int ctr = 0; - for (function_record *it2 = overloads; it2 != nullptr; it2 = it2->next) { - msg += " "+ std::to_string(++ctr) + ". "; - - bool wrote_sig = false; - if (overloads->is_constructor) { - // For a constructor, rewrite `(self: Object, arg0, ...) -> NoneType` as `Object(arg0, ...)` - std::string sig = it2->signature; - size_t start = sig.find('(') + 7; // skip "(self: " - if (start < sig.size()) { - // End at the , for the next argument - size_t end = sig.find(", "), next = end + 2; - size_t ret = sig.rfind(" -> "); - // Or the ), if there is no comma: - if (end >= sig.size()) next = end = sig.find(')'); - if (start < end && next < sig.size()) { - msg.append(sig, start, end - start); - msg += '('; - msg.append(sig, next, ret - next); - wrote_sig = true; - } - } - } - if (!wrote_sig) msg += it2->signature; - - msg += "\n"; - } - msg += "\nInvoked with: "; - auto args_ = reinterpret_borrow(args_in); - bool some_args = false; - for (size_t ti = overloads->is_constructor ? 1 : 0; ti < args_.size(); ++ti) { - if (!some_args) some_args = true; - else msg += ", "; - msg += pybind11::repr(args_[ti]); - } - if (kwargs_in) { - auto kwargs = reinterpret_borrow(kwargs_in); - if (kwargs.size() > 0) { - if (some_args) msg += "; "; - msg += "kwargs: "; - bool first = true; - for (auto kwarg : kwargs) { - if (first) first = false; - else msg += ", "; - msg += pybind11::str("{}={!r}").format(kwarg.first, kwarg.second); - } - } - } - - PyErr_SetString(PyExc_TypeError, msg.c_str()); - return nullptr; - } else if (!result) { - std::string msg = "Unable to convert function return value to a " - "Python type! 
The signature was\n\t"; - msg += it->signature; - PyErr_SetString(PyExc_TypeError, msg.c_str()); - return nullptr; - } else { - if (overloads->is_constructor) { - auto tinfo = get_type_info((PyTypeObject *) overloads->scope.ptr()); - tinfo->init_instance(reinterpret_cast(parent.ptr()), nullptr); - } - return result.ptr(); - } - } -}; - -/// Wrapper for Python extension modules -class module : public object { -public: - PYBIND11_OBJECT_DEFAULT(module, object, PyModule_Check) - - /// Create a new top-level Python module with the given name and docstring - explicit module(const char *name, const char *doc = nullptr) { - if (!options::show_user_defined_docstrings()) doc = nullptr; -#if PY_MAJOR_VERSION >= 3 - PyModuleDef *def = new PyModuleDef(); - std::memset(def, 0, sizeof(PyModuleDef)); - def->m_name = name; - def->m_doc = doc; - def->m_size = -1; - Py_INCREF(def); - m_ptr = PyModule_Create(def); -#else - m_ptr = Py_InitModule3(name, nullptr, doc); -#endif - if (m_ptr == nullptr) - pybind11_fail("Internal error in module::module()"); - inc_ref(); - } - - /** \rst - Create Python binding for a new function within the module scope. ``Func`` - can be a plain C++ function, a function pointer, or a lambda function. For - details on the ``Extra&& ... extra`` argument, see section :ref:`extras`. - \endrst */ - template - module &def(const char *name_, Func &&f, const Extra& ... extra) { - cpp_function func(std::forward(f), name(name_), scope(*this), - sibling(getattr(*this, name_, none())), extra...); - // NB: allow overwriting here because cpp_function sets up a chain with the intention of - // overwriting (and has already checked internally that it isn't overwriting non-functions). - add_object(name_, func, true /* overwrite */); - return *this; - } - - /** \rst - Create and return a new Python submodule with the given name and docstring. - This also works recursively, i.e. - - .. code-block:: cpp - - py::module m("example", "pybind11 example plugin"); - py::module m2 = m.def_submodule("sub", "A submodule of 'example'"); - py::module m3 = m2.def_submodule("subsub", "A submodule of 'example.sub'"); - \endrst */ - module def_submodule(const char *name, const char *doc = nullptr) { - std::string full_name = std::string(PyModule_GetName(m_ptr)) - + std::string(".") + std::string(name); - auto result = reinterpret_borrow(PyImport_AddModule(full_name.c_str())); - if (doc && options::show_user_defined_docstrings()) - result.attr("__doc__") = pybind11::str(doc); - attr(name) = result; - return result; - } - - /// Import and return a module or throws `error_already_set`. - static module import(const char *name) { - PyObject *obj = PyImport_ImportModule(name); - if (!obj) - throw error_already_set(); - return reinterpret_steal(obj); - } - - // Adds an object to the module using the given name. Throws if an object with the given name - // already exists. - // - // overwrite should almost always be false: attempting to overwrite objects that pybind11 has - // established will, in most cases, break things. 
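// --- annotation (a usage sketch, not part of the deleted file) ------------
// The module API above in action: def() for free functions, def_submodule()
// for nesting. This assumes the PYBIND11_MODULE entry point from the same
// library version; the module and function names are hypothetical.

#include <pybind11/pybind11.h>
namespace py = pybind11;

PYBIND11_MODULE(example, m) {
    m.doc() = "pybind11 example plugin";
    m.def("add", [](int a, int b) { return a + b; });
    // Submodules nest: 'example.sub' becomes importable alongside 'example'.
    py::module sub = m.def_submodule("sub", "A submodule of 'example'");
    sub.def("answer", []() { return 42; });
}
// ---------------------------------------------------------------------------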
- PYBIND11_NOINLINE void add_object(const char *name, handle obj, bool overwrite = false) { - if (!overwrite && hasattr(*this, name)) - pybind11_fail("Error during initialization: multiple incompatible definitions with name \"" + - std::string(name) + "\""); - - PyModule_AddObject(ptr(), name, obj.inc_ref().ptr() /* steals a reference */); - } -}; - -/// \ingroup python_builtins -/// Return a dictionary representing the global variables in the current execution frame, -/// or ``__main__.__dict__`` if there is no frame (usually when the interpreter is embedded). -inline dict globals() { - PyObject *p = PyEval_GetGlobals(); - return reinterpret_borrow(p ? p : module::import("__main__").attr("__dict__").ptr()); -} - -NAMESPACE_BEGIN(detail) -/// Generic support for creating new Python heap types -class generic_type : public object { - template friend class class_; -public: - PYBIND11_OBJECT_DEFAULT(generic_type, object, PyType_Check) -protected: - void initialize(const type_record &rec) { - if (rec.scope && hasattr(rec.scope, rec.name)) - pybind11_fail("generic_type: cannot initialize type \"" + std::string(rec.name) + - "\": an object with that name is already defined"); - - if (get_type_info(*rec.type)) - pybind11_fail("generic_type: type \"" + std::string(rec.name) + - "\" is already registered!"); - - m_ptr = make_new_python_type(rec); - - /* Register supplemental type information in C++ dict */ - auto *tinfo = new detail::type_info(); - tinfo->type = (PyTypeObject *) m_ptr; - tinfo->cpptype = rec.type; - tinfo->type_size = rec.type_size; - tinfo->operator_new = rec.operator_new; - tinfo->holder_size_in_ptrs = size_in_ptrs(rec.holder_size); - tinfo->init_instance = rec.init_instance; - tinfo->dealloc = rec.dealloc; - tinfo->simple_type = true; - tinfo->simple_ancestors = true; - - auto &internals = get_internals(); - auto tindex = std::type_index(*rec.type); - tinfo->direct_conversions = &internals.direct_conversions[tindex]; - tinfo->default_holder = rec.default_holder; - internals.registered_types_cpp[tindex] = tinfo; - internals.registered_types_py[(PyTypeObject *) m_ptr] = { tinfo }; - - if (rec.bases.size() > 1 || rec.multiple_inheritance) { - mark_parents_nonsimple(tinfo->type); - tinfo->simple_ancestors = false; - } - else if (rec.bases.size() == 1) { - auto parent_tinfo = get_type_info((PyTypeObject *) rec.bases[0].ptr()); - tinfo->simple_ancestors = parent_tinfo->simple_ancestors; - } - } - - /// Helper function which tags all parents of a type using mult. inheritance - void mark_parents_nonsimple(PyTypeObject *value) { - auto t = reinterpret_borrow(value->tp_bases); - for (handle h : t) { - auto tinfo2 = get_type_info((PyTypeObject *) h.ptr()); - if (tinfo2) - tinfo2->simple_type = false; - mark_parents_nonsimple((PyTypeObject *) h.ptr()); - } - } - - void install_buffer_funcs( - buffer_info *(*get_buffer)(PyObject *, void *), - void *get_buffer_data) { - PyHeapTypeObject *type = (PyHeapTypeObject*) m_ptr; - auto tinfo = detail::get_type_info(&type->ht_type); - - if (!type->ht_type.tp_as_buffer) - pybind11_fail( - "To be able to register buffer protocol support for the type '" + - std::string(tinfo->type->tp_name) + - "' the associated class<>(..) 
invocation must " - "include the pybind11::buffer_protocol() annotation!"); - - tinfo->get_buffer = get_buffer; - tinfo->get_buffer_data = get_buffer_data; - } - - void def_property_static_impl(const char *name, - handle fget, handle fset, - detail::function_record *rec_fget) { - const auto is_static = !(rec_fget->is_method && rec_fget->scope); - const auto has_doc = rec_fget->doc && pybind11::options::show_user_defined_docstrings(); - - auto property = handle((PyObject *) (is_static ? get_internals().static_property_type - : &PyProperty_Type)); - attr(name) = property(fget.ptr() ? fget : none(), - fset.ptr() ? fset : none(), - /*deleter*/none(), - pybind11::str(has_doc ? rec_fget->doc : "")); - } -}; - -/// Set the pointer to operator new if it exists. The cast is needed because it can be overloaded. -template (T::operator new))>> -void set_operator_new(type_record *r) { r->operator_new = &T::operator new; } - -template void set_operator_new(...) { } - -template struct has_operator_delete : std::false_type { }; -template struct has_operator_delete(T::operator delete))>> - : std::true_type { }; -template struct has_operator_delete_size : std::false_type { }; -template struct has_operator_delete_size(T::operator delete))>> - : std::true_type { }; -/// Call class-specific delete if it exists or global otherwise. Can also be an overload set. -template ::value, int> = 0> -void call_operator_delete(T *p, size_t) { T::operator delete(p); } -template ::value && has_operator_delete_size::value, int> = 0> -void call_operator_delete(T *p, size_t s) { T::operator delete(p, s); } - -inline void call_operator_delete(void *p, size_t) { ::operator delete(p); } - -NAMESPACE_END(detail) - -/// Given a pointer to a member function, cast it to its `Derived` version. -/// Forward everything else unchanged. -template -auto method_adaptor(F &&f) -> decltype(std::forward(f)) { return std::forward(f); } - -template -auto method_adaptor(Return (Class::*pmf)(Args...)) -> Return (Derived::*)(Args...) { return pmf; } - -template -auto method_adaptor(Return (Class::*pmf)(Args...) const) -> Return (Derived::*)(Args...) const { return pmf; } - -template -class class_ : public detail::generic_type { - template using is_holder = detail::is_holder_type; - template using is_subtype = detail::is_strict_base_of; - template using is_base = detail::is_strict_base_of; - // struct instead of using here to help MSVC: - template struct is_valid_class_option : - detail::any_of, is_subtype, is_base> {}; - -public: - using type = type_; - using type_alias = detail::exactly_one_t; - constexpr static bool has_alias = !std::is_void::value; - using holder_type = detail::exactly_one_t, options...>; - - static_assert(detail::all_of...>::value, - "Unknown/invalid class_ template parameters provided"); - - PYBIND11_OBJECT(class_, generic_type, PyType_Check) - - template - class_(handle scope, const char *name, const Extra &... extra) { - using namespace detail; - - // MI can only be specified via class_ template options, not constructor parameters - static_assert( - none_of...>::value || // no base class arguments, or: - ( constexpr_sum(is_pyobject::value...) == 1 && // Exactly one base - constexpr_sum(is_base::value...) 
== 0 && // no template option bases - none_of...>::value), // no multiple_inheritance attr - "Error: multiple inheritance bases must be specified via class_ template options"); - - type_record record; - record.scope = scope; - record.name = name; - record.type = &typeid(type); - record.type_size = sizeof(conditional_t); - record.holder_size = sizeof(holder_type); - record.init_instance = init_instance; - record.dealloc = dealloc; - record.default_holder = std::is_same>::value; - - set_operator_new(&record); - - /* Register base classes specified via template arguments to class_, if any */ - PYBIND11_EXPAND_SIDE_EFFECTS(add_base(record)); - - /* Process optional arguments, if any */ - process_attributes::init(extra..., &record); - - generic_type::initialize(record); - - if (has_alias) { - auto &instances = get_internals().registered_types_cpp; - instances[std::type_index(typeid(type_alias))] = instances[std::type_index(typeid(type))]; - } - } - - template ::value, int> = 0> - static void add_base(detail::type_record &rec) { - rec.add_base(typeid(Base), [](void *src) -> void * { - return static_cast(reinterpret_cast(src)); - }); - } - - template ::value, int> = 0> - static void add_base(detail::type_record &) { } - - template - class_ &def(const char *name_, Func&& f, const Extra&... extra) { - cpp_function cf(method_adaptor(std::forward(f)), name(name_), is_method(*this), - sibling(getattr(*this, name_, none())), extra...); - attr(cf.name()) = cf; - return *this; - } - - template class_ & - def_static(const char *name_, Func &&f, const Extra&... extra) { - static_assert(!std::is_member_function_pointer::value, - "def_static(...) called with a non-static member function pointer"); - cpp_function cf(std::forward(f), name(name_), scope(*this), - sibling(getattr(*this, name_, none())), extra...); - attr(cf.name()) = cf; - return *this; - } - - template - class_ &def(const detail::op_ &op, const Extra&... extra) { - op.execute(*this, extra...); - return *this; - } - - template - class_ & def_cast(const detail::op_ &op, const Extra&... extra) { - op.execute_cast(*this, extra...); - return *this; - } - - template - class_ &def(const detail::init &init, const Extra&... extra) { - init.execute(*this, extra...); - return *this; - } - - template - class_ &def(const detail::init_alias &init, const Extra&... extra) { - init.execute(*this, extra...); - return *this; - } - - template class_& def_buffer(Func &&func) { - struct capture { Func func; }; - capture *ptr = new capture { std::forward(func) }; - install_buffer_funcs([](PyObject *obj, void *ptr) -> buffer_info* { - detail::make_caster caster; - if (!caster.load(obj, false)) - return nullptr; - return new buffer_info(((capture *) ptr)->func(caster)); - }, ptr); - return *this; - } - - template - class_ &def_buffer(Return (Class::*func)(Args...)) { - return def_buffer([func] (type &obj) { return (obj.*func)(); }); - } - - template - class_ &def_buffer(Return (Class::*func)(Args...) const) { - return def_buffer([func] (const type &obj) { return (obj.*func)(); }); - } - - template - class_ &def_readwrite(const char *name, D C::*pm, const Extra&... 
extra) { - static_assert(std::is_base_of::value, "def_readwrite() requires a class member (or base class member)"); - cpp_function fget([pm](const type &c) -> const D &{ return c.*pm; }, is_method(*this)), - fset([pm](type &c, const D &value) { c.*pm = value; }, is_method(*this)); - def_property(name, fget, fset, return_value_policy::reference_internal, extra...); - return *this; - } - - template - class_ &def_readonly(const char *name, const D C::*pm, const Extra& ...extra) { - static_assert(std::is_base_of::value, "def_readonly() requires a class member (or base class member)"); - cpp_function fget([pm](const type &c) -> const D &{ return c.*pm; }, is_method(*this)); - def_property_readonly(name, fget, return_value_policy::reference_internal, extra...); - return *this; - } - - template - class_ &def_readwrite_static(const char *name, D *pm, const Extra& ...extra) { - cpp_function fget([pm](object) -> const D &{ return *pm; }, scope(*this)), - fset([pm](object, const D &value) { *pm = value; }, scope(*this)); - def_property_static(name, fget, fset, return_value_policy::reference, extra...); - return *this; - } - - template - class_ &def_readonly_static(const char *name, const D *pm, const Extra& ...extra) { - cpp_function fget([pm](object) -> const D &{ return *pm; }, scope(*this)); - def_property_readonly_static(name, fget, return_value_policy::reference, extra...); - return *this; - } - - /// Uses return_value_policy::reference_internal by default - template - class_ &def_property_readonly(const char *name, const Getter &fget, const Extra& ...extra) { - return def_property_readonly(name, cpp_function(method_adaptor(fget)), - return_value_policy::reference_internal, extra...); - } - - /// Uses cpp_function's return_value_policy by default - template - class_ &def_property_readonly(const char *name, const cpp_function &fget, const Extra& ...extra) { - return def_property(name, fget, cpp_function(), extra...); - } - - /// Uses return_value_policy::reference by default - template - class_ &def_property_readonly_static(const char *name, const Getter &fget, const Extra& ...extra) { - return def_property_readonly_static(name, cpp_function(fget), return_value_policy::reference, extra...); - } - - /// Uses cpp_function's return_value_policy by default - template - class_ &def_property_readonly_static(const char *name, const cpp_function &fget, const Extra& ...extra) { - return def_property_static(name, fget, cpp_function(), extra...); - } - - /// Uses return_value_policy::reference_internal by default - template - class_ &def_property(const char *name, const Getter &fget, const Setter &fset, const Extra& ...extra) { - return def_property(name, fget, cpp_function(method_adaptor(fset)), extra...); - } - template - class_ &def_property(const char *name, const Getter &fget, const cpp_function &fset, const Extra& ...extra) { - return def_property(name, cpp_function(method_adaptor(fget)), fset, - return_value_policy::reference_internal, extra...); - } - - /// Uses cpp_function's return_value_policy by default - template - class_ &def_property(const char *name, const cpp_function &fget, const cpp_function &fset, const Extra& ...extra) { - return def_property_static(name, fget, fset, is_method(*this), extra...); - } - - /// Uses return_value_policy::reference by default - template - class_ &def_property_static(const char *name, const Getter &fget, const cpp_function &fset, const Extra& ...extra) { - return def_property_static(name, cpp_function(fget), fset, return_value_policy::reference, extra...); - } 
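// --- annotation (a usage sketch, not part of the deleted file) ------------
// The def_readwrite / def_property family defined above, from the user's
// side; the Pet type is hypothetical.

#include <pybind11/pybind11.h>
#include <string>
namespace py = pybind11;

struct Pet { std::string name; int age = 0; };

PYBIND11_MODULE(pets, m) {
    py::class_<Pet>(m, "Pet")
        .def(py::init<>())
        .def_readwrite("age", &Pet::age)   // plain field -> Python attribute
        .def_property("name",              // getter/setter pair -> property
            [](const Pet &p) { return p.name; },
            [](Pet &p, const std::string &n) { p.name = n; });
}
// ---------------------------------------------------------------------------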
- - /// Uses cpp_function's return_value_policy by default - template - class_ &def_property_static(const char *name, const cpp_function &fget, const cpp_function &fset, const Extra& ...extra) { - auto rec_fget = get_function_record(fget), rec_fset = get_function_record(fset); - char *doc_prev = rec_fget->doc; /* 'extra' field may include a property-specific documentation string */ - detail::process_attributes::init(extra..., rec_fget); - if (rec_fget->doc && rec_fget->doc != doc_prev) { - free(doc_prev); - rec_fget->doc = strdup(rec_fget->doc); - } - if (rec_fset) { - doc_prev = rec_fset->doc; - detail::process_attributes::init(extra..., rec_fset); - if (rec_fset->doc && rec_fset->doc != doc_prev) { - free(doc_prev); - rec_fset->doc = strdup(rec_fset->doc); - } - } - def_property_static_impl(name, fget, fset, rec_fget); - return *this; - } - -private: - /// Initialize holder object, variant 1: object derives from enable_shared_from_this - template - static void init_holder(detail::instance *inst, detail::value_and_holder &v_h, - const holder_type * /* unused */, const std::enable_shared_from_this * /* dummy */) { - try { - auto sh = std::dynamic_pointer_cast( - v_h.value_ptr()->shared_from_this()); - if (sh) { - new (&v_h.holder()) holder_type(std::move(sh)); - v_h.set_holder_constructed(); - } - } catch (const std::bad_weak_ptr &) {} - - if (!v_h.holder_constructed() && inst->owned) { - new (&v_h.holder()) holder_type(v_h.value_ptr()); - v_h.set_holder_constructed(); - } - } - - static void init_holder_from_existing(const detail::value_and_holder &v_h, - const holder_type *holder_ptr, std::true_type /*is_copy_constructible*/) { - new (&v_h.holder()) holder_type(*reinterpret_cast(holder_ptr)); - } - - static void init_holder_from_existing(const detail::value_and_holder &v_h, - const holder_type *holder_ptr, std::false_type /*is_copy_constructible*/) { - new (&v_h.holder()) holder_type(std::move(*const_cast(holder_ptr))); - } - - /// Initialize holder object, variant 2: try to construct from existing holder object, if possible - static void init_holder(detail::instance *inst, detail::value_and_holder &v_h, - const holder_type *holder_ptr, const void * /* dummy -- not enable_shared_from_this) */) { - if (holder_ptr) { - init_holder_from_existing(v_h, holder_ptr, std::is_copy_constructible()); - v_h.set_holder_constructed(); - } else if (inst->owned || detail::always_construct_holder::value) { - new (&v_h.holder()) holder_type(v_h.value_ptr()); - v_h.set_holder_constructed(); - } - } - - /// Performs instance initialization including constructing a holder and registering the known - /// instance. Should be called as soon as the `type` value_ptr is set for an instance. Takes an - /// optional pointer to an existing holder to use; if not specified and the instance is - /// `.owned`, a new holder will be constructed to manage the value pointer. - static void init_instance(detail::instance *inst, const void *holder_ptr) { - auto v_h = inst->get_value_and_holder(detail::get_type_info(typeid(type))); - if (!v_h.instance_registered()) { - register_instance(inst, v_h.value_ptr(), v_h.type); - v_h.set_instance_registered(); - } - init_holder(inst, v_h, (const holder_type *) holder_ptr, v_h.value_ptr()); - } - - /// Deallocates an instance; via holder, if constructed; otherwise via operator delete. 
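// --- annotation (a usage sketch, not part of the deleted file) ------------
// The init_holder logic above has a dedicated path for types deriving from
// std::enable_shared_from_this, so a shared_ptr holder shares ownership with
// any pre-existing C++ shared_ptr to the same object. Sketch with a
// hypothetical Node type:

#include <pybind11/pybind11.h>
#include <memory>
namespace py = pybind11;

struct Node : std::enable_shared_from_this<Node> { int value = 0; };

PYBIND11_MODULE(holders, m) {
    // The second template argument selects the holder (default: unique_ptr).
    py::class_<Node, std::shared_ptr<Node>>(m, "Node")
        .def(py::init<>())
        .def_readwrite("value", &Node::value);
}
// ---------------------------------------------------------------------------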
- static void dealloc(const detail::value_and_holder &v_h) { - if (v_h.holder_constructed()) - v_h.holder().~holder_type(); - else - detail::call_operator_delete(v_h.value_ptr(), v_h.type->type_size); - } - - static detail::function_record *get_function_record(handle h) { - h = detail::get_function(h); - return h ? (detail::function_record *) reinterpret_borrow(PyCFunction_GET_SELF(h.ptr())) - : nullptr; - } -}; - -/// Binds C++ enumerations and enumeration classes to Python -template class enum_ : public class_ { -public: - using class_::def; - using class_::def_property_readonly_static; - using Scalar = typename std::underlying_type::type; - - template - enum_(const handle &scope, const char *name, const Extra&... extra) - : class_(scope, name, extra...), m_entries(), m_parent(scope) { - - constexpr bool is_arithmetic = detail::any_of...>::value; - - auto m_entries_ptr = m_entries.inc_ref().ptr(); - def("__repr__", [name, m_entries_ptr](Type value) -> pybind11::str { - for (const auto &kv : reinterpret_borrow(m_entries_ptr)) { - if (pybind11::cast(kv.second) == value) - return pybind11::str("{}.{}").format(name, kv.first); - } - return pybind11::str("{}.???").format(name); - }); - def_property_readonly_static("__members__", [m_entries_ptr](object /* self */) { - dict m; - for (const auto &kv : reinterpret_borrow(m_entries_ptr)) - m[kv.first] = kv.second; - return m; - }, return_value_policy::copy); - def("__init__", [](Type& value, Scalar i) { value = (Type)i; }); - def("__int__", [](Type value) { return (Scalar) value; }); - #if PY_MAJOR_VERSION < 3 - def("__long__", [](Type value) { return (Scalar) value; }); - #endif - def("__eq__", [](const Type &value, Type *value2) { return value2 && value == *value2; }); - def("__ne__", [](const Type &value, Type *value2) { return !value2 || value != *value2; }); - if (is_arithmetic) { - def("__lt__", [](const Type &value, Type *value2) { return value2 && value < *value2; }); - def("__gt__", [](const Type &value, Type *value2) { return value2 && value > *value2; }); - def("__le__", [](const Type &value, Type *value2) { return value2 && value <= *value2; }); - def("__ge__", [](const Type &value, Type *value2) { return value2 && value >= *value2; }); - } - if (std::is_convertible::value) { - // Don't provide comparison with the underlying type if the enum isn't convertible, - // i.e. if Type is a scoped enum, mirroring the C++ behaviour. (NB: we explicitly - // convert Type to Scalar below anyway because this needs to compile). 
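// --- annotation (a usage sketch, not part of the deleted file) ------------
// enum_ from the user's side. As the std::is_convertible branch above shows,
// a scoped enum gets no comparisons with its underlying integer type
// (mirroring C++); passing py::arithmetic() opts in to the ordering and
// bitwise operators. The Color type is hypothetical.

#include <pybind11/pybind11.h>
namespace py = pybind11;

enum class Color { red, green, blue };

PYBIND11_MODULE(colors, m) {
    py::enum_<Color>(m, "Color", py::arithmetic())
        .value("red", Color::red)
        .value("green", Color::green)
        .value("blue", Color::blue);
    // .export_values() would additionally inject red/green/blue into the
    // module scope -- conventional only for old-style unscoped enums.
}
// ---------------------------------------------------------------------------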
- def("__eq__", [](const Type &value, Scalar value2) { return (Scalar) value == value2; }); - def("__ne__", [](const Type &value, Scalar value2) { return (Scalar) value != value2; }); - if (is_arithmetic) { - def("__lt__", [](const Type &value, Scalar value2) { return (Scalar) value < value2; }); - def("__gt__", [](const Type &value, Scalar value2) { return (Scalar) value > value2; }); - def("__le__", [](const Type &value, Scalar value2) { return (Scalar) value <= value2; }); - def("__ge__", [](const Type &value, Scalar value2) { return (Scalar) value >= value2; }); - def("__invert__", [](const Type &value) { return ~((Scalar) value); }); - def("__and__", [](const Type &value, Scalar value2) { return (Scalar) value & value2; }); - def("__or__", [](const Type &value, Scalar value2) { return (Scalar) value | value2; }); - def("__xor__", [](const Type &value, Scalar value2) { return (Scalar) value ^ value2; }); - def("__rand__", [](const Type &value, Scalar value2) { return (Scalar) value & value2; }); - def("__ror__", [](const Type &value, Scalar value2) { return (Scalar) value | value2; }); - def("__rxor__", [](const Type &value, Scalar value2) { return (Scalar) value ^ value2; }); - def("__and__", [](const Type &value, const Type &value2) { return (Scalar) value & (Scalar) value2; }); - def("__or__", [](const Type &value, const Type &value2) { return (Scalar) value | (Scalar) value2; }); - def("__xor__", [](const Type &value, const Type &value2) { return (Scalar) value ^ (Scalar) value2; }); - } - } - def("__hash__", [](const Type &value) { return (Scalar) value; }); - // Pickling and unpickling -- needed for use with the 'multiprocessing' module - def("__getstate__", [](const Type &value) { return pybind11::make_tuple((Scalar) value); }); - def("__setstate__", [](Type &p, tuple t) { new (&p) Type((Type) t[0].cast()); }); - } - - /// Export enumeration entries into the parent scope - enum_& export_values() { - for (const auto &kv : m_entries) - m_parent.attr(kv.first) = kv.second; - return *this; - } - - /// Add an enumeration entry - enum_& value(char const* name, Type value) { - auto v = pybind11::cast(value, return_value_policy::copy); - this->attr(name) = v; - m_entries[pybind11::str(name)] = v; - return *this; - } - -private: - dict m_entries; - handle m_parent; -}; - -NAMESPACE_BEGIN(detail) -template struct init { - template = 0> - static void execute(Class &cl, const Extra&... extra) { - using Base = typename Class::type; - /// Function which calls a specific C++ in-place constructor - cl.def("__init__", [](Base *self_, Args... args) { new (self_) Base(args...); }, extra...); - } - - template ::value, int> = 0> - static void execute(Class &cl, const Extra&... extra) { - using Base = typename Class::type; - using Alias = typename Class::type_alias; - handle cl_type = cl; - cl.def("__init__", [cl_type](handle self_, Args... args) { - if (self_.get_type().is(cl_type)) - new (self_.cast()) Base(args...); - else - new (self_.cast()) Alias(args...); - }, extra...); - } - - template ::value, int> = 0> - static void execute(Class &cl, const Extra&... extra) { - init_alias::execute(cl, extra...); - } -}; -template struct init_alias { - template ::value, int> = 0> - static void execute(Class &cl, const Extra&... extra) { - using Alias = typename Class::type_alias; - cl.def("__init__", [](Alias *self_, Args... 
args) { new (self_) Alias(args...); }, extra...); - } -}; - - -inline void keep_alive_impl(handle nurse, handle patient) { - if (!nurse || !patient) - pybind11_fail("Could not activate keep_alive!"); - - if (patient.is_none() || nurse.is_none()) - return; /* Nothing to keep alive or nothing to be kept alive by */ - - auto tinfo = all_type_info(Py_TYPE(nurse.ptr())); - if (!tinfo.empty()) { - /* It's a pybind-registered type, so we can store the patient in the - * internal list. */ - add_patient(nurse.ptr(), patient.ptr()); - } - else { - /* Fall back to clever approach based on weak references taken from - * Boost.Python. This is not used for pybind-registered types because - * the objects can be destroyed out-of-order in a GC pass. */ - cpp_function disable_lifesupport( - [patient](handle weakref) { patient.dec_ref(); weakref.dec_ref(); }); - - weakref wr(nurse, disable_lifesupport); - - patient.inc_ref(); /* reference patient and leak the weak reference */ - (void) wr.release(); - } -} - -PYBIND11_NOINLINE inline void keep_alive_impl(size_t Nurse, size_t Patient, function_call &call, handle ret) { - keep_alive_impl( - Nurse == 0 ? ret : Nurse <= call.args.size() ? call.args[Nurse - 1] : handle(), - Patient == 0 ? ret : Patient <= call.args.size() ? call.args[Patient - 1] : handle() - ); -} - -inline std::pair all_type_info_get_cache(PyTypeObject *type) { - auto res = get_internals().registered_types_py -#ifdef __cpp_lib_unordered_map_try_emplace - .try_emplace(type); -#else - .emplace(type, std::vector()); -#endif - if (res.second) { - // New cache entry created; set up a weak reference to automatically remove it if the type - // gets destroyed: - weakref((PyObject *) type, cpp_function([type](handle wr) { - get_internals().registered_types_py.erase(type); - wr.dec_ref(); - })).release(); - } - - return res; -} - -template -struct iterator_state { - Iterator it; - Sentinel end; - bool first_or_done; -}; - -NAMESPACE_END(detail) - -template detail::init init() { return detail::init(); } -template detail::init_alias init_alias() { return detail::init_alias(); } - -/// Makes a python iterator from a first and past-the-end C++ InputIterator. -template ()), - typename... Extra> -iterator make_iterator(Iterator first, Sentinel last, Extra &&... extra) { - typedef detail::iterator_state state; - - if (!detail::get_type_info(typeid(state), false)) { - class_(handle(), "iterator") - .def("__iter__", [](state &s) -> state& { return s; }) - .def("__next__", [](state &s) -> ValueType { - if (!s.first_or_done) - ++s.it; - else - s.first_or_done = false; - if (s.it == s.end) { - s.first_or_done = true; - throw stop_iteration(); - } - return *s.it; - }, std::forward(extra)..., Policy); - } - - return cast(state{first, last, true}); -} - -/// Makes an python iterator over the keys (`.first`) of a iterator over pairs from a -/// first and past-the-end InputIterator. -template ()).first), - typename... Extra> -iterator make_key_iterator(Iterator first, Sentinel last, Extra &&... 
extra) { - typedef detail::iterator_state state; - - if (!detail::get_type_info(typeid(state), false)) { - class_(handle(), "iterator") - .def("__iter__", [](state &s) -> state& { return s; }) - .def("__next__", [](state &s) -> KeyType { - if (!s.first_or_done) - ++s.it; - else - s.first_or_done = false; - if (s.it == s.end) { - s.first_or_done = true; - throw stop_iteration(); - } - return (*s.it).first; - }, std::forward(extra)..., Policy); - } - - return cast(state{first, last, true}); -} - -/// Makes an iterator over values of an stl container or other container supporting -/// `std::begin()`/`std::end()` -template iterator make_iterator(Type &value, Extra&&... extra) { - return make_iterator(std::begin(value), std::end(value), extra...); -} - -/// Makes an iterator over the keys (`.first`) of a stl map-like container supporting -/// `std::begin()`/`std::end()` -template iterator make_key_iterator(Type &value, Extra&&... extra) { - return make_key_iterator(std::begin(value), std::end(value), extra...); -} - -template void implicitly_convertible() { - auto implicit_caster = [](PyObject *obj, PyTypeObject *type) -> PyObject * { - if (!detail::make_caster().load(obj, false)) - return nullptr; - tuple args(1); - args[0] = obj; - PyObject *result = PyObject_Call((PyObject *) type, args.ptr(), nullptr); - if (result == nullptr) - PyErr_Clear(); - return result; - }; - - if (auto tinfo = detail::get_type_info(typeid(OutputType))) - tinfo->implicit_conversions.push_back(implicit_caster); - else - pybind11_fail("implicitly_convertible: Unable to find type " + type_id()); -} - -template -void register_exception_translator(ExceptionTranslator&& translator) { - detail::get_internals().registered_exception_translators.push_front( - std::forward(translator)); -} - -/** - * Wrapper to generate a new Python exception type. - * - * This should only be used with PyErr_SetString for now. - * It is not (yet) possible to use as a py::base. - * Template type argument is reserved for future use. - */ -template -class exception : public object { -public: - exception(handle scope, const char *name, PyObject *base = PyExc_Exception) { - std::string full_name = scope.attr("__name__").cast() + - std::string(".") + name; - m_ptr = PyErr_NewException(const_cast(full_name.c_str()), base, NULL); - if (hasattr(scope, name)) - pybind11_fail("Error during initialization: multiple incompatible " - "definitions with name \"" + std::string(name) + "\""); - scope.attr(name) = *this; - } - - // Sets the current python exception to this exception object with the given message - void operator()(const char *message) { - PyErr_SetString(m_ptr, message); - } -}; - -/** - * Registers a Python exception in `m` of the given `name` and installs an exception translator to - * translate the C++ exception to the created Python exception using the exceptions what() method. - * This is intended for simple exception translations; for more complex translation, register the - * exception object and translator directly. 
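// --- annotation (a usage sketch, not part of the deleted file) ------------
// register_exception, documented just above, is the one-call wrapper over
// register_exception_translator for the common what()-based case. Sketch
// with a hypothetical MyError type:

#include <pybind11/pybind11.h>
#include <stdexcept>
namespace py = pybind11;

struct MyError : std::runtime_error {
    using std::runtime_error::runtime_error;
};

PYBIND11_MODULE(errors_demo, m) {
    // Creates the Python class errors_demo.MyError (derived from Exception)
    // and installs a translator that converts a thrown C++ MyError into it.
    py::register_exception<MyError>(m, "MyError");
    m.def("fail", []() { throw MyError("something broke"); });
}
// ---------------------------------------------------------------------------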
- */ -template -exception ®ister_exception(handle scope, - const char *name, - PyObject *base = PyExc_Exception) { - static exception ex(scope, name, base); - register_exception_translator([](std::exception_ptr p) { - if (!p) return; - try { - std::rethrow_exception(p); - } catch (const CppException &e) { - ex(e.what()); - } - }); - return ex; -} - -NAMESPACE_BEGIN(detail) -PYBIND11_NOINLINE inline void print(tuple args, dict kwargs) { - auto strings = tuple(args.size()); - for (size_t i = 0; i < args.size(); ++i) { - strings[i] = str(args[i]); - } - auto sep = kwargs.contains("sep") ? kwargs["sep"] : cast(" "); - auto line = sep.attr("join")(strings); - - object file; - if (kwargs.contains("file")) { - file = kwargs["file"].cast(); - } else { - try { - file = module::import("sys").attr("stdout"); - } catch (const error_already_set &) { - /* If print() is called from code that is executed as - part of garbage collection during interpreter shutdown, - importing 'sys' can fail. Give up rather than crashing the - interpreter in this case. */ - return; - } - } - - auto write = file.attr("write"); - write(line); - write(kwargs.contains("end") ? kwargs["end"] : cast("\n")); - - if (kwargs.contains("flush") && kwargs["flush"].cast()) - file.attr("flush")(); -} -NAMESPACE_END(detail) - -template -void print(Args &&...args) { - auto c = detail::collect_arguments(std::forward(args)...); - detail::print(c.args(), c.kwargs()); -} - -#if defined(WITH_THREAD) && !defined(PYPY_VERSION) - -/* The functions below essentially reproduce the PyGILState_* API using a RAII - * pattern, but there are a few important differences: - * - * 1. When acquiring the GIL from an non-main thread during the finalization - * phase, the GILState API blindly terminates the calling thread, which - * is often not what is wanted. This API does not do this. - * - * 2. The gil_scoped_release function can optionally cut the relationship - * of a PyThreadState and its associated thread, which allows moving it to - * another thread (this is a fairly rare/advanced use case). - * - * 3. The reference count of an acquired thread state can be controlled. This - * can be handy to prevent cases where callbacks issued from an external - * thread would otherwise constantly construct and destroy thread state data - * structures. - * - * See the Python bindings of NanoGUI (http://github.com/wjakob/nanogui) for an - * example which uses features 2 and 3 to migrate the Python thread of - * execution to another thread (to run the event loop on the original thread, - * in this case). 
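// --- annotation (a usage sketch, not part of the deleted file) ------------
// Typical use of the RAII wrappers described above: release the GIL around
// blocking C++ work so other Python threads can run. Names are hypothetical.

#include <pybind11/pybind11.h>
#include <chrono>
#include <thread>
namespace py = pybind11;

PYBIND11_MODULE(gil_demo, m) {
    m.def("sleep_unlocked", [](int ms) {
        py::gil_scoped_release release;  // GIL dropped here...
        std::this_thread::sleep_for(std::chrono::milliseconds(ms));
    });                                  // ...and reacquired on destruction
}
// ---------------------------------------------------------------------------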
- */ - -class gil_scoped_acquire { -public: - PYBIND11_NOINLINE gil_scoped_acquire() { - auto const &internals = detail::get_internals(); - tstate = (PyThreadState *) PyThread_get_key_value(internals.tstate); - - if (!tstate) { - tstate = PyThreadState_New(internals.istate); - #if !defined(NDEBUG) - if (!tstate) - pybind11_fail("scoped_acquire: could not create thread state!"); - #endif - tstate->gilstate_counter = 0; - #if PY_MAJOR_VERSION < 3 - PyThread_delete_key_value(internals.tstate); - #endif - PyThread_set_key_value(internals.tstate, tstate); - } else { - release = detail::get_thread_state_unchecked() != tstate; - } - - if (release) { - /* Work around an annoying assertion in PyThreadState_Swap */ - #if defined(Py_DEBUG) - PyInterpreterState *interp = tstate->interp; - tstate->interp = nullptr; - #endif - PyEval_AcquireThread(tstate); - #if defined(Py_DEBUG) - tstate->interp = interp; - #endif - } - - inc_ref(); - } - - void inc_ref() { - ++tstate->gilstate_counter; - } - - PYBIND11_NOINLINE void dec_ref() { - --tstate->gilstate_counter; - #if !defined(NDEBUG) - if (detail::get_thread_state_unchecked() != tstate) - pybind11_fail("scoped_acquire::dec_ref(): thread state must be current!"); - if (tstate->gilstate_counter < 0) - pybind11_fail("scoped_acquire::dec_ref(): reference count underflow!"); - #endif - if (tstate->gilstate_counter == 0) { - #if !defined(NDEBUG) - if (!release) - pybind11_fail("scoped_acquire::dec_ref(): internal error!"); - #endif - PyThreadState_Clear(tstate); - PyThreadState_DeleteCurrent(); - PyThread_delete_key_value(detail::get_internals().tstate); - release = false; - } - } - - PYBIND11_NOINLINE ~gil_scoped_acquire() { - dec_ref(); - if (release) - PyEval_SaveThread(); - } -private: - PyThreadState *tstate = nullptr; - bool release = true; -}; - -class gil_scoped_release { -public: - explicit gil_scoped_release(bool disassoc = false) : disassoc(disassoc) { - // `get_internals()` must be called here unconditionally in order to initialize - // `internals.tstate` for subsequent `gil_scoped_acquire` calls. Otherwise, an - // initialization race could occur as multiple threads try `gil_scoped_acquire`. 
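
A minimal sketch of the acquire/release pair in use, assuming a hypothetical pure-C++ workload; the destructor of gil_scoped_release re-acquires the GIL on scope exit.

#include <pybind11/pybind11.h>
namespace py = pybind11;

// Hypothetical CPU-bound work that never touches Python objects.
double integrate(int steps) {
    double acc = 0;
    for (int i = 0; i < steps; ++i)
        acc += 1.0 / (1.0 + i);
    return acc;
}

double integrate_nogil(int steps) {
    py::gil_scoped_release release;   // other Python threads may run in the meantime
    return integrate(steps);          // no Python API calls allowed in this region
}
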
- const auto &internals = detail::get_internals(); - tstate = PyEval_SaveThread(); - if (disassoc) { - auto key = internals.tstate; - #if PY_MAJOR_VERSION < 3 - PyThread_delete_key_value(key); - #else - PyThread_set_key_value(key, nullptr); - #endif - } - } - ~gil_scoped_release() { - if (!tstate) - return; - PyEval_RestoreThread(tstate); - if (disassoc) { - auto key = detail::get_internals().tstate; - #if PY_MAJOR_VERSION < 3 - PyThread_delete_key_value(key); - #endif - PyThread_set_key_value(key, tstate); - } - } -private: - PyThreadState *tstate; - bool disassoc; -}; -#elif defined(PYPY_VERSION) -class gil_scoped_acquire { - PyGILState_STATE state; -public: - gil_scoped_acquire() { state = PyGILState_Ensure(); } - ~gil_scoped_acquire() { PyGILState_Release(state); } -}; - -class gil_scoped_release { - PyThreadState *state; -public: - gil_scoped_release() { state = PyEval_SaveThread(); } - ~gil_scoped_release() { PyEval_RestoreThread(state); } -}; -#else -class gil_scoped_acquire { }; -class gil_scoped_release { }; -#endif - -error_already_set::~error_already_set() { - if (type) { - gil_scoped_acquire gil; - type.release().dec_ref(); - value.release().dec_ref(); - trace.release().dec_ref(); - } -} - -inline function get_type_overload(const void *this_ptr, const detail::type_info *this_type, const char *name) { - handle self = detail::get_object_handle(this_ptr, this_type); - if (!self) - return function(); - handle type = self.get_type(); - auto key = std::make_pair(type.ptr(), name); - - /* Cache functions that aren't overloaded in Python to avoid - many costly Python dictionary lookups below */ - auto &cache = detail::get_internals().inactive_overload_cache; - if (cache.find(key) != cache.end()) - return function(); - - function overload = getattr(self, name, function()); - if (overload.is_cpp_function()) { - cache.insert(key); - return function(); - } - - /* Don't call dispatch code if invoked from overridden function. - Unfortunately this doesn't work on PyPy. */ -#if !defined(PYPY_VERSION) - PyFrameObject *frame = PyThreadState_Get()->frame; - if (frame && (std::string) str(frame->f_code->co_name) == name && - frame->f_code->co_argcount > 0) { - PyFrame_FastToLocals(frame); - PyObject *self_caller = PyDict_GetItem( - frame->f_locals, PyTuple_GET_ITEM(frame->f_code->co_varnames, 0)); - if (self_caller == self.ptr()) - return function(); - } -#else - /* PyPy currently doesn't provide a detailed cpyext emulation of - frame objects, so we have to emulate this using Python. This - is going to be slow..*/ - dict d; d["self"] = self; d["name"] = pybind11::str(name); - PyObject *result = PyRun_String( - "import inspect\n" - "frame = inspect.currentframe()\n" - "if frame is not None:\n" - " frame = frame.f_back\n" - " if frame is not None and str(frame.f_code.co_name) == name and " - "frame.f_code.co_argcount > 0:\n" - " self_caller = frame.f_locals[frame.f_code.co_varnames[0]]\n" - " if self_caller == self:\n" - " self = None\n", - Py_file_input, d.ptr(), d.ptr()); - if (result == nullptr) - throw error_already_set(); - if (d["self"].is_none()) - return function(); - Py_DECREF(result); -#endif - - return overload; -} - -template function get_overload(const T *this_ptr, const char *name) { - auto tinfo = detail::get_type_info(typeid(T)); - return tinfo ? get_type_overload(this_ptr, tinfo, name) : function(); -} - -#define PYBIND11_OVERLOAD_INT(ret_type, cname, name, ...) 
{ \ - pybind11::gil_scoped_acquire gil; \ - pybind11::function overload = pybind11::get_overload(static_cast(this), name); \ - if (overload) { \ - auto o = overload(__VA_ARGS__); \ - if (pybind11::detail::cast_is_temporary_value_reference::value) { \ - static pybind11::detail::overload_caster_t caster; \ - return pybind11::detail::cast_ref(std::move(o), caster); \ - } \ - else return pybind11::detail::cast_safe(std::move(o)); \ - } \ - } - -#define PYBIND11_OVERLOAD_NAME(ret_type, cname, name, fn, ...) \ - PYBIND11_OVERLOAD_INT(ret_type, cname, name, __VA_ARGS__) \ - return cname::fn(__VA_ARGS__) - -#define PYBIND11_OVERLOAD_PURE_NAME(ret_type, cname, name, fn, ...) \ - PYBIND11_OVERLOAD_INT(ret_type, cname, name, __VA_ARGS__) \ - pybind11::pybind11_fail("Tried to call pure virtual function \"" #cname "::" name "\""); - -#define PYBIND11_OVERLOAD(ret_type, cname, fn, ...) \ - PYBIND11_OVERLOAD_NAME(ret_type, cname, #fn, fn, __VA_ARGS__) - -#define PYBIND11_OVERLOAD_PURE(ret_type, cname, fn, ...) \ - PYBIND11_OVERLOAD_PURE_NAME(ret_type, cname, #fn, fn, __VA_ARGS__) - -NAMESPACE_END(pybind11) - -#if defined(_MSC_VER) -# pragma warning(pop) -#elif defined(__INTEL_COMPILER) -/* Leave ignored warnings on */ -#elif defined(__GNUG__) && !defined(__clang__) -# pragma GCC diagnostic pop -#endif diff --git a/lanms/include/pybind11/pytypes.h b/lanms/include/pybind11/pytypes.h deleted file mode 100644 index 095d40f1..00000000 --- a/lanms/include/pybind11/pytypes.h +++ /dev/null @@ -1,1318 +0,0 @@ -/* - pybind11/typeid.h: Convenience wrapper classes for basic Python types - - Copyright (c) 2016 Wenzel Jakob - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. -*/ - -#pragma once - -#include "common.h" -#include "buffer_info.h" -#include -#include - -NAMESPACE_BEGIN(pybind11) - -/* A few forward declarations */ -class handle; class object; -class str; class iterator; -struct arg; struct arg_v; - -NAMESPACE_BEGIN(detail) -class args_proxy; -inline bool isinstance_generic(handle obj, const std::type_info &tp); - -// Accessor forward declarations -template class accessor; -namespace accessor_policies { - struct obj_attr; - struct str_attr; - struct generic_item; - struct sequence_item; - struct list_item; - struct tuple_item; -} -using obj_attr_accessor = accessor; -using str_attr_accessor = accessor; -using item_accessor = accessor; -using sequence_accessor = accessor; -using list_accessor = accessor; -using tuple_accessor = accessor; - -/// Tag and check to identify a class which implements the Python object API -class pyobject_tag { }; -template using is_pyobject = std::is_base_of>; - -/** \rst - A mixin class which adds common functions to `handle`, `object` and various accessors. - The only requirement for `Derived` is to implement ``PyObject *Derived::ptr() const``. -\endrst */ -template -class object_api : public pyobject_tag { - const Derived &derived() const { return static_cast(*this); } - -public: - /** \rst - Return an iterator equivalent to calling ``iter()`` in Python. The object - must be a collection which supports the iteration protocol. - \endrst */ - iterator begin() const; - /// Return a sentinel which ends iteration. - iterator end() const; - - /** \rst - Return an internal functor to invoke the object's sequence protocol. Casting - the returned ``detail::item_accessor`` instance to a `handle` or `object` - subclass causes a corresponding call to ``__getitem__``. 
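
These overload macros are the building blocks for "trampoline" classes; a standard sketch of the pattern follows (the type names are hypothetical):

#include <pybind11/pybind11.h>
#include <string>
namespace py = pybind11;

struct Animal {
    virtual ~Animal() = default;
    virtual std::string go(int n_times) = 0;
};

// Trampoline: forwards the virtual call to a Python override if one exists.
struct PyAnimal : Animal {
    std::string go(int n_times) override {
        PYBIND11_OVERLOAD_PURE(std::string, Animal, go, n_times);
    }
};

PYBIND11_MODULE(example, m) {
    py::class_<Animal, PyAnimal>(m, "Animal")
        .def(py::init<>())
        .def("go", &Animal::go);
}
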
Assigning a `handle` - or `object` subclass causes a call to ``__setitem__``. - \endrst */ - item_accessor operator[](handle key) const; - /// See above (the only difference is that they key is provided as a string literal) - item_accessor operator[](const char *key) const; - - /** \rst - Return an internal functor to access the object's attributes. Casting the - returned ``detail::obj_attr_accessor`` instance to a `handle` or `object` - subclass causes a corresponding call to ``getattr``. Assigning a `handle` - or `object` subclass causes a call to ``setattr``. - \endrst */ - obj_attr_accessor attr(handle key) const; - /// See above (the only difference is that they key is provided as a string literal) - str_attr_accessor attr(const char *key) const; - - /** \rst - Matches * unpacking in Python, e.g. to unpack arguments out of a ``tuple`` - or ``list`` for a function call. Applying another * to the result yields - ** unpacking, e.g. to unpack a dict as function keyword arguments. - See :ref:`calling_python_functions`. - \endrst */ - args_proxy operator*() const; - - /// Check if the given item is contained within this object, i.e. ``item in obj``. - template bool contains(T &&item) const; - - /** \rst - Assuming the Python object is a function or implements the ``__call__`` - protocol, ``operator()`` invokes the underlying function, passing an - arbitrary set of parameters. The result is returned as a `object` and - may need to be converted back into a Python object using `handle::cast()`. - - When some of the arguments cannot be converted to Python objects, the - function will throw a `cast_error` exception. When the Python function - call fails, a `error_already_set` exception is thrown. - \endrst */ - template - object operator()(Args &&...args) const; - template - PYBIND11_DEPRECATED("call(...) was deprecated in favor of operator()(...)") - object call(Args&&... args) const; - - /// Equivalent to ``obj is other`` in Python. - bool is(object_api const& other) const { return derived().ptr() == other.derived().ptr(); } - /// Equivalent to ``obj is None`` in Python. - bool is_none() const { return derived().ptr() == Py_None; } - PYBIND11_DEPRECATED("Use py::str(obj) instead") - pybind11::str str() const; - - /// Get or set the object's docstring, i.e. ``obj.__doc__``. - str_attr_accessor doc() const; - - /// Return the object's current reference count - int ref_count() const { return static_cast(Py_REFCNT(derived().ptr())); } - /// Return a handle to the Python type object underlying the instance - handle get_type() const; -}; - -NAMESPACE_END(detail) - -/** \rst - Holds a reference to a Python object (no reference counting) - - The `handle` class is a thin wrapper around an arbitrary Python object (i.e. a - ``PyObject *`` in Python's C API). It does not perform any automatic reference - counting and merely provides a basic C++ interface to various Python API functions. - - .. seealso:: - The `object` class inherits from `handle` and adds automatic reference - counting features. 
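
A short sketch of this generic object API in action (module and attribute names are illustrative only):

#include <pybind11/pybind11.h>
namespace py = pybind11;

void demo() {
    py::object os   = py::module::import("os");
    py::object join = os.attr("path").attr("join");   // chained attr()
    py::object p    = join("data", "model.ckpt");     // operator(): __call__
    py::tuple parts = py::make_tuple("data", "logs");
    py::object p2   = join(*parts);                   // * unpacking, as in Python
    if (!p.is_none())
        py::print(p, p2);
}
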
-\endrst */ -class handle : public detail::object_api { -public: - /// The default constructor creates a handle with a ``nullptr``-valued pointer - handle() = default; - /// Creates a ``handle`` from the given raw Python object pointer - handle(PyObject *ptr) : m_ptr(ptr) { } // Allow implicit conversion from PyObject* - - /// Return the underlying ``PyObject *`` pointer - PyObject *ptr() const { return m_ptr; } - PyObject *&ptr() { return m_ptr; } - - /** \rst - Manually increase the reference count of the Python object. Usually, it is - preferable to use the `object` class which derives from `handle` and calls - this function automatically. Returns a reference to itself. - \endrst */ - const handle& inc_ref() const & { Py_XINCREF(m_ptr); return *this; } - - /** \rst - Manually decrease the reference count of the Python object. Usually, it is - preferable to use the `object` class which derives from `handle` and calls - this function automatically. Returns a reference to itself. - \endrst */ - const handle& dec_ref() const & { Py_XDECREF(m_ptr); return *this; } - - /** \rst - Attempt to cast the Python object into the given C++ type. A `cast_error` - will be throw upon failure. - \endrst */ - template T cast() const; - /// Return ``true`` when the `handle` wraps a valid Python object - explicit operator bool() const { return m_ptr != nullptr; } - /** \rst - Deprecated: Check that the underlying pointers are the same. - Equivalent to ``obj1 is obj2`` in Python. - \endrst */ - PYBIND11_DEPRECATED("Use obj1.is(obj2) instead") - bool operator==(const handle &h) const { return m_ptr == h.m_ptr; } - PYBIND11_DEPRECATED("Use !obj1.is(obj2) instead") - bool operator!=(const handle &h) const { return m_ptr != h.m_ptr; } - PYBIND11_DEPRECATED("Use handle::operator bool() instead") - bool check() const { return m_ptr != nullptr; } -protected: - PyObject *m_ptr = nullptr; -}; - -/** \rst - Holds a reference to a Python object (with reference counting) - - Like `handle`, the `object` class is a thin wrapper around an arbitrary Python - object (i.e. a ``PyObject *`` in Python's C API). In contrast to `handle`, it - optionally increases the object's reference count upon construction, and it - *always* decreases the reference count when the `object` instance goes out of - scope and is destructed. When using `object` instances consistently, it is much - easier to get reference counting right at the first attempt. -\endrst */ -class object : public handle { -public: - object() = default; - PYBIND11_DEPRECATED("Use reinterpret_borrow() or reinterpret_steal()") - object(handle h, bool is_borrowed) : handle(h) { if (is_borrowed) inc_ref(); } - /// Copy constructor; always increases the reference count - object(const object &o) : handle(o) { inc_ref(); } - /// Move constructor; steals the object from ``other`` and preserves its reference count - object(object &&other) noexcept { m_ptr = other.m_ptr; other.m_ptr = nullptr; } - /// Destructor; automatically calls `handle::dec_ref()` - ~object() { dec_ref(); } - - /** \rst - Resets the internal pointer to ``nullptr`` without without decreasing the - object's reference count. The function returns a raw handle to the original - Python object. 
- \endrst */ - handle release() { - PyObject *tmp = m_ptr; - m_ptr = nullptr; - return handle(tmp); - } - - object& operator=(const object &other) { - other.inc_ref(); - dec_ref(); - m_ptr = other.m_ptr; - return *this; - } - - object& operator=(object &&other) noexcept { - if (this != &other) { - handle temp(m_ptr); - m_ptr = other.m_ptr; - other.m_ptr = nullptr; - temp.dec_ref(); - } - return *this; - } - - // Calling cast() on an object lvalue just copies (via handle::cast) - template T cast() const &; - // Calling on an object rvalue does a move, if needed and/or possible - template T cast() &&; - -protected: - // Tags for choosing constructors from raw PyObject * - struct borrowed_t { }; - struct stolen_t { }; - - template friend T reinterpret_borrow(handle); - template friend T reinterpret_steal(handle); - -public: - // Only accessible from derived classes and the reinterpret_* functions - object(handle h, borrowed_t) : handle(h) { inc_ref(); } - object(handle h, stolen_t) : handle(h) { } -}; - -/** \rst - Declare that a `handle` or ``PyObject *`` is a certain type and borrow the reference. - The target type ``T`` must be `object` or one of its derived classes. The function - doesn't do any conversions or checks. It's up to the user to make sure that the - target type is correct. - - .. code-block:: cpp - - PyObject *p = PyList_GetItem(obj, index); - py::object o = reinterpret_borrow(p); - // or - py::tuple t = reinterpret_borrow(p); // <-- `p` must be already be a `tuple` -\endrst */ -template T reinterpret_borrow(handle h) { return {h, object::borrowed_t{}}; } - -/** \rst - Like `reinterpret_borrow`, but steals the reference. - - .. code-block:: cpp - - PyObject *p = PyObject_Str(obj); - py::str s = reinterpret_steal(p); // <-- `p` must be already be a `str` -\endrst */ -template T reinterpret_steal(handle h) { return {h, object::stolen_t{}}; } - -NAMESPACE_BEGIN(detail) -inline std::string error_string(); -NAMESPACE_END(detail) - -/// Fetch and hold an error which was already set in Python. An instance of this is typically -/// thrown to propagate python-side errors back through C++ which can either be caught manually or -/// else falls back to the function dispatcher (which then raises the captured error back to -/// python). -class error_already_set : public std::runtime_error { -public: - /// Constructs a new exception from the current Python error indicator, if any. The current - /// Python error indicator will be cleared. - error_already_set() : std::runtime_error(detail::error_string()) { - PyErr_Fetch(&type.ptr(), &value.ptr(), &trace.ptr()); - } - - inline ~error_already_set(); - - /// Give the currently-held error back to Python, if any. If there is currently a Python error - /// already set it is cleared first. After this call, the current object no longer stores the - /// error variables (but the `.what()` string is still available). - void restore() { PyErr_Restore(type.release().ptr(), value.release().ptr(), trace.release().ptr()); } - - // Does nothing; provided for backwards compatibility. - PYBIND11_DEPRECATED("Use of error_already_set.clear() is deprecated") - void clear() {} - - /// Check if the currently trapped error type matches the given Python exception class (or a - /// subclass thereof). May also be passed a tuple to search for any exception class matches in - /// the given tuple. 
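
A hedged sketch of catching and filtering a propagated Python error with matches() (the helper function below is hypothetical):

#include <pybind11/pybind11.h>
namespace py = pybind11;

py::object import_or_none(const char *name) {
    try {
        return py::module::import(name);
    } catch (py::error_already_set &e) {
        if (e.matches(PyExc_ImportError))
            return py::none();   // tolerate only a missing module
        throw;                   // re-raise anything else
    }
}
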
- bool matches(handle ex) const { return PyErr_GivenExceptionMatches(ex.ptr(), type.ptr()); } - -private: - object type, value, trace; -}; - -/** \defgroup python_builtins _ - Unless stated otherwise, the following C++ functions behave the same - as their Python counterparts. - */ - -/** \ingroup python_builtins - \rst - Return true if ``obj`` is an instance of ``T``. Type ``T`` must be a subclass of - `object` or a class which was exposed to Python as ``py::class_``. -\endrst */ -template ::value, int> = 0> -bool isinstance(handle obj) { return T::check_(obj); } - -template ::value, int> = 0> -bool isinstance(handle obj) { return detail::isinstance_generic(obj, typeid(T)); } - -template <> inline bool isinstance(handle obj) = delete; -template <> inline bool isinstance(handle obj) { return obj.ptr() != nullptr; } - -/// \ingroup python_builtins -/// Return true if ``obj`` is an instance of the ``type``. -inline bool isinstance(handle obj, handle type) { - const auto result = PyObject_IsInstance(obj.ptr(), type.ptr()); - if (result == -1) - throw error_already_set(); - return result != 0; -} - -/// \addtogroup python_builtins -/// @{ -inline bool hasattr(handle obj, handle name) { - return PyObject_HasAttr(obj.ptr(), name.ptr()) == 1; -} - -inline bool hasattr(handle obj, const char *name) { - return PyObject_HasAttrString(obj.ptr(), name) == 1; -} - -inline object getattr(handle obj, handle name) { - PyObject *result = PyObject_GetAttr(obj.ptr(), name.ptr()); - if (!result) { throw error_already_set(); } - return reinterpret_steal(result); -} - -inline object getattr(handle obj, const char *name) { - PyObject *result = PyObject_GetAttrString(obj.ptr(), name); - if (!result) { throw error_already_set(); } - return reinterpret_steal(result); -} - -inline object getattr(handle obj, handle name, handle default_) { - if (PyObject *result = PyObject_GetAttr(obj.ptr(), name.ptr())) { - return reinterpret_steal(result); - } else { - PyErr_Clear(); - return reinterpret_borrow(default_); - } -} - -inline object getattr(handle obj, const char *name, handle default_) { - if (PyObject *result = PyObject_GetAttrString(obj.ptr(), name)) { - return reinterpret_steal(result); - } else { - PyErr_Clear(); - return reinterpret_borrow(default_); - } -} - -inline void setattr(handle obj, handle name, handle value) { - if (PyObject_SetAttr(obj.ptr(), name.ptr(), value.ptr()) != 0) { throw error_already_set(); } -} - -inline void setattr(handle obj, const char *name, handle value) { - if (PyObject_SetAttrString(obj.ptr(), name, value.ptr()) != 0) { throw error_already_set(); } -} -/// @} python_builtins - -NAMESPACE_BEGIN(detail) -inline handle get_function(handle value) { - if (value) { -#if PY_MAJOR_VERSION >= 3 - if (PyInstanceMethod_Check(value.ptr())) - value = PyInstanceMethod_GET_FUNCTION(value.ptr()); - else -#endif - if (PyMethod_Check(value.ptr())) - value = PyMethod_GET_FUNCTION(value.ptr()); - } - return value; -} - -// Helper aliases/functions to support implicit casting of values given to python accessors/methods. -// When given a pyobject, this simply returns the pyobject as-is; for other C++ type, the value goes -// through pybind11::cast(obj) to convert it to an `object`. 
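
The built-ins above compose naturally; a small illustrative sketch (the function is hypothetical):

#include <pybind11/pybind11.h>
namespace py = pybind11;

void describe(py::handle obj) {
    if (py::isinstance<py::dict>(obj))
        py::print("a dict with", py::len(obj), "entries");
    // getattr with a default never throws on a missing attribute:
    py::object name = py::getattr(obj, "__name__", py::str("<unnamed>"));
    py::print("name:", name);
    if (py::hasattr(obj, "close"))
        obj.attr("close")();
}
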
-template ::value, int> = 0> -auto object_or_cast(T &&o) -> decltype(std::forward(o)) { return std::forward(o); } -// The following casting version is implemented in cast.h: -template ::value, int> = 0> -object object_or_cast(T &&o); -// Match a PyObject*, which we want to convert directly to handle via its converting constructor -inline handle object_or_cast(PyObject *ptr) { return ptr; } - - -template -class accessor : public object_api> { - using key_type = typename Policy::key_type; - -public: - accessor(handle obj, key_type key) : obj(obj), key(std::move(key)) { } - accessor(const accessor &a) = default; - accessor(accessor &&a) = default; - - // accessor overload required to override default assignment operator (templates are not allowed - // to replace default compiler-generated assignments). - void operator=(const accessor &a) && { std::move(*this).operator=(handle(a)); } - void operator=(const accessor &a) & { operator=(handle(a)); } - - template void operator=(T &&value) && { - Policy::set(obj, key, object_or_cast(std::forward(value))); - } - template void operator=(T &&value) & { - get_cache() = reinterpret_borrow(object_or_cast(std::forward(value))); - } - - template - PYBIND11_DEPRECATED("Use of obj.attr(...) as bool is deprecated in favor of pybind11::hasattr(obj, ...)") - explicit operator enable_if_t::value || - std::is_same::value, bool>() const { - return hasattr(obj, key); - } - template - PYBIND11_DEPRECATED("Use of obj[key] as bool is deprecated in favor of obj.contains(key)") - explicit operator enable_if_t::value, bool>() const { - return obj.contains(key); - } - - operator object() const { return get_cache(); } - PyObject *ptr() const { return get_cache().ptr(); } - template T cast() const { return get_cache().template cast(); } - -private: - object &get_cache() const { - if (!cache) { cache = Policy::get(obj, key); } - return cache; - } - -private: - handle obj; - key_type key; - mutable object cache; -}; - -NAMESPACE_BEGIN(accessor_policies) -struct obj_attr { - using key_type = object; - static object get(handle obj, handle key) { return getattr(obj, key); } - static void set(handle obj, handle key, handle val) { setattr(obj, key, val); } -}; - -struct str_attr { - using key_type = const char *; - static object get(handle obj, const char *key) { return getattr(obj, key); } - static void set(handle obj, const char *key, handle val) { setattr(obj, key, val); } -}; - -struct generic_item { - using key_type = object; - - static object get(handle obj, handle key) { - PyObject *result = PyObject_GetItem(obj.ptr(), key.ptr()); - if (!result) { throw error_already_set(); } - return reinterpret_steal(result); - } - - static void set(handle obj, handle key, handle val) { - if (PyObject_SetItem(obj.ptr(), key.ptr(), val.ptr()) != 0) { throw error_already_set(); } - } -}; - -struct sequence_item { - using key_type = size_t; - - static object get(handle obj, size_t index) { - PyObject *result = PySequence_GetItem(obj.ptr(), static_cast(index)); - if (!result) { throw error_already_set(); } - return reinterpret_steal(result); - } - - static void set(handle obj, size_t index, handle val) { - // PySequence_SetItem does not steal a reference to 'val' - if (PySequence_SetItem(obj.ptr(), static_cast(index), val.ptr()) != 0) { - throw error_already_set(); - } - } -}; - -struct list_item { - using key_type = size_t; - - static object get(handle obj, size_t index) { - PyObject *result = PyList_GetItem(obj.ptr(), static_cast(index)); - if (!result) { throw error_already_set(); } - 
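
A sketch of these accessor policies from the caller's side (names hypothetical; the list is assumed non-empty):

#include <pybind11/pybind11.h>
namespace py = pybind11;

void fill(py::dict d, py::list row) {
    d["shape"] = py::make_tuple(1, 2);   // generic_item policy -> __setitem__
    row[0]     = d["shape"];             // list_item policy; index 0 must exist
    int cols = d["shape"].cast<py::tuple>()[1].cast<int>();
    py::module::import("sys").attr("example_cols") = cols;  // str_attr policy -> setattr
}
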
return reinterpret_borrow(result); - } - - static void set(handle obj, size_t index, handle val) { - // PyList_SetItem steals a reference to 'val' - if (PyList_SetItem(obj.ptr(), static_cast(index), val.inc_ref().ptr()) != 0) { - throw error_already_set(); - } - } -}; - -struct tuple_item { - using key_type = size_t; - - static object get(handle obj, size_t index) { - PyObject *result = PyTuple_GetItem(obj.ptr(), static_cast(index)); - if (!result) { throw error_already_set(); } - return reinterpret_borrow(result); - } - - static void set(handle obj, size_t index, handle val) { - // PyTuple_SetItem steals a reference to 'val' - if (PyTuple_SetItem(obj.ptr(), static_cast(index), val.inc_ref().ptr()) != 0) { - throw error_already_set(); - } - } -}; -NAMESPACE_END(accessor_policies) - -/// STL iterator template used for tuple, list, sequence and dict -template -class generic_iterator : public Policy { - using It = generic_iterator; - -public: - using difference_type = ssize_t; - using iterator_category = typename Policy::iterator_category; - using value_type = typename Policy::value_type; - using reference = typename Policy::reference; - using pointer = typename Policy::pointer; - - generic_iterator() = default; - generic_iterator(handle seq, ssize_t index) : Policy(seq, index) { } - - reference operator*() const { return Policy::dereference(); } - reference operator[](difference_type n) const { return *(*this + n); } - pointer operator->() const { return **this; } - - It &operator++() { Policy::increment(); return *this; } - It operator++(int) { auto copy = *this; Policy::increment(); return copy; } - It &operator--() { Policy::decrement(); return *this; } - It operator--(int) { auto copy = *this; Policy::decrement(); return copy; } - It &operator+=(difference_type n) { Policy::advance(n); return *this; } - It &operator-=(difference_type n) { Policy::advance(-n); return *this; } - - friend It operator+(const It &a, difference_type n) { auto copy = a; return copy += n; } - friend It operator+(difference_type n, const It &b) { return b + n; } - friend It operator-(const It &a, difference_type n) { auto copy = a; return copy -= n; } - friend difference_type operator-(const It &a, const It &b) { return a.distance_to(b); } - - friend bool operator==(const It &a, const It &b) { return a.equal(b); } - friend bool operator!=(const It &a, const It &b) { return !(a == b); } - friend bool operator< (const It &a, const It &b) { return b - a > 0; } - friend bool operator> (const It &a, const It &b) { return b < a; } - friend bool operator>=(const It &a, const It &b) { return !(a < b); } - friend bool operator<=(const It &a, const It &b) { return !(a > b); } -}; - -NAMESPACE_BEGIN(iterator_policies) -/// Quick proxy class needed to implement ``operator->`` for iterators which can't return pointers -template -struct arrow_proxy { - T value; - - arrow_proxy(T &&value) : value(std::move(value)) { } - T *operator->() const { return &value; } -}; - -/// Lightweight iterator policy using just a simple pointer: see ``PySequence_Fast_ITEMS`` -class sequence_fast_readonly { -protected: - using iterator_category = std::random_access_iterator_tag; - using value_type = handle; - using reference = const handle; - using pointer = arrow_proxy; - - sequence_fast_readonly(handle obj, ssize_t n) : ptr(PySequence_Fast_ITEMS(obj.ptr()) + n) { } - - reference dereference() const { return *ptr; } - void increment() { ++ptr; } - void decrement() { --ptr; } - void advance(ssize_t n) { ptr += n; } - bool equal(const 
sequence_fast_readonly &b) const { return ptr == b.ptr; } - ssize_t distance_to(const sequence_fast_readonly &b) const { return ptr - b.ptr; } - -private: - PyObject **ptr; -}; - -/// Full read and write access using the sequence protocol: see ``detail::sequence_accessor`` -class sequence_slow_readwrite { -protected: - using iterator_category = std::random_access_iterator_tag; - using value_type = object; - using reference = sequence_accessor; - using pointer = arrow_proxy; - - sequence_slow_readwrite(handle obj, ssize_t index) : obj(obj), index(index) { } - - reference dereference() const { return {obj, static_cast(index)}; } - void increment() { ++index; } - void decrement() { --index; } - void advance(ssize_t n) { index += n; } - bool equal(const sequence_slow_readwrite &b) const { return index == b.index; } - ssize_t distance_to(const sequence_slow_readwrite &b) const { return index - b.index; } - -private: - handle obj; - ssize_t index; -}; - -/// Python's dictionary protocol permits this to be a forward iterator -class dict_readonly { -protected: - using iterator_category = std::forward_iterator_tag; - using value_type = std::pair; - using reference = const value_type; - using pointer = arrow_proxy; - - dict_readonly() = default; - dict_readonly(handle obj, ssize_t pos) : obj(obj), pos(pos) { increment(); } - - reference dereference() const { return {key, value}; } - void increment() { if (!PyDict_Next(obj.ptr(), &pos, &key, &value)) { pos = -1; } } - bool equal(const dict_readonly &b) const { return pos == b.pos; } - -private: - handle obj; - PyObject *key, *value; - ssize_t pos = -1; -}; -NAMESPACE_END(iterator_policies) - -#if !defined(PYPY_VERSION) -using tuple_iterator = generic_iterator; -using list_iterator = generic_iterator; -#else -using tuple_iterator = generic_iterator; -using list_iterator = generic_iterator; -#endif - -using sequence_iterator = generic_iterator; -using dict_iterator = generic_iterator; - -inline bool PyIterable_Check(PyObject *obj) { - PyObject *iter = PyObject_GetIter(obj); - if (iter) { - Py_DECREF(iter); - return true; - } else { - PyErr_Clear(); - return false; - } -} - -inline bool PyNone_Check(PyObject *o) { return o == Py_None; } - -inline bool PyUnicode_Check_Permissive(PyObject *o) { return PyUnicode_Check(o) || PYBIND11_BYTES_CHECK(o); } - -class kwargs_proxy : public handle { -public: - explicit kwargs_proxy(handle h) : handle(h) { } -}; - -class args_proxy : public handle { -public: - explicit args_proxy(handle h) : handle(h) { } - kwargs_proxy operator*() const { return kwargs_proxy(*this); } -}; - -/// Python argument categories (using PEP 448 terms) -template using is_keyword = std::is_base_of; -template using is_s_unpacking = std::is_same; // * unpacking -template using is_ds_unpacking = std::is_same; // ** unpacking -template using is_positional = satisfies_none_of; -template using is_keyword_or_ds = satisfies_any_of; - -// Call argument collector forward declarations -template -class simple_collector; -template -class unpacking_collector; - -NAMESPACE_END(detail) - -// TODO: After the deprecated constructors are removed, this macro can be simplified by -// inheriting ctors: `using Parent::Parent`. It's not an option right now because -// the `using` statement triggers the parent deprecation warning even if the ctor -// isn't even used. 
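
These iterator policies are what make C++ range-for loops over Python containers work; for instance (functions hypothetical):

#include <pybind11/pybind11.h>
namespace py = pybind11;

int sum_values(py::dict d) {
    int total = 0;
    for (auto item : d)               // dict_iterator yields (key, value) handle pairs
        total += item.second.cast<int>();
    return total;
}

py::object last(py::list l) {
    py::object result = py::none();
    for (auto h : l)                  // list_iterator: random access over handles
        result = py::reinterpret_borrow<py::object>(h);
    return result;
}
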
-#define PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \ - public: \ - PYBIND11_DEPRECATED("Use reinterpret_borrow<"#Name">() or reinterpret_steal<"#Name">()") \ - Name(handle h, bool is_borrowed) : Parent(is_borrowed ? Parent(h, borrowed_t{}) : Parent(h, stolen_t{})) { } \ - Name(handle h, borrowed_t) : Parent(h, borrowed_t{}) { } \ - Name(handle h, stolen_t) : Parent(h, stolen_t{}) { } \ - PYBIND11_DEPRECATED("Use py::isinstance(obj) instead") \ - bool check() const { return m_ptr != nullptr && (bool) CheckFun(m_ptr); } \ - static bool check_(handle h) { return h.ptr() != nullptr && CheckFun(h.ptr()); } - -#define PYBIND11_OBJECT_CVT(Name, Parent, CheckFun, ConvertFun) \ - PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \ - /* This is deliberately not 'explicit' to allow implicit conversion from object: */ \ - Name(const object &o) : Parent(ConvertFun(o.ptr()), stolen_t{}) { if (!m_ptr) throw error_already_set(); } - -#define PYBIND11_OBJECT(Name, Parent, CheckFun) \ - PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \ - /* This is deliberately not 'explicit' to allow implicit conversion from object: */ \ - Name(const object &o) : Parent(o) { } \ - Name(object &&o) : Parent(std::move(o)) { } - -#define PYBIND11_OBJECT_DEFAULT(Name, Parent, CheckFun) \ - PYBIND11_OBJECT(Name, Parent, CheckFun) \ - Name() : Parent() { } - -/// \addtogroup pytypes -/// @{ - -/** \rst - Wraps a Python iterator so that it can also be used as a C++ input iterator - - Caveat: copying an iterator does not (and cannot) clone the internal - state of the Python iterable. This also applies to the post-increment - operator. This iterator should only be used to retrieve the current - value using ``operator*()``. -\endrst */ -class iterator : public object { -public: - using iterator_category = std::input_iterator_tag; - using difference_type = ssize_t; - using value_type = handle; - using reference = const handle; - using pointer = const handle *; - - PYBIND11_OBJECT_DEFAULT(iterator, object, PyIter_Check) - - iterator& operator++() { - advance(); - return *this; - } - - iterator operator++(int) { - auto rv = *this; - advance(); - return rv; - } - - reference operator*() const { - if (m_ptr && !value.ptr()) { - auto& self = const_cast(*this); - self.advance(); - } - return value; - } - - pointer operator->() const { operator*(); return &value; } - - /** \rst - The value which marks the end of the iteration. ``it == iterator::sentinel()`` - is equivalent to catching ``StopIteration`` in Python. - - .. 
code-block:: cpp - - void foo(py::iterator it) { - while (it != py::iterator::sentinel()) { - // use `*it` - ++it; - } - } - \endrst */ - static iterator sentinel() { return {}; } - - friend bool operator==(const iterator &a, const iterator &b) { return a->ptr() == b->ptr(); } - friend bool operator!=(const iterator &a, const iterator &b) { return a->ptr() != b->ptr(); } - -private: - void advance() { - value = reinterpret_steal(PyIter_Next(m_ptr)); - if (PyErr_Occurred()) { throw error_already_set(); } - } - -private: - object value = {}; -}; - -class iterable : public object { -public: - PYBIND11_OBJECT_DEFAULT(iterable, object, detail::PyIterable_Check) -}; - -class bytes; - -class str : public object { -public: - PYBIND11_OBJECT_CVT(str, object, detail::PyUnicode_Check_Permissive, raw_str) - - str(const char *c, size_t n) - : object(PyUnicode_FromStringAndSize(c, (ssize_t) n), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate string object!"); - } - - // 'explicit' is explicitly omitted from the following constructors to allow implicit conversion to py::str from C++ string-like objects - str(const char *c = "") - : object(PyUnicode_FromString(c), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate string object!"); - } - - str(const std::string &s) : str(s.data(), s.size()) { } - - explicit str(const bytes &b); - - /** \rst - Return a string representation of the object. This is analogous to - the ``str()`` function in Python. - \endrst */ - explicit str(handle h) : object(raw_str(h.ptr()), stolen_t{}) { } - - operator std::string() const { - object temp = *this; - if (PyUnicode_Check(m_ptr)) { - temp = reinterpret_steal(PyUnicode_AsUTF8String(m_ptr)); - if (!temp) - pybind11_fail("Unable to extract string contents! (encoding issue)"); - } - char *buffer; - ssize_t length; - if (PYBIND11_BYTES_AS_STRING_AND_SIZE(temp.ptr(), &buffer, &length)) - pybind11_fail("Unable to extract string contents! 
(invalid type)"); - return std::string(buffer, (size_t) length); - } - - template - str format(Args &&...args) const { - return attr("format")(std::forward(args)...); - } - -private: - /// Return string representation -- always returns a new reference, even if already a str - static PyObject *raw_str(PyObject *op) { - PyObject *str_value = PyObject_Str(op); -#if PY_MAJOR_VERSION < 3 - if (!str_value) throw error_already_set(); - PyObject *unicode = PyUnicode_FromEncodedObject(str_value, "utf-8", nullptr); - Py_XDECREF(str_value); str_value = unicode; -#endif - return str_value; - } -}; -/// @} pytypes - -inline namespace literals { -/** \rst - String literal version of `str` - \endrst */ -inline str operator"" _s(const char *s, size_t size) { return {s, size}; } -} - -/// \addtogroup pytypes -/// @{ -class bytes : public object { -public: - PYBIND11_OBJECT(bytes, object, PYBIND11_BYTES_CHECK) - - // Allow implicit conversion: - bytes(const char *c = "") - : object(PYBIND11_BYTES_FROM_STRING(c), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate bytes object!"); - } - - bytes(const char *c, size_t n) - : object(PYBIND11_BYTES_FROM_STRING_AND_SIZE(c, (ssize_t) n), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate bytes object!"); - } - - // Allow implicit conversion: - bytes(const std::string &s) : bytes(s.data(), s.size()) { } - - explicit bytes(const pybind11::str &s); - - operator std::string() const { - char *buffer; - ssize_t length; - if (PYBIND11_BYTES_AS_STRING_AND_SIZE(m_ptr, &buffer, &length)) - pybind11_fail("Unable to extract bytes contents!"); - return std::string(buffer, (size_t) length); - } -}; - -inline bytes::bytes(const pybind11::str &s) { - object temp = s; - if (PyUnicode_Check(s.ptr())) { - temp = reinterpret_steal(PyUnicode_AsUTF8String(s.ptr())); - if (!temp) - pybind11_fail("Unable to extract string contents! (encoding issue)"); - } - char *buffer; - ssize_t length; - if (PYBIND11_BYTES_AS_STRING_AND_SIZE(temp.ptr(), &buffer, &length)) - pybind11_fail("Unable to extract string contents! (invalid type)"); - auto obj = reinterpret_steal(PYBIND11_BYTES_FROM_STRING_AND_SIZE(buffer, length)); - if (!obj) - pybind11_fail("Could not allocate bytes object!"); - m_ptr = obj.release().ptr(); -} - -inline str::str(const bytes& b) { - char *buffer; - ssize_t length; - if (PYBIND11_BYTES_AS_STRING_AND_SIZE(b.ptr(), &buffer, &length)) - pybind11_fail("Unable to extract bytes contents!"); - auto obj = reinterpret_steal(PyUnicode_FromStringAndSize(buffer, (ssize_t) length)); - if (!obj) - pybind11_fail("Could not allocate string object!"); - m_ptr = obj.release().ptr(); -} - -class none : public object { -public: - PYBIND11_OBJECT(none, object, detail::PyNone_Check) - none() : object(Py_None, borrowed_t{}) { } -}; - -class bool_ : public object { -public: - PYBIND11_OBJECT_CVT(bool_, object, PyBool_Check, raw_bool) - bool_() : object(Py_False, borrowed_t{}) { } - // Allow implicit conversion from and to `bool`: - bool_(bool value) : object(value ? Py_True : Py_False, borrowed_t{}) { } - operator bool() const { return m_ptr && PyLong_AsLong(m_ptr) != 0; } - -private: - /// Return the truth value of an object -- always returns a new reference - static PyObject *raw_bool(PyObject *op) { - const auto value = PyObject_IsTrue(op); - if (value == -1) return nullptr; - return handle(value ? Py_True : Py_False).inc_ref().ptr(); - } -}; - -NAMESPACE_BEGIN(detail) -// Converts a value to the given unsigned type. 
If an error occurs, you get back (Unsigned) -1; -// otherwise you get back the unsigned long or unsigned long long value cast to (Unsigned). -// (The distinction is critically important when casting a returned -1 error value to some other -// unsigned type: (A)-1 != (B)-1 when A and B are unsigned types of different sizes). -template -Unsigned as_unsigned(PyObject *o) { - if (sizeof(Unsigned) <= sizeof(unsigned long) -#if PY_VERSION_HEX < 0x03000000 - || PyInt_Check(o) -#endif - ) { - unsigned long v = PyLong_AsUnsignedLong(o); - return v == (unsigned long) -1 && PyErr_Occurred() ? (Unsigned) -1 : (Unsigned) v; - } - else { - unsigned long long v = PyLong_AsUnsignedLongLong(o); - return v == (unsigned long long) -1 && PyErr_Occurred() ? (Unsigned) -1 : (Unsigned) v; - } -} -NAMESPACE_END(detail) - -class int_ : public object { -public: - PYBIND11_OBJECT_CVT(int_, object, PYBIND11_LONG_CHECK, PyNumber_Long) - int_() : object(PyLong_FromLong(0), stolen_t{}) { } - // Allow implicit conversion from C++ integral types: - template ::value, int> = 0> - int_(T value) { - if (sizeof(T) <= sizeof(long)) { - if (std::is_signed::value) - m_ptr = PyLong_FromLong((long) value); - else - m_ptr = PyLong_FromUnsignedLong((unsigned long) value); - } else { - if (std::is_signed::value) - m_ptr = PyLong_FromLongLong((long long) value); - else - m_ptr = PyLong_FromUnsignedLongLong((unsigned long long) value); - } - if (!m_ptr) pybind11_fail("Could not allocate int object!"); - } - - template ::value, int> = 0> - operator T() const { - return std::is_unsigned::value - ? detail::as_unsigned(m_ptr) - : sizeof(T) <= sizeof(long) - ? (T) PyLong_AsLong(m_ptr) - : (T) PYBIND11_LONG_AS_LONGLONG(m_ptr); - } -}; - -class float_ : public object { -public: - PYBIND11_OBJECT_CVT(float_, object, PyFloat_Check, PyNumber_Float) - // Allow implicit conversion from float/double: - float_(float value) : object(PyFloat_FromDouble((double) value), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate float object!"); - } - float_(double value = .0) : object(PyFloat_FromDouble((double) value), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate float object!"); - } - operator float() const { return (float) PyFloat_AsDouble(m_ptr); } - operator double() const { return (double) PyFloat_AsDouble(m_ptr); } -}; - -class weakref : public object { -public: - PYBIND11_OBJECT_DEFAULT(weakref, object, PyWeakref_Check) - explicit weakref(handle obj, handle callback = {}) - : object(PyWeakref_NewRef(obj.ptr(), callback.ptr()), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate weak reference!"); - } -}; - -class slice : public object { -public: - PYBIND11_OBJECT_DEFAULT(slice, object, PySlice_Check) - slice(ssize_t start_, ssize_t stop_, ssize_t step_) { - int_ start(start_), stop(stop_), step(step_); - m_ptr = PySlice_New(start.ptr(), stop.ptr(), step.ptr()); - if (!m_ptr) pybind11_fail("Could not allocate slice object!"); - } - bool compute(size_t length, size_t *start, size_t *stop, size_t *step, - size_t *slicelength) const { - return PySlice_GetIndicesEx((PYBIND11_SLICE_OBJECT *) m_ptr, - (ssize_t) length, (ssize_t *) start, - (ssize_t *) stop, (ssize_t *) step, - (ssize_t *) slicelength) == 0; - } -}; - -class capsule : public object { -public: - PYBIND11_OBJECT_DEFAULT(capsule, object, PyCapsule_CheckExact) - PYBIND11_DEPRECATED("Use reinterpret_borrow() or reinterpret_steal()") - capsule(PyObject *ptr, bool is_borrowed) : object(is_borrowed ? 
object(ptr, borrowed_t{}) : object(ptr, stolen_t{})) { } - - explicit capsule(const void *value, const char *name = nullptr, void (*destructor)(PyObject *) = nullptr) - : object(PyCapsule_New(const_cast(value), name, destructor), stolen_t{}) { - if (!m_ptr) - pybind11_fail("Could not allocate capsule object!"); - } - - PYBIND11_DEPRECATED("Please pass a destructor that takes a void pointer as input") - capsule(const void *value, void (*destruct)(PyObject *)) - : object(PyCapsule_New(const_cast(value), nullptr, destruct), stolen_t{}) { - if (!m_ptr) - pybind11_fail("Could not allocate capsule object!"); - } - - capsule(const void *value, void (*destructor)(void *)) { - m_ptr = PyCapsule_New(const_cast(value), nullptr, [](PyObject *o) { - auto destructor = reinterpret_cast(PyCapsule_GetContext(o)); - void *ptr = PyCapsule_GetPointer(o, nullptr); - destructor(ptr); - }); - - if (!m_ptr) - pybind11_fail("Could not allocate capsule object!"); - - if (PyCapsule_SetContext(m_ptr, (void *) destructor) != 0) - pybind11_fail("Could not set capsule context!"); - } - - capsule(void (*destructor)()) { - m_ptr = PyCapsule_New(reinterpret_cast(destructor), nullptr, [](PyObject *o) { - auto destructor = reinterpret_cast(PyCapsule_GetPointer(o, nullptr)); - destructor(); - }); - - if (!m_ptr) - pybind11_fail("Could not allocate capsule object!"); - } - - template operator T *() const { - auto name = this->name(); - T * result = static_cast(PyCapsule_GetPointer(m_ptr, name)); - if (!result) pybind11_fail("Unable to extract capsule contents!"); - return result; - } - - const char *name() const { return PyCapsule_GetName(m_ptr); } -}; - -class tuple : public object { -public: - PYBIND11_OBJECT_CVT(tuple, object, PyTuple_Check, PySequence_Tuple) - explicit tuple(size_t size = 0) : object(PyTuple_New((ssize_t) size), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate tuple object!"); - } - size_t size() const { return (size_t) PyTuple_Size(m_ptr); } - detail::tuple_accessor operator[](size_t index) const { return {*this, index}; } - detail::tuple_iterator begin() const { return {*this, 0}; } - detail::tuple_iterator end() const { return {*this, PyTuple_GET_SIZE(m_ptr)}; } -}; - -class dict : public object { -public: - PYBIND11_OBJECT_CVT(dict, object, PyDict_Check, raw_dict) - dict() : object(PyDict_New(), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate dict object!"); - } - template ...>::value>, - // MSVC workaround: it can't compile an out-of-line definition, so defer the collector - typename collector = detail::deferred_t, Args...>> - explicit dict(Args &&...args) : dict(collector(std::forward(args)...).kwargs()) { } - - size_t size() const { return (size_t) PyDict_Size(m_ptr); } - detail::dict_iterator begin() const { return {*this, 0}; } - detail::dict_iterator end() const { return {}; } - void clear() const { PyDict_Clear(ptr()); } - bool contains(handle key) const { return PyDict_Contains(ptr(), key.ptr()) == 1; } - bool contains(const char *key) const { return PyDict_Contains(ptr(), pybind11::str(key).ptr()) == 1; } - -private: - /// Call the `dict` Python type -- always returns a new reference - static PyObject *raw_dict(PyObject *op) { - if (PyDict_Check(op)) - return handle(op).inc_ref().ptr(); - return PyObject_CallFunctionObjArgs((PyObject *) &PyDict_Type, op, nullptr); - } -}; - -class sequence : public object { -public: - PYBIND11_OBJECT_DEFAULT(sequence, object, PySequence_Check) - size_t size() const { return (size_t) PySequence_Size(m_ptr); } - 
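
A short sketch of the concrete container wrappers in use (keys and values illustrative):

#include <pybind11/pybind11.h>
namespace py = pybind11;

py::dict make_config() {
    py::dict d;
    d["shape"] = py::make_tuple(640, 480);   // py::tuple
    d["name"]  = "east";
    if (d.contains("shape"))
        py::print("entries:", py::len(d));
    return d;
}
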
detail::sequence_accessor operator[](size_t index) const { return {*this, index}; } - detail::sequence_iterator begin() const { return {*this, 0}; } - detail::sequence_iterator end() const { return {*this, PySequence_Size(m_ptr)}; } -}; - -class list : public object { -public: - PYBIND11_OBJECT_CVT(list, object, PyList_Check, PySequence_List) - explicit list(size_t size = 0) : object(PyList_New((ssize_t) size), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate list object!"); - } - size_t size() const { return (size_t) PyList_Size(m_ptr); } - detail::list_accessor operator[](size_t index) const { return {*this, index}; } - detail::list_iterator begin() const { return {*this, 0}; } - detail::list_iterator end() const { return {*this, PyList_GET_SIZE(m_ptr)}; } - template void append(T &&val) const { - PyList_Append(m_ptr, detail::object_or_cast(std::forward(val)).ptr()); - } -}; - -class args : public tuple { PYBIND11_OBJECT_DEFAULT(args, tuple, PyTuple_Check) }; -class kwargs : public dict { PYBIND11_OBJECT_DEFAULT(kwargs, dict, PyDict_Check) }; - -class set : public object { -public: - PYBIND11_OBJECT_CVT(set, object, PySet_Check, PySet_New) - set() : object(PySet_New(nullptr), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate set object!"); - } - size_t size() const { return (size_t) PySet_Size(m_ptr); } - template bool add(T &&val) const { - return PySet_Add(m_ptr, detail::object_or_cast(std::forward(val)).ptr()) == 0; - } - void clear() const { PySet_Clear(m_ptr); } -}; - -class function : public object { -public: - PYBIND11_OBJECT_DEFAULT(function, object, PyCallable_Check) - handle cpp_function() const { - handle fun = detail::get_function(m_ptr); - if (fun && PyCFunction_Check(fun.ptr())) - return fun; - return handle(); - } - bool is_cpp_function() const { return (bool) cpp_function(); } -}; - -class buffer : public object { -public: - PYBIND11_OBJECT_DEFAULT(buffer, object, PyObject_CheckBuffer) - - buffer_info request(bool writable = false) { - int flags = PyBUF_STRIDES | PyBUF_FORMAT; - if (writable) flags |= PyBUF_WRITABLE; - Py_buffer *view = new Py_buffer(); - if (PyObject_GetBuffer(m_ptr, view, flags) != 0) { - delete view; - throw error_already_set(); - } - return buffer_info(view); - } -}; - -class memoryview : public object { -public: - explicit memoryview(const buffer_info& info) { - static Py_buffer buf { }; - // Py_buffer uses signed sizes, strides and shape!.. 
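
A hedged sketch of the buffer protocol wrapper above (the function is hypothetical): request() validates access and exposes the format, shape, strides, and a raw pointer.

#include <pybind11/pybind11.h>
#include <stdexcept>
namespace py = pybind11;

double first_as_double(py::buffer b) {
    py::buffer_info info = b.request();   // throws error_already_set if unsupported
    if (info.format != py::format_descriptor<double>::format() || info.ndim != 1)
        throw std::runtime_error("expected a 1-D float64 buffer");
    return *static_cast<double *>(info.ptr);
}
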
- static std::vector py_strides { }; - static std::vector py_shape { }; - buf.buf = info.ptr; - buf.itemsize = info.itemsize; - buf.format = const_cast(info.format.c_str()); - buf.ndim = (int) info.ndim; - buf.len = info.size; - py_strides.clear(); - py_shape.clear(); - for (size_t i = 0; i < (size_t) info.ndim; ++i) { - py_strides.push_back(info.strides[i]); - py_shape.push_back(info.shape[i]); - } - buf.strides = py_strides.data(); - buf.shape = py_shape.data(); - buf.suboffsets = nullptr; - buf.readonly = false; - buf.internal = nullptr; - - m_ptr = PyMemoryView_FromBuffer(&buf); - if (!m_ptr) - pybind11_fail("Unable to create memoryview from buffer descriptor"); - } - - PYBIND11_OBJECT_CVT(memoryview, object, PyMemoryView_Check, PyMemoryView_FromObject) -}; -/// @} pytypes - -/// \addtogroup python_builtins -/// @{ -inline size_t len(handle h) { - ssize_t result = PyObject_Length(h.ptr()); - if (result < 0) - pybind11_fail("Unable to compute length of object"); - return (size_t) result; -} - -inline str repr(handle h) { - PyObject *str_value = PyObject_Repr(h.ptr()); - if (!str_value) throw error_already_set(); -#if PY_MAJOR_VERSION < 3 - PyObject *unicode = PyUnicode_FromEncodedObject(str_value, "utf-8", nullptr); - Py_XDECREF(str_value); str_value = unicode; - if (!str_value) throw error_already_set(); -#endif - return reinterpret_steal(str_value); -} - -inline iterator iter(handle obj) { - PyObject *result = PyObject_GetIter(obj.ptr()); - if (!result) { throw error_already_set(); } - return reinterpret_steal(result); -} -/// @} python_builtins - -NAMESPACE_BEGIN(detail) -template iterator object_api::begin() const { return iter(derived()); } -template iterator object_api::end() const { return iterator::sentinel(); } -template item_accessor object_api::operator[](handle key) const { - return {derived(), reinterpret_borrow(key)}; -} -template item_accessor object_api::operator[](const char *key) const { - return {derived(), pybind11::str(key)}; -} -template obj_attr_accessor object_api::attr(handle key) const { - return {derived(), reinterpret_borrow(key)}; -} -template str_attr_accessor object_api::attr(const char *key) const { - return {derived(), key}; -} -template args_proxy object_api::operator*() const { - return args_proxy(derived().ptr()); -} -template template bool object_api::contains(T &&item) const { - return attr("__contains__")(std::forward(item)).template cast(); -} - -template -pybind11::str object_api::str() const { return pybind11::str(derived()); } - -template -str_attr_accessor object_api::doc() const { return attr("__doc__"); } - -template -handle object_api::get_type() const { return (PyObject *) Py_TYPE(derived().ptr()); } - -NAMESPACE_END(detail) -NAMESPACE_END(pybind11) diff --git a/lanms/include/pybind11/stl.h b/lanms/include/pybind11/stl.h deleted file mode 100644 index d07a81f9..00000000 --- a/lanms/include/pybind11/stl.h +++ /dev/null @@ -1,367 +0,0 @@ -/* - pybind11/stl.h: Transparent conversion for STL data types - - Copyright (c) 2016 Wenzel Jakob - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. 
-*/ - -#pragma once - -#include "pybind11.h" -#include -#include -#include -#include -#include -#include -#include - -#if defined(_MSC_VER) -#pragma warning(push) -#pragma warning(disable: 4127) // warning C4127: Conditional expression is constant -#endif - -#ifdef __has_include -// std::optional (but including it in c++14 mode isn't allowed) -# if defined(PYBIND11_CPP17) && __has_include() -# include -# define PYBIND11_HAS_OPTIONAL 1 -# endif -// std::experimental::optional (but not allowed in c++11 mode) -# if defined(PYBIND11_CPP14) && __has_include() -# include -# define PYBIND11_HAS_EXP_OPTIONAL 1 -# endif -// std::variant -# if defined(PYBIND11_CPP17) && __has_include() -# include -# define PYBIND11_HAS_VARIANT 1 -# endif -#elif defined(_MSC_VER) && defined(PYBIND11_CPP17) -# include -# include -# define PYBIND11_HAS_OPTIONAL 1 -# define PYBIND11_HAS_VARIANT 1 -#endif - -NAMESPACE_BEGIN(pybind11) -NAMESPACE_BEGIN(detail) - -/// Extracts an const lvalue reference or rvalue reference for U based on the type of T (e.g. for -/// forwarding a container element). Typically used indirect via forwarded_type(), below. -template -using forwarded_type = conditional_t< - std::is_lvalue_reference::value, remove_reference_t &, remove_reference_t &&>; - -/// Forwards a value U as rvalue or lvalue according to whether T is rvalue or lvalue; typically -/// used for forwarding a container's elements. -template -forwarded_type forward_like(U &&u) { - return std::forward>(std::forward(u)); -} - -template struct set_caster { - using type = Type; - using key_conv = make_caster; - - bool load(handle src, bool convert) { - if (!isinstance(src)) - return false; - auto s = reinterpret_borrow(src); - value.clear(); - for (auto entry : s) { - key_conv conv; - if (!conv.load(entry, convert)) - return false; - value.insert(cast_op(std::move(conv))); - } - return true; - } - - template - static handle cast(T &&src, return_value_policy policy, handle parent) { - pybind11::set s; - for (auto &value: src) { - auto value_ = reinterpret_steal(key_conv::cast(forward_like(value), policy, parent)); - if (!value_ || !s.add(value_)) - return handle(); - } - return s.release(); - } - - PYBIND11_TYPE_CASTER(type, _("Set[") + key_conv::name() + _("]")); -}; - -template struct map_caster { - using key_conv = make_caster; - using value_conv = make_caster; - - bool load(handle src, bool convert) { - if (!isinstance(src)) - return false; - auto d = reinterpret_borrow(src); - value.clear(); - for (auto it : d) { - key_conv kconv; - value_conv vconv; - if (!kconv.load(it.first.ptr(), convert) || - !vconv.load(it.second.ptr(), convert)) - return false; - value.emplace(cast_op(std::move(kconv)), cast_op(std::move(vconv))); - } - return true; - } - - template - static handle cast(T &&src, return_value_policy policy, handle parent) { - dict d; - for (auto &kv: src) { - auto key = reinterpret_steal(key_conv::cast(forward_like(kv.first), policy, parent)); - auto value = reinterpret_steal(value_conv::cast(forward_like(kv.second), policy, parent)); - if (!key || !value) - return handle(); - d[key] = value; - } - return d.release(); - } - - PYBIND11_TYPE_CASTER(Type, _("Dict[") + key_conv::name() + _(", ") + value_conv::name() + _("]")); -}; - -template struct list_caster { - using value_conv = make_caster; - - bool load(handle src, bool convert) { - if (!isinstance(src)) - return false; - auto s = reinterpret_borrow(src); - value.clear(); - reserve_maybe(s, &value); - for (auto it : s) { - value_conv conv; - if (!conv.load(it, convert)) - 
return false; - value.push_back(cast_op(std::move(conv))); - } - return true; - } - -private: - template ().reserve(0)), void>::value, int> = 0> - void reserve_maybe(sequence s, Type *) { value.reserve(s.size()); } - void reserve_maybe(sequence, void *) { } - -public: - template - static handle cast(T &&src, return_value_policy policy, handle parent) { - list l(src.size()); - size_t index = 0; - for (auto &value: src) { - auto value_ = reinterpret_steal(value_conv::cast(forward_like(value), policy, parent)); - if (!value_) - return handle(); - PyList_SET_ITEM(l.ptr(), (ssize_t) index++, value_.release().ptr()); // steals a reference - } - return l.release(); - } - - PYBIND11_TYPE_CASTER(Type, _("List[") + value_conv::name() + _("]")); -}; - -template struct type_caster> - : list_caster, Type> { }; - -template struct type_caster> - : list_caster, Type> { }; - -template struct array_caster { - using value_conv = make_caster; - -private: - template - bool require_size(enable_if_t size) { - if (value.size() != size) - value.resize(size); - return true; - } - template - bool require_size(enable_if_t size) { - return size == Size; - } - -public: - bool load(handle src, bool convert) { - if (!isinstance(src)) - return false; - auto l = reinterpret_borrow(src); - if (!require_size(l.size())) - return false; - size_t ctr = 0; - for (auto it : l) { - value_conv conv; - if (!conv.load(it, convert)) - return false; - value[ctr++] = cast_op(std::move(conv)); - } - return true; - } - - template - static handle cast(T &&src, return_value_policy policy, handle parent) { - list l(src.size()); - size_t index = 0; - for (auto &value: src) { - auto value_ = reinterpret_steal(value_conv::cast(forward_like(value), policy, parent)); - if (!value_) - return handle(); - PyList_SET_ITEM(l.ptr(), (ssize_t) index++, value_.release().ptr()); // steals a reference - } - return l.release(); - } - - PYBIND11_TYPE_CASTER(ArrayType, _("List[") + value_conv::name() + _(_(""), _("[") + _() + _("]")) + _("]")); -}; - -template struct type_caster> - : array_caster, Type, false, Size> { }; - -template struct type_caster> - : array_caster, Type, true> { }; - -template struct type_caster> - : set_caster, Key> { }; - -template struct type_caster> - : set_caster, Key> { }; - -template struct type_caster> - : map_caster, Key, Value> { }; - -template struct type_caster> - : map_caster, Key, Value> { }; - -// This type caster is intended to be used for std::optional and std::experimental::optional -template struct optional_caster { - using value_conv = make_caster; - - template - static handle cast(T_ &&src, return_value_policy policy, handle parent) { - if (!src) - return none().inc_ref(); - return value_conv::cast(*std::forward(src), policy, parent); - } - - bool load(handle src, bool convert) { - if (!src) { - return false; - } else if (src.is_none()) { - return true; // default-constructed value is already empty - } - value_conv inner_caster; - if (!inner_caster.load(src, convert)) - return false; - - value.emplace(cast_op(std::move(inner_caster))); - return true; - } - - PYBIND11_TYPE_CASTER(T, _("Optional[") + value_conv::name() + _("]")); -}; - -#if PYBIND11_HAS_OPTIONAL -template struct type_caster> - : public optional_caster> {}; - -template<> struct type_caster - : public void_caster {}; -#endif - -#if PYBIND11_HAS_EXP_OPTIONAL -template struct type_caster> - : public optional_caster> {}; - -template<> struct type_caster - : public void_caster {}; -#endif - -/// Visit a variant and cast any found type to Python -struct 
variant_caster_visitor { - return_value_policy policy; - handle parent; - - template - handle operator()(T &&src) const { - return make_caster::cast(std::forward(src), policy, parent); - } -}; - -/// Helper class which abstracts away variant's `visit` function. `std::variant` and similar -/// `namespace::variant` types which provide a `namespace::visit()` function are handled here -/// automatically using argument-dependent lookup. Users can provide specializations for other -/// variant-like classes, e.g. `boost::variant` and `boost::apply_visitor`. -template class Variant> -struct visit_helper { - template - static auto call(Args &&...args) -> decltype(visit(std::forward(args)...)) { - return visit(std::forward(args)...); - } -}; - -/// Generic variant caster -template struct variant_caster; - -template class V, typename... Ts> -struct variant_caster> { - static_assert(sizeof...(Ts) > 0, "Variant must consist of at least one alternative."); - - template - bool load_alternative(handle src, bool convert, type_list) { - auto caster = make_caster(); - if (caster.load(src, convert)) { - value = cast_op(caster); - return true; - } - return load_alternative(src, convert, type_list{}); - } - - bool load_alternative(handle, bool, type_list<>) { return false; } - - bool load(handle src, bool convert) { - // Do a first pass without conversions to improve constructor resolution. - // E.g. `py::int_(1).cast>()` needs to fill the `int` - // slot of the variant. Without two-pass loading `double` would be filled - // because it appears first and a conversion is possible. - if (convert && load_alternative(src, false, type_list{})) - return true; - return load_alternative(src, convert, type_list{}); - } - - template - static handle cast(Variant &&src, return_value_policy policy, handle parent) { - return visit_helper::call(variant_caster_visitor{policy, parent}, - std::forward(src)); - } - - using Type = V; - PYBIND11_TYPE_CASTER(Type, _("Union[") + detail::concat(make_caster::name()...) + _("]")); -}; - -#if PYBIND11_HAS_VARIANT -template -struct type_caster> : variant_caster> { }; -#endif -NAMESPACE_END(detail) - -inline std::ostream &operator<<(std::ostream &os, const handle &obj) { - os << (std::string) str(obj); - return os; -} - -NAMESPACE_END(pybind11) - -#if defined(_MSC_VER) -#pragma warning(pop) -#endif diff --git a/lanms/include/pybind11/stl_bind.h b/lanms/include/pybind11/stl_bind.h deleted file mode 100644 index f16e9d22..00000000 --- a/lanms/include/pybind11/stl_bind.h +++ /dev/null @@ -1,585 +0,0 @@ -/* - pybind11/std_bind.h: Binding generators for STL data types - - Copyright (c) 2016 Sergey Lyskov and Wenzel Jakob - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. 
-*/ - -#pragma once - -#include "common.h" -#include "operators.h" - -#include -#include - -NAMESPACE_BEGIN(pybind11) -NAMESPACE_BEGIN(detail) - -/* SFINAE helper class used by 'is_comparable */ -template struct container_traits { - template static std::true_type test_comparable(decltype(std::declval() == std::declval())*); - template static std::false_type test_comparable(...); - template static std::true_type test_value(typename T2::value_type *); - template static std::false_type test_value(...); - template static std::true_type test_pair(typename T2::first_type *, typename T2::second_type *); - template static std::false_type test_pair(...); - - static constexpr const bool is_comparable = std::is_same(nullptr))>::value; - static constexpr const bool is_pair = std::is_same(nullptr, nullptr))>::value; - static constexpr const bool is_vector = std::is_same(nullptr))>::value; - static constexpr const bool is_element = !is_pair && !is_vector; -}; - -/* Default: is_comparable -> std::false_type */ -template -struct is_comparable : std::false_type { }; - -/* For non-map data structures, check whether operator== can be instantiated */ -template -struct is_comparable< - T, enable_if_t::is_element && - container_traits::is_comparable>> - : std::true_type { }; - -/* For a vector/map data structure, recursively check the value type (which is std::pair for maps) */ -template -struct is_comparable::is_vector>> { - static constexpr const bool value = - is_comparable::value; -}; - -/* For pairs, recursively check the two data types */ -template -struct is_comparable::is_pair>> { - static constexpr const bool value = - is_comparable::value && - is_comparable::value; -}; - -/* Fallback functions */ -template void vector_if_copy_constructible(const Args &...) { } -template void vector_if_equal_operator(const Args &...) { } -template void vector_if_insertion_operator(const Args &...) { } -template void vector_modifiers(const Args &...) { } - -template -void vector_if_copy_constructible(enable_if_t::value, Class_> &cl) { - cl.def(init(), "Copy constructor"); -} - -template -void vector_if_equal_operator(enable_if_t::value, Class_> &cl) { - using T = typename Vector::value_type; - - cl.def(self == self); - cl.def(self != self); - - cl.def("count", - [](const Vector &v, const T &x) { - return std::count(v.begin(), v.end(), x); - }, - arg("x"), - "Return the number of times ``x`` appears in the list" - ); - - cl.def("remove", [](Vector &v, const T &x) { - auto p = std::find(v.begin(), v.end(), x); - if (p != v.end()) - v.erase(p); - else - throw value_error(); - }, - arg("x"), - "Remove the first item from the list whose value is x. " - "It is an error if there is no such item." - ); - - cl.def("__contains__", - [](const Vector &v, const T &x) { - return std::find(v.begin(), v.end(), x) != v.end(); - }, - arg("x"), - "Return true the container contains ``x``" - ); -} - -// Vector modifiers -- requires a copyable vector_type: -// (Technically, some of these (pop and __delitem__) don't actually require copyability, but it seems -// silly to allow deletion but not insertion, so include them here too.) 
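
Note: the helpers being deleted above survive unchanged in the pybind11 submodule this PR adds, so bound vectors keep this behaviour. vector_if_equal_operator registers count/remove/__contains__ (plus == and !=) only when the is_comparable machinery detects that the element type supports operator==. From Python such a vector behaves much like a list; a minimal sketch, assuming a hypothetical extension module demo built with py::bind_vector<std::vector<int>>(m, "IntVector"):

# Hypothetical usage of a bound std::vector<int>; the module name `demo`
# and class name `IntVector` are illustrative, not part of this diff.
import demo

v = demo.IntVector([3, 1, 4, 1, 5])
print(v.count(1))                          # 2, registered because int has ==
print(3 in v)                              # True, via __contains__
v.remove(4)                                # drops the first matching element
print(v == demo.IntVector([3, 1, 1, 5]))   # True, via operator==
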
-template -void vector_modifiers(enable_if_t::value, Class_> &cl) { - using T = typename Vector::value_type; - using SizeType = typename Vector::size_type; - using DiffType = typename Vector::difference_type; - - cl.def("append", - [](Vector &v, const T &value) { v.push_back(value); }, - arg("x"), - "Add an item to the end of the list"); - - cl.def("__init__", [](Vector &v, iterable it) { - new (&v) Vector(); - try { - v.reserve(len(it)); - for (handle h : it) - v.push_back(h.cast()); - } catch (...) { - v.~Vector(); - throw; - } - }); - - cl.def("extend", - [](Vector &v, const Vector &src) { - v.insert(v.end(), src.begin(), src.end()); - }, - arg("L"), - "Extend the list by appending all the items in the given list" - ); - - cl.def("insert", - [](Vector &v, SizeType i, const T &x) { - if (i > v.size()) - throw index_error(); - v.insert(v.begin() + (DiffType) i, x); - }, - arg("i") , arg("x"), - "Insert an item at a given position." - ); - - cl.def("pop", - [](Vector &v) { - if (v.empty()) - throw index_error(); - T t = v.back(); - v.pop_back(); - return t; - }, - "Remove and return the last item" - ); - - cl.def("pop", - [](Vector &v, SizeType i) { - if (i >= v.size()) - throw index_error(); - T t = v[i]; - v.erase(v.begin() + (DiffType) i); - return t; - }, - arg("i"), - "Remove and return the item at index ``i``" - ); - - cl.def("__setitem__", - [](Vector &v, SizeType i, const T &t) { - if (i >= v.size()) - throw index_error(); - v[i] = t; - } - ); - - /// Slicing protocol - cl.def("__getitem__", - [](const Vector &v, slice slice) -> Vector * { - size_t start, stop, step, slicelength; - - if (!slice.compute(v.size(), &start, &stop, &step, &slicelength)) - throw error_already_set(); - - Vector *seq = new Vector(); - seq->reserve((size_t) slicelength); - - for (size_t i=0; ipush_back(v[start]); - start += step; - } - return seq; - }, - arg("s"), - "Retrieve list elements using a slice object" - ); - - cl.def("__setitem__", - [](Vector &v, slice slice, const Vector &value) { - size_t start, stop, step, slicelength; - if (!slice.compute(v.size(), &start, &stop, &step, &slicelength)) - throw error_already_set(); - - if (slicelength != value.size()) - throw std::runtime_error("Left and right hand size of slice assignment have different sizes!"); - - for (size_t i=0; i= v.size()) - throw index_error(); - v.erase(v.begin() + DiffType(i)); - }, - "Delete the list elements at index ``i``" - ); - - cl.def("__delitem__", - [](Vector &v, slice slice) { - size_t start, stop, step, slicelength; - - if (!slice.compute(v.size(), &start, &stop, &step, &slicelength)) - throw error_already_set(); - - if (step == 1 && false) { - v.erase(v.begin() + (DiffType) start, v.begin() + DiffType(start + slicelength)); - } else { - for (size_t i = 0; i < slicelength; ++i) { - v.erase(v.begin() + DiffType(start)); - start += step - 1; - } - } - }, - "Delete list elements using a slice object" - ); - -} - -// If the type has an operator[] that doesn't return a reference (most notably std::vector), -// we have to access by copying; otherwise we return by reference. 
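
The vector_modifiers block above (registered only for copyable element types, per the comment preceding it) is what gives a bound vector Python's mutating list API, including the slicing protocol. Continuing the illustrative IntVector sketch:

# Mutating API registered by vector_modifiers (names still hypothetical).
v = demo.IntVector(range(10))        # the iterable __init__ defined above
v.append(10)                         # push_back
v.extend(demo.IntVector([11, 12]))   # insert at the end
v.insert(0, -1)                      # bounds-checked; IndexError past the end
last = v.pop()                       # remove and return the last item
third = v.pop(3)                     # remove and return the item at index 3
print(v[2:9:2])                      # slice __getitem__ returns a new IntVector
del v[1:4]                           # slice __delitem__
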
-template using vector_needs_copy = negation< - std::is_same()[typename Vector::size_type()]), typename Vector::value_type &>>; - -// The usual case: access and iterate by reference -template -void vector_accessor(enable_if_t::value, Class_> &cl) { - using T = typename Vector::value_type; - using SizeType = typename Vector::size_type; - using ItType = typename Vector::iterator; - - cl.def("__getitem__", - [](Vector &v, SizeType i) -> T & { - if (i >= v.size()) - throw index_error(); - return v[i]; - }, - return_value_policy::reference_internal // ref + keepalive - ); - - cl.def("__iter__", - [](Vector &v) { - return make_iterator< - return_value_policy::reference_internal, ItType, ItType, T&>( - v.begin(), v.end()); - }, - keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */ - ); -} - -// The case for special objects, like std::vector, that have to be returned-by-copy: -template -void vector_accessor(enable_if_t::value, Class_> &cl) { - using T = typename Vector::value_type; - using SizeType = typename Vector::size_type; - using ItType = typename Vector::iterator; - cl.def("__getitem__", - [](const Vector &v, SizeType i) -> T { - if (i >= v.size()) - throw index_error(); - return v[i]; - } - ); - - cl.def("__iter__", - [](Vector &v) { - return make_iterator< - return_value_policy::copy, ItType, ItType, T>( - v.begin(), v.end()); - }, - keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */ - ); -} - -template auto vector_if_insertion_operator(Class_ &cl, std::string const &name) - -> decltype(std::declval() << std::declval(), void()) { - using size_type = typename Vector::size_type; - - cl.def("__repr__", - [name](Vector &v) { - std::ostringstream s; - s << name << '['; - for (size_type i=0; i < v.size(); ++i) { - s << v[i]; - if (i != v.size() - 1) - s << ", "; - } - s << ']'; - return s.str(); - }, - "Return the canonical string representation of this list." 
- ); -} - -// Provide the buffer interface for vectors if we have data() and we have a format for it -// GCC seems to have "void std::vector::data()" - doing SFINAE on the existence of data() is insufficient, we need to check it returns an appropriate pointer -template -struct vector_has_data_and_format : std::false_type {}; -template -struct vector_has_data_and_format::format(), std::declval().data()), typename Vector::value_type*>::value>> : std::true_type {}; - -// Add the buffer interface to a vector -template -enable_if_t...>::value> -vector_buffer(Class_& cl) { - using T = typename Vector::value_type; - - static_assert(vector_has_data_and_format::value, "There is not an appropriate format descriptor for this vector"); - - // numpy.h declares this for arbitrary types, but it may raise an exception and crash hard at runtime if PYBIND11_NUMPY_DTYPE hasn't been called, so check here - format_descriptor::format(); - - cl.def_buffer([](Vector& v) -> buffer_info { - return buffer_info(v.data(), static_cast(sizeof(T)), format_descriptor::format(), 1, {v.size()}, {sizeof(T)}); - }); - - cl.def("__init__", [](Vector& vec, buffer buf) { - auto info = buf.request(); - if (info.ndim != 1 || info.strides[0] % static_cast(sizeof(T))) - throw type_error("Only valid 1D buffers can be copied to a vector"); - if (!detail::compare_buffer_info::compare(info) || (ssize_t) sizeof(T) != info.itemsize) - throw type_error("Format mismatch (Python: " + info.format + " C++: " + format_descriptor::format() + ")"); - new (&vec) Vector(); - vec.reserve((size_t) info.shape[0]); - T *p = static_cast(info.ptr); - ssize_t step = info.strides[0] / static_cast(sizeof(T)); - T *end = p + info.shape[0] * step; - for (; p != end; p += step) - vec.push_back(*p); - }); - - return; -} - -template -enable_if_t...>::value> vector_buffer(Class_&) {} - -NAMESPACE_END(detail) - -// -// std::vector -// -template , typename... Args> -class_ bind_vector(module &m, std::string const &name, Args&&... 
args) { - using Class_ = class_; - - Class_ cl(m, name.c_str(), std::forward(args)...); - - // Declare the buffer interface if a buffer_protocol() is passed in - detail::vector_buffer(cl); - - cl.def(init<>()); - - // Register copy constructor (if possible) - detail::vector_if_copy_constructible(cl); - - // Register comparison-related operators and functions (if possible) - detail::vector_if_equal_operator(cl); - - // Register stream insertion operator (if possible) - detail::vector_if_insertion_operator(cl, name); - - // Modifiers require copyable vector value type - detail::vector_modifiers(cl); - - // Accessor and iterator; return by value if copyable, otherwise we return by ref + keep-alive - detail::vector_accessor(cl); - - cl.def("__bool__", - [](const Vector &v) -> bool { - return !v.empty(); - }, - "Check whether the list is nonempty" - ); - - cl.def("__len__", &Vector::size); - - - - -#if 0 - // C++ style functions deprecated, leaving it here as an example - cl.def(init()); - - cl.def("resize", - (void (Vector::*) (size_type count)) & Vector::resize, - "changes the number of elements stored"); - - cl.def("erase", - [](Vector &v, SizeType i) { - if (i >= v.size()) - throw index_error(); - v.erase(v.begin() + i); - }, "erases element at index ``i``"); - - cl.def("empty", &Vector::empty, "checks whether the container is empty"); - cl.def("size", &Vector::size, "returns the number of elements"); - cl.def("push_back", (void (Vector::*)(const T&)) &Vector::push_back, "adds an element to the end"); - cl.def("pop_back", &Vector::pop_back, "removes the last element"); - - cl.def("max_size", &Vector::max_size, "returns the maximum possible number of elements"); - cl.def("reserve", &Vector::reserve, "reserves storage"); - cl.def("capacity", &Vector::capacity, "returns the number of elements that can be held in currently allocated storage"); - cl.def("shrink_to_fit", &Vector::shrink_to_fit, "reduces memory usage by freeing unused memory"); - - cl.def("clear", &Vector::clear, "clears the contents"); - cl.def("swap", &Vector::swap, "swaps the contents"); - - cl.def("front", [](Vector &v) { - if (v.size()) return v.front(); - else throw index_error(); - }, "access the first element"); - - cl.def("back", [](Vector &v) { - if (v.size()) return v.back(); - else throw index_error(); - }, "access the last element "); - -#endif - - return cl; -} - - - -// -// std::map, std::unordered_map -// - -NAMESPACE_BEGIN(detail) - -/* Fallback functions */ -template void map_if_insertion_operator(const Args &...) { } -template void map_assignment(const Args &...) 
{ } - -// Map assignment when copy-assignable: just copy the value -template -void map_assignment(enable_if_t::value, Class_> &cl) { - using KeyType = typename Map::key_type; - using MappedType = typename Map::mapped_type; - - cl.def("__setitem__", - [](Map &m, const KeyType &k, const MappedType &v) { - auto it = m.find(k); - if (it != m.end()) it->second = v; - else m.emplace(k, v); - } - ); -} - -// Not copy-assignable, but still copy-constructible: we can update the value by erasing and reinserting -template -void map_assignment(enable_if_t< - !std::is_copy_assignable::value && - is_copy_constructible::value, - Class_> &cl) { - using KeyType = typename Map::key_type; - using MappedType = typename Map::mapped_type; - - cl.def("__setitem__", - [](Map &m, const KeyType &k, const MappedType &v) { - // We can't use m[k] = v; because value type might not be default constructable - auto r = m.emplace(k, v); - if (!r.second) { - // value type is not copy assignable so the only way to insert it is to erase it first... - m.erase(r.first); - m.emplace(k, v); - } - } - ); -} - - -template auto map_if_insertion_operator(Class_ &cl, std::string const &name) --> decltype(std::declval() << std::declval() << std::declval(), void()) { - - cl.def("__repr__", - [name](Map &m) { - std::ostringstream s; - s << name << '{'; - bool f = false; - for (auto const &kv : m) { - if (f) - s << ", "; - s << kv.first << ": " << kv.second; - f = true; - } - s << '}'; - return s.str(); - }, - "Return the canonical string representation of this map." - ); -} - - -NAMESPACE_END(detail) - -template , typename... Args> -class_ bind_map(module &m, const std::string &name, Args&&... args) { - using KeyType = typename Map::key_type; - using MappedType = typename Map::mapped_type; - using Class_ = class_; - - Class_ cl(m, name.c_str(), std::forward(args)...); - - cl.def(init<>()); - - // Register stream insertion operator (if possible) - detail::map_if_insertion_operator(cl, name); - - cl.def("__bool__", - [](const Map &m) -> bool { return !m.empty(); }, - "Check whether the map is nonempty" - ); - - cl.def("__iter__", - [](Map &m) { return make_key_iterator(m.begin(), m.end()); }, - keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */ - ); - - cl.def("items", - [](Map &m) { return make_iterator(m.begin(), m.end()); }, - keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */ - ); - - cl.def("__getitem__", - [](Map &m, const KeyType &k) -> MappedType & { - auto it = m.find(k); - if (it == m.end()) - throw key_error(); - return it->second; - }, - return_value_policy::reference_internal // ref + keepalive - ); - - // Assignment provided only if the type is copyable - detail::map_assignment(cl); - - cl.def("__delitem__", - [](Map &m, const KeyType &k) { - auto it = m.find(k); - if (it == m.end()) - throw key_error(); - return m.erase(it); - } - ); - - cl.def("__len__", &Map::size); - - return cl; -} - -NAMESPACE_END(pybind11) diff --git a/lanms/include/pybind11/typeid.h b/lanms/include/pybind11/typeid.h deleted file mode 100644 index c903fb14..00000000 --- a/lanms/include/pybind11/typeid.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - pybind11/typeid.h: Compiler-independent access to type identifiers - - Copyright (c) 2016 Wenzel Jakob - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. 
-*/
-
-#pragma once
-
-#include <cstdio>
-#include <cstdlib>
-
-#if defined(__GNUG__)
-#include <cxxabi.h>
-#endif
-
-NAMESPACE_BEGIN(pybind11)
-NAMESPACE_BEGIN(detail)
-/// Erase all occurrences of a substring
-inline void erase_all(std::string &string, const std::string &search) {
-    for (size_t pos = 0;;) {
-        pos = string.find(search, pos);
-        if (pos == std::string::npos) break;
-        string.erase(pos, search.length());
-    }
-}
-
-PYBIND11_NOINLINE inline void clean_type_id(std::string &name) {
-#if defined(__GNUG__)
-    int status = 0;
-    std::unique_ptr<char, void (*)(void *)> res {
-        abi::__cxa_demangle(name.c_str(), nullptr, nullptr, &status), std::free };
-    if (status == 0)
-        name = res.get();
-#else
-    detail::erase_all(name, "class ");
-    detail::erase_all(name, "struct ");
-    detail::erase_all(name, "enum ");
-#endif
-    detail::erase_all(name, "pybind11::");
-}
-NAMESPACE_END(detail)
-
-/// Return a string representation of a C++ type
-template <typename T> static std::string type_id() {
-    std::string name(typeid(T).name());
-    detail::clean_type_id(name);
-    return name;
-}
-
-NAMESPACE_END(pybind11)
diff --git a/lanms/lanms.cmd b/lanms/lanms.cmd
new file mode 100644
index 00000000..923be415
--- /dev/null
+++ b/lanms/lanms.cmd
@@ -0,0 +1,31 @@
+@echo off
+
+for /f %%i in ('where python') do set python_path=%%i
+::set python_path=D:/Python/v3.6.5
+if exist %python_path% (
+    set python_dir=%python_path:~0,-11%
+) else (
+    @echo error: can't find python.exe; remove the leading "::" and set python_path manually at line 4
+    pause
+    exit
+)
+
+set vs2017_path="C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools/VC/Auxiliary/Build/vcvars64.bat"
+rem set vs2017_path="C:/Program Files (x86)/Microsoft Visual Studio/2019/Community/VC/Auxiliary/Build/vcvars64.bat"
+if exist %vs2017_path% (
+    call %vs2017_path%
+    del adaptor.pyd
+    cl lanms.cpp adaptor.cpp ./include/clipper/clipper.cpp /I ./include /I %python_dir%/include /LD /Fe:adaptor.pyd /link/LIBPATH:%python_dir%/libs
+    del adaptor.exp adaptor.obj clipper.obj adaptor.lib
+) else (
+    @echo can't find Visual Studio 2017; set vs2017_path manually at line 14, for example:
+    @echo if Visual Studio is installed at "D:/Visual Studio/Enterprise 2017", set vs2017_path="D:/Visual Studio/Enterprise 2017/VC/Auxiliary/Build/vcvars64.bat"
+    pause
+    exit
+)
+
+if not exist adaptor.pyd (
+    @echo failed to build adaptor.pyd
+    pause
+    exit
+)
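
The new lanms/lanms.cpp below moves the locality-aware NMS implementation out of the header. The idea: candidate quadrangles arrive roughly in raster order, so each incoming quad is compared only against the most recently kept one and, when their IoU exceeds the threshold, merged into it by a score-weighted average of matched vertices (PolyMerger); standard NMS then runs over the much smaller set of merged candidates. A rough Python sketch of that logic — illustrative only: the names and the Shapely-based IoU are not part of the C++ code, and vertex-order normalization (normalize_poly) is omitted:

import numpy as np
from shapely.geometry import Polygon as ShapelyPolygon

def poly_iou(a, b):
    """IoU of two quads given as (4, 2) arrays."""
    pa, pb = ShapelyPolygon(a), ShapelyPolygon(b)
    return pa.intersection(pb).area / max(pa.union(pb).area, 1e-8)

def weighted_merge(a, sa, b, sb):
    """Score-weighted vertex average, as PolyMerger accumulates."""
    return (a * sa + b * sb) / (sa + sb), sa + sb

def locality_aware_nms(quads, scores, iou_threshold=0.3):
    """quads: (N, 4, 2) in scan order; scores: (N,)."""
    polys, ps = [], []
    for q, s in zip(quads, scores):
        if polys and poly_iou(polys[-1], q) > iou_threshold:
            # merge into the last kept quad instead of keeping both
            polys[-1], ps[-1] = weighted_merge(polys[-1], ps[-1],
                                               q.astype(np.float64), s)
        else:
            polys.append(q.astype(np.float64))
            ps.append(float(s))
    # standard NMS over the merged candidates, highest score first
    keep = []
    for i in np.argsort(ps)[::-1]:
        if all(poly_iou(polys[i], polys[j]) <= iou_threshold for j in keep):
            keep.append(i)
    return [polys[i] for i in keep], [ps[i] for i in keep]
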
diff --git a/lanms/lanms.cpp b/lanms/lanms.cpp
new file mode 100644
index 00000000..6d1536ad
--- /dev/null
+++ b/lanms/lanms.cpp
@@ -0,0 +1,203 @@
+#include "lanms.h"
+#include
+#include
+
+namespace lanms {
+    namespace cl = ClipperLib;
+
+    float paths_area(const ClipperLib::Paths &ps) {
+        float area = 0;
+        for (auto &&p: ps)
+            area += cl::Area(p);
+        return area;
+    }
+
+    float poly_iou(const Polygon &a, const Polygon &b) {
+        cl::Clipper clpr;
+        clpr.AddPath(a.poly, cl::ptSubject, true);
+        clpr.AddPath(b.poly, cl::ptClip, true);
+
+        cl::Paths inter, uni;
+        clpr.Execute(cl::ctIntersection, inter, cl::pftEvenOdd);
+        clpr.Execute(cl::ctUnion, uni, cl::pftEvenOdd);
+
+        auto inter_area = paths_area(inter),
+             uni_area = paths_area(uni);
+        return std::abs(inter_area) / std::max(std::abs(uni_area), 1.0f);
+    }
+
+    bool should_merge(const Polygon &a, const Polygon &b, float iou_threshold) {
+        return poly_iou(a, b) > iou_threshold;
+    }
+
+    PolyMerger::PolyMerger(): score(0), nr_polys(0) {
+        memset(data, 0, sizeof(data));
+    }
+
+    void PolyMerger::add(const Polygon &p_given) {
+        Polygon p;
+        if (nr_polys > 0) {
+            // vertices of the two polygons to merge may not be in the same order;
+            // we match their vertices by choosing the ordering that
+            // minimizes the total squared distance.
+            // see function normalize_poly for details.
+            p = normalize_poly(get(), p_given);
+        } else {
+            p = p_given;
+        }
+        assert(p.poly.size() == 4);
+        auto &poly = p.poly;
+        auto s = p.score;
+        data[0] += poly[0].X * s;
+        data[1] += poly[0].Y * s;
+
+        data[2] += poly[1].X * s;
+        data[3] += poly[1].Y * s;
+
+        data[4] += poly[2].X * s;
+        data[5] += poly[2].Y * s;
+
+        data[6] += poly[3].X * s;
+        data[7] += poly[3].Y * s;
+
+        score += p.score;
+
+        nr_polys += 1;
+    }
+
+    Polygon PolyMerger::normalize_poly(const Polygon &ref, const Polygon &p) {
+        std::int64_t min_d = std::numeric_limits<std::int64_t>::max();
+        size_t best_start = 0, best_order = 0;
+
+        for (size_t start = 0; start < 4; start ++) {
+            size_t j = start;
+            std::int64_t d = (
+                sqr(ref.poly[(j + 0) % 4].X - p.poly[(j + 0) % 4].X)
+                + sqr(ref.poly[(j + 0) % 4].Y - p.poly[(j + 0) % 4].Y)
+                + sqr(ref.poly[(j + 1) % 4].X - p.poly[(j + 1) % 4].X)
+                + sqr(ref.poly[(j + 1) % 4].Y - p.poly[(j + 1) % 4].Y)
+                + sqr(ref.poly[(j + 2) % 4].X - p.poly[(j + 2) % 4].X)
+                + sqr(ref.poly[(j + 2) % 4].Y - p.poly[(j + 2) % 4].Y)
+                + sqr(ref.poly[(j + 3) % 4].X - p.poly[(j + 3) % 4].X)
+                + sqr(ref.poly[(j + 3) % 4].Y - p.poly[(j + 3) % 4].Y)
+            );
+            if (d < min_d) {
+                min_d = d;
+                best_start = start;
+                best_order = 0;
+            }
+
+            d = (
+                sqr(ref.poly[(j + 0) % 4].X - p.poly[(j + 3) % 4].X)
+                + sqr(ref.poly[(j + 0) % 4].Y - p.poly[(j + 3) % 4].Y)
+                + sqr(ref.poly[(j + 1) % 4].X - p.poly[(j + 2) % 4].X)
+                + sqr(ref.poly[(j + 1) % 4].Y - p.poly[(j + 2) % 4].Y)
+                + sqr(ref.poly[(j + 2) % 4].X - p.poly[(j + 1) % 4].X)
+                + sqr(ref.poly[(j + 2) % 4].Y - p.poly[(j + 1) % 4].Y)
+                + sqr(ref.poly[(j + 3) % 4].X - p.poly[(j + 0) % 4].X)
+                + sqr(ref.poly[(j + 3) % 4].Y - p.poly[(j + 0) % 4].Y)
+            );
+            if (d < min_d) {
+                min_d = d;
+                best_start = start;
+                best_order = 1;
+            }
+        }
+
+        Polygon r;
+        r.poly.resize(4);
+        auto j = best_start;
+        if (best_order == 0) {
+            for (size_t i = 0; i < 4; i ++)
+                r.poly[i] = p.poly[(j + i) % 4];
+        } else {
+            for (size_t i = 0; i < 4; i ++)
+                r.poly[i] = p.poly[(j + 4 - i - 1) % 4];
+        }
+        r.score = p.score;
+        return r;
+    }
+
+    Polygon PolyMerger::get() const {
+        Polygon p;
+
+        auto &poly = p.poly;
+        poly.resize(4);
+        auto score_inv = 1.0f / std::max(1e-8f, score);
+        poly[0].X = data[0] * score_inv;
+        poly[0].Y = data[1] * score_inv;
+        poly[1].X = data[2] * score_inv;
+        poly[1].Y = data[3] * score_inv;
+        poly[2].X = data[4] * score_inv;
+        poly[2].Y = data[5] * score_inv;
+        poly[3].X = data[6] * score_inv;
+        poly[3].Y = data[7] * score_inv;
+
+        assert(score > 0);
+        p.score = score;
+
+        return p;
+    }
+
+    std::vector<Polygon> standard_nms(std::vector<Polygon> &polys, float iou_threshold) {
+        size_t n = polys.size();
+        if (n == 0)
+            return {};
+        std::vector<size_t> indices(n);
+        std::iota(std::begin(indices), std::end(indices), 0);
+        std::sort(std::begin(indices), std::end(indices), [&](size_t i, size_t j) { return polys[i].score > polys[j].score; });
+
+        std::vector<size_t> keep;
+        while (indices.size()) {
+            size_t p = 0, cur = indices[0];
+            keep.emplace_back(cur);
+            for (size_t i = 1; i < indices.size(); i ++) {
+                if (!should_merge(polys[cur], polys[indices[i]], iou_threshold)) {
+                    indices[p ++] = indices[i];
+                }
+            }
+            indices.resize(p);
+        }
+
+        std::vector<Polygon> ret;
+        for (auto &&i: keep) {
+            ret.emplace_back(polys[i]);
+        }
+        return ret;
+    }
+
+    std::vector<Polygon> merge_quadrangle_n9(const float *data, size_t n, float iou_threshold) {
+        using cInt = cl::cInt;
+
+        // first pass
+        std::vector<Polygon> polys;
+        for (size_t i = 0; i < n; i ++) {
+            auto p =
data + i * 9; + Polygon poly{ + { + {cInt(p[0]), cInt(p[1])}, + {cInt(p[2]), cInt(p[3])}, + {cInt(p[4]), cInt(p[5])}, + {cInt(p[6]), cInt(p[7])}, + }, + p[8], + }; + + if (polys.size()) { + // merge with the last one + auto &bpoly = polys.back(); + if (should_merge(poly, bpoly, iou_threshold)) { + PolyMerger merger; + merger.add(bpoly); + merger.add(poly); + bpoly = merger.get(); + } else { + polys.emplace_back(poly); + } + } else { + polys.emplace_back(poly); + } + } + return standard_nms(polys, iou_threshold); + } +} diff --git a/lanms/lanms.h b/lanms/lanms.h index 679666ca..4991c057 100644 --- a/lanms/lanms.h +++ b/lanms/lanms.h @@ -1,6 +1,9 @@ -#pragma once +#ifndef LANMS_H +#define LANMS_H #include "clipper/clipper.hpp" +#include +#include // locality-aware NMS namespace lanms { @@ -12,152 +15,29 @@ namespace lanms { float score; }; - float paths_area(const ClipperLib::Paths &ps) { - float area = 0; - for (auto &&p: ps) - area += cl::Area(p); - return area; - } + float paths_area(const ClipperLib::Paths &ps); - float poly_iou(const Polygon &a, const Polygon &b) { - cl::Clipper clpr; - clpr.AddPath(a.poly, cl::ptSubject, true); - clpr.AddPath(b.poly, cl::ptClip, true); + float poly_iou(const Polygon &a, const Polygon &b); - cl::Paths inter, uni; - clpr.Execute(cl::ctIntersection, inter, cl::pftEvenOdd); - clpr.Execute(cl::ctUnion, uni, cl::pftEvenOdd); - - auto inter_area = paths_area(inter), - uni_area = paths_area(uni); - return std::abs(inter_area) / std::max(std::abs(uni_area), 1.0f); - } - - bool should_merge(const Polygon &a, const Polygon &b, float iou_threshold) { - return poly_iou(a, b) > iou_threshold; - } + bool should_merge(const Polygon &a, const Polygon &b, float iou_threshold); /** * Incrementally merge polygons */ class PolyMerger { public: - PolyMerger(): score(0), nr_polys(0) { - memset(data, 0, sizeof(data)); - } + PolyMerger(); /** * Add a new polygon to be merged. */ - void add(const Polygon &p_given) { - Polygon p; - if (nr_polys > 0) { - // vertices of two polygons to merge may not in the same order; - // we match their vertices by choosing the ordering that - // minimizes the total squared distance. - // see function normalize_poly for details. 
- p = normalize_poly(get(), p_given); - } else { - p = p_given; - } - assert(p.poly.size() == 4); - auto &poly = p.poly; - auto s = p.score; - data[0] += poly[0].X * s; - data[1] += poly[0].Y * s; - - data[2] += poly[1].X * s; - data[3] += poly[1].Y * s; - - data[4] += poly[2].X * s; - data[5] += poly[2].Y * s; - - data[6] += poly[3].X * s; - data[7] += poly[3].Y * s; - - score += p.score; - - nr_polys += 1; - } + void add(const Polygon &p_given); inline std::int64_t sqr(std::int64_t x) { return x * x; } - Polygon normalize_poly( - const Polygon &ref, - const Polygon &p) { - - std::int64_t min_d = std::numeric_limits::max(); - size_t best_start = 0, best_order = 0; - - for (size_t start = 0; start < 4; start ++) { - size_t j = start; - std::int64_t d = ( - sqr(ref.poly[(j + 0) % 4].X - p.poly[(j + 0) % 4].X) - + sqr(ref.poly[(j + 0) % 4].Y - p.poly[(j + 0) % 4].Y) - + sqr(ref.poly[(j + 1) % 4].X - p.poly[(j + 1) % 4].X) - + sqr(ref.poly[(j + 1) % 4].Y - p.poly[(j + 1) % 4].Y) - + sqr(ref.poly[(j + 2) % 4].X - p.poly[(j + 2) % 4].X) - + sqr(ref.poly[(j + 2) % 4].Y - p.poly[(j + 2) % 4].Y) - + sqr(ref.poly[(j + 3) % 4].X - p.poly[(j + 3) % 4].X) - + sqr(ref.poly[(j + 3) % 4].Y - p.poly[(j + 3) % 4].Y) - ); - if (d < min_d) { - min_d = d; - best_start = start; - best_order = 0; - } - - d = ( - sqr(ref.poly[(j + 0) % 4].X - p.poly[(j + 3) % 4].X) - + sqr(ref.poly[(j + 0) % 4].Y - p.poly[(j + 3) % 4].Y) - + sqr(ref.poly[(j + 1) % 4].X - p.poly[(j + 2) % 4].X) - + sqr(ref.poly[(j + 1) % 4].Y - p.poly[(j + 2) % 4].Y) - + sqr(ref.poly[(j + 2) % 4].X - p.poly[(j + 1) % 4].X) - + sqr(ref.poly[(j + 2) % 4].Y - p.poly[(j + 1) % 4].Y) - + sqr(ref.poly[(j + 3) % 4].X - p.poly[(j + 0) % 4].X) - + sqr(ref.poly[(j + 3) % 4].Y - p.poly[(j + 0) % 4].Y) - ); - if (d < min_d) { - min_d = d; - best_start = start; - best_order = 1; - } - } - - Polygon r; - r.poly.resize(4); - auto j = best_start; - if (best_order == 0) { - for (size_t i = 0; i < 4; i ++) - r.poly[i] = p.poly[(j + i) % 4]; - } else { - for (size_t i = 0; i < 4; i ++) - r.poly[i] = p.poly[(j + 4 - i - 1) % 4]; - } - r.score = p.score; - return r; - } - - Polygon get() const { - Polygon p; - - auto &poly = p.poly; - poly.resize(4); - auto score_inv = 1.0f / std::max(1e-8f, score); - poly[0].X = data[0] * score_inv; - poly[0].Y = data[1] * score_inv; - poly[1].X = data[2] * score_inv; - poly[1].Y = data[3] * score_inv; - poly[2].X = data[4] * score_inv; - poly[2].Y = data[5] * score_inv; - poly[3].X = data[6] * score_inv; - poly[3].Y = data[7] * score_inv; - - assert(score > 0); - p.score = score; - - return p; - } + Polygon normalize_poly(const Polygon &ref, const Polygon &p); + + Polygon get() const; private: std::int64_t data[8]; @@ -165,70 +45,12 @@ namespace lanms { std::int32_t nr_polys; }; - /** * The standard NMS algorithm. 
 */
-    std::vector<Polygon> standard_nms(std::vector<Polygon> &polys, float iou_threshold) {
-        size_t n = polys.size();
-        if (n == 0)
-            return {};
-        std::vector<size_t> indices(n);
-        std::iota(std::begin(indices), std::end(indices), 0);
-        std::sort(std::begin(indices), std::end(indices), [&](size_t i, size_t j) { return polys[i].score > polys[j].score; });
-
-        std::vector<size_t> keep;
-        while (indices.size()) {
-            size_t p = 0, cur = indices[0];
-            keep.emplace_back(cur);
-            for (size_t i = 1; i < indices.size(); i ++) {
-                if (!should_merge(polys[cur], polys[indices[i]], iou_threshold)) {
-                    indices[p ++] = indices[i];
-                }
-            }
-            indices.resize(p);
-        }
-
-        std::vector<Polygon> ret;
-        for (auto &&i: keep) {
-            ret.emplace_back(polys[i]);
-        }
-        return ret;
-    }
-
-    std::vector<Polygon>
-    merge_quadrangle_n9(const float *data, size_t n, float iou_threshold) {
-        using cInt = cl::cInt;
-
-        // first pass
-        std::vector<Polygon> polys;
-        for (size_t i = 0; i < n; i ++) {
-            auto p = data + i * 9;
-            Polygon poly{
-                {
-                    {cInt(p[0]), cInt(p[1])},
-                    {cInt(p[2]), cInt(p[3])},
-                    {cInt(p[4]), cInt(p[5])},
-                    {cInt(p[6]), cInt(p[7])},
-                },
-                p[8],
-            };
-
-            if (polys.size()) {
-                // merge with the last one
-                auto &bpoly = polys.back();
-                if (should_merge(poly, bpoly, iou_threshold)) {
-                    PolyMerger merger;
-                    merger.add(bpoly);
-                    merger.add(poly);
-                    bpoly = merger.get();
-                } else {
-                    polys.emplace_back(poly);
-                }
-            } else {
-                polys.emplace_back(poly);
-            }
-        }
-        return standard_nms(polys, iou_threshold);
-    }
+    std::vector<Polygon> standard_nms(std::vector<Polygon> &polys, float iou_threshold);
+
+    std::vector<Polygon> merge_quadrangle_n9(const float *data, size_t n, float iou_threshold=0.3);
 }
+
+#endif // LANMS_H
diff --git a/lanms/main.cpp b/lanms/main.cpp
new file mode 100644
index 00000000..133a7cb0
--- /dev/null
+++ b/lanms/main.cpp
@@ -0,0 +1,14 @@
+#include "lanms.h"
+#include <iostream>
+#include <vector>
+
+int main()
+{
+    const float data[] = {0, 0, 0, 1, 1, 1, 1, 0, 1};
+    std::vector<float> datavec(data, data + sizeof(data)/sizeof(float));
+
+    std::vector<lanms::Polygon> polys = lanms::merge_quadrangle_n9(datavec.data(), datavec.size() / 9);
+    std::cout << polys.size() << std::endl;
+
+    return 0;
+}
diff --git a/lanms/pybind11 b/lanms/pybind11
new file mode 160000
index 00000000..8de7772c
--- /dev/null
+++ b/lanms/pybind11
@@ -0,0 +1 @@
+Subproject commit 8de7772cc72daca8e947b79b83fea46214931604
diff --git a/model.py b/model.py
index 24070bcd..93443a57 100644
--- a/model.py
+++ b/model.py
@@ -3,13 +3,11 @@
 from tensorflow.contrib import slim
 
-tf.app.flags.DEFINE_integer('text_scale', 512, '')
-
 from nets import resnet_v1
+import flags
 
 FLAGS = tf.app.flags.FLAGS
 
-
 def unpool(inputs):
     return tf.image.resize_bilinear(inputs, size=[tf.shape(inputs)[1]*2, tf.shape(inputs)[2]*2])
diff --git a/multigpu_train.py b/multigpu_train.py
index 0f0fbb16..1181da31 100644
--- a/multigpu_train.py
+++ b/multigpu_train.py
@@ -3,22 +3,10 @@
 import tensorflow as tf
 from tensorflow.contrib import slim
 
-tf.app.flags.DEFINE_integer('input_size', 512, '')
-tf.app.flags.DEFINE_integer('batch_size_per_gpu', 14, '')
-tf.app.flags.DEFINE_integer('num_readers', 16, '')
-tf.app.flags.DEFINE_float('learning_rate', 0.0001, '')
-tf.app.flags.DEFINE_integer('max_steps', 100000, '')
-tf.app.flags.DEFINE_float('moving_average_decay', 0.997, '')
-tf.app.flags.DEFINE_string('gpu_list', '1', '')
-tf.app.flags.DEFINE_string('checkpoint_path', '/tmp/east_resnet_v1_50_rbox/', '')
-tf.app.flags.DEFINE_boolean('restore', False, 'whether to resotre from checkpoint')
-tf.app.flags.DEFINE_integer('save_checkpoint_steps', 1000, '')
-tf.app.flags.DEFINE_integer('save_summary_steps', 100, '')
-tf.app.flags.DEFINE_string('pretrained_model_path', None, '')
-
 import model
 import icdar
+import flags
 
 FLAGS = tf.app.flags.FLAGS
 
 gpus = list(range(len(FLAGS.gpu_list.split(','))))
@@ -71,10 +59,9 @@ def main(argv=None):
     os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list
     if not tf.gfile.Exists(FLAGS.checkpoint_path):
         tf.gfile.MkDir(FLAGS.checkpoint_path)
-    else:
-        if not FLAGS.restore:
-            tf.gfile.DeleteRecursively(FLAGS.checkpoint_path)
-            tf.gfile.MkDir(FLAGS.checkpoint_path)
+    elif not FLAGS.restore:
+        tf.gfile.DeleteRecursively(FLAGS.checkpoint_path)
+        tf.gfile.MkDir(FLAGS.checkpoint_path)
 
     input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images')
     input_score_maps = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_score_maps')
@@ -119,8 +106,7 @@ def main(argv=None):
     summary_op = tf.summary.merge_all()
     # save moving average
-    variable_averages = tf.train.ExponentialMovingAverage(
-        FLAGS.moving_average_decay, global_step)
+    variable_averages = tf.train.ExponentialMovingAverage(FLAGS.moving_average_decay, global_step)
     variables_averages_op = variable_averages.apply(tf.trainable_variables())
     # batch norm updates
     with tf.control_dependencies([variables_averages_op, apply_gradient_op, batch_norm_updates_op]):
@@ -167,13 +153,15 @@ def main(argv=None):
                     step, ml, tl, avg_time_per_step, avg_examples_per_second))
 
             if step % FLAGS.save_checkpoint_steps == 0:
-                saver.save(sess, FLAGS.checkpoint_path + 'model.ckpt', global_step=global_step)
+                print('Saving checkpoint %d' % step)
+                saver.save(sess, FLAGS.checkpoint_path + '/model.ckpt', global_step=global_step)
 
             if step % FLAGS.save_summary_steps == 0:
                 _, tl, summary_str = sess.run([train_op, total_loss, summary_op], feed_dict={input_images: data[0],
                                                                                              input_score_maps: data[2],
                                                                                              input_geo_maps: data[3],
                                                                                              input_training_masks: data[4]})
+                print('Saving summary %d' % step)
                 summary_writer.add_summary(summary_str, global_step=step)
 
 if __name__ == '__main__':
diff --git a/readme.md b/readme.md
index 0d293da1..8d4c0704 100644
--- a/readme.md
+++ b/readme.md
@@ -96,5 +96,7 @@ Here are some test examples on icdar2015, enjoy the beautiful text boxes!
 ### Troubleshooting
 + How to compile lanms on Windows ?
   + See https://github.com/argman/EAST/issues/120
++ How to compile lanms with Visual Studio 2017 on Windows ?
+  + Double-click lanms/lanms.cmd
 
 Please let me know if you encounter any issues(my email boostczc@gmail dot com).
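
model.py and multigpu_train.py (and test_images.py below) now import their tf.app.flags definitions from a shared flags module instead of defining them inline. flags.py itself is not shown in this diff; a plausible reconstruction, assuming it simply re-homes the DEFINE_* lines removed above (test_images.py also reads FLAGS.test_data_path and FLAGS.output_dir, so the real file presumably defines those as well):

# flags.py -- hypothetical reconstruction from the removed DEFINE_* lines;
# the actual file is not part of this diff.
import tensorflow as tf

tf.app.flags.DEFINE_integer('text_scale', 512, '')
tf.app.flags.DEFINE_integer('input_size', 512, '')
tf.app.flags.DEFINE_integer('batch_size_per_gpu', 14, '')
tf.app.flags.DEFINE_integer('num_readers', 16, '')
tf.app.flags.DEFINE_float('learning_rate', 0.0001, '')
tf.app.flags.DEFINE_integer('max_steps', 100000, '')
tf.app.flags.DEFINE_float('moving_average_decay', 0.997, '')
tf.app.flags.DEFINE_string('gpu_list', '1', '')
tf.app.flags.DEFINE_string('checkpoint_path', '/tmp/east_resnet_v1_50_rbox/', '')
tf.app.flags.DEFINE_boolean('restore', False, 'whether to restore from checkpoint')
tf.app.flags.DEFINE_integer('save_checkpoint_steps', 1000, '')
tf.app.flags.DEFINE_integer('save_summary_steps', 100, '')
tf.app.flags.DEFINE_string('pretrained_model_path', None, '')
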
diff --git a/run_demo_server.py b/run_demo_server.py index 59e5534d..9c58dd5a 100755 --- a/run_demo_server.py +++ b/run_demo_server.py @@ -8,6 +8,7 @@ import numpy as np import uuid import json +import platform import functools import logging @@ -19,6 +20,11 @@ @functools.lru_cache(maxsize=1) def get_host_info(): + if platform.system() == 'Windows': + return {'cpuinfo': 'cpuinfo', + 'meminfo': 'meminfo', + 'loadavg': 'loadavg'} + ret = {} with open('/proc/cpuinfo') as f: ret['cpuinfo'] = f.read() @@ -31,7 +37,6 @@ def get_host_info(): return ret - @functools.lru_cache(maxsize=100) def get_predictor(checkpoint_path): logger.info('loading model') @@ -164,7 +169,7 @@ def draw_illu(illu, rst): d = np.array([t['x0'], t['y0'], t['x1'], t['y1'], t['x2'], t['y2'], t['x3'], t['y3']], dtype='int32') d = d.reshape(-1, 2) - cv2.polylines(illu, [d], isClosed=True, color=(255, 255, 0)) + cv2.polylines(illu, [d], isClosed=True, color=(255, 255, 0), lineType=cv2.LINE_AA) return illu @@ -191,7 +196,7 @@ def save_result(img, rst): -checkpoint_path = './east_icdar2015_resnet_v1_50_rbox' +checkpoint_path = './models/east_icdar2015_resnet_v1_50_rbox' @app.route('/', methods=['POST']) @@ -224,4 +229,3 @@ def main(): if __name__ == '__main__': main() - diff --git a/test_images.py b/test_images.py new file mode 100644 index 00000000..33898f47 --- /dev/null +++ b/test_images.py @@ -0,0 +1,85 @@ +from run_demo_server import get_predictor +import cv2 as cv +import os +import numpy as np + +import flags +import tensorflow as tf +FLAGS = tf.app.flags.FLAGS + +def rst2np(rst): + boxes = [] + for t in rst['text_lines']: + d = np.array([t['x0'], t['y0'], t['x1'], t['y1'], t['x2'], + t['y2'], t['x3'], t['y3']], dtype=np.int32) + boxes.append(d.reshape((4, 2))) + return boxes + +print('path to images:', FLAGS.test_data_path) +imgnames = [f for f in os.listdir(FLAGS.test_data_path) + if os.path.isfile(os.path.join(FLAGS.test_data_path, f)) + and os.path.splitext(f)[1] != '.txt'] +assert len(imgnames) + +os.makedirs(FLAGS.output_dir, exist_ok=True) + +model_loaded = False + +winname = 'EAST' +cv.namedWindow(winname, cv.WINDOW_KEEPRATIO) +wait_ms = 0 + +for imgname in imgnames: + # read test image + imgpath = os.path.join(FLAGS.test_data_path, imgname) + img = cv.imread(imgpath, cv.IMREAD_COLOR) + if img is None: + print('%s is not an image! Skipping...' 
% imgpath) + continue + + # detect text boxes if not previously detected + outpath = os.path.join(FLAGS.output_dir, 'res_' + os.path.splitext(imgname)[0] + '.txt') + if not os.path.isfile(outpath): + print('Detecting text boxes for %s' % imgname) + if not model_loaded: + print('Loading model from %s' % FLAGS.checkpoint_path) + predict = get_predictor(FLAGS.checkpoint_path) + model_loaded = True + rst = predict(img) + print('Process took %.2f seconds' % (rst['timing']['overall'])) + boxes = rst2np(rst) + # write out detection result + with open(outpath, 'w') as f: + for d in boxes: + for p in d.reshape(-1, 1): + f.write('%d,' % p) + f.write('\n') + + # read corresponding detection result + boxes = [] + with open(outpath, 'r') as f: + lines = f.readlines() + for line in lines: + line = line[0:-1] # strip newline + output = line.split(',') + if len(output) != 9: + print('Invalid output in %s: %s' % (outpath, line)) + continue + box = np.array([int(b) for b in output[0:8]], dtype=np.int32).reshape((4, 2)) + boxes.append(box) + + # display result + illu = cv.polylines(img, boxes, isClosed=True, color=(255, 255, 0), + thickness=3, lineType=cv.LINE_AA) + cv.imshow(winname, illu) + ch = cv.waitKey(wait_ms) & 0xFF + + if chr(ch).lower() == 's': + outname = os.path.join(FLAGS.output_dir, imgname) + if cv.imwrite(outname, illu): + print('Wrote out %s' % outname) + else: + print('Could not write %s' % outname) + + if ch == 27: # ESC + break diff --git a/test_requirements.txt b/test_requirements.txt new file mode 100644 index 00000000..1806d426 --- /dev/null +++ b/test_requirements.txt @@ -0,0 +1,7 @@ +Shapely +Flask +matplotlib +scipy +plumbum +numpy +Pillow diff --git a/ubuntu.sh b/ubuntu.sh new file mode 100755 index 00000000..e3ff542c --- /dev/null +++ b/ubuntu.sh @@ -0,0 +1,93 @@ +#!/bin/bash +set -e +cd "$(dirname "$0")" +root="$(pwd)" + +# install tkinter +if dpkg-query -W -f='${Status}' python3-tk | grep "ok installed" > /dev/null 2>&1 +then + echo "tkinter already installed" +else + sudo apt-get install python3-tk -y +fi + +# install unzip +if dpkg-query -W -f='${Status}' unzip | grep "ok installed" > /dev/null 2>&1 +then + echo "unzip already installed" +else + sudo apt-get install unzip -y +fi + +# install gdown +if pip3 show gdown > /dev/null 2>&1 +then + echo "gdown already installed" +else + pip3 install gdown +fi + +# download model +mkdir -p models +cd models/ +modelname="east_icdar2015_resnet_v1_50_rbox" +if [ ! -d "$modelname" ] +then + echo "Downloading $modelname" + modelurl='https://drive.google.com/uc?id=0B3APw5BZJ67ETHNPaU9xUkVoV0U' + zipname="east_icdar2015_resnet_v1_50_rbox.zip" + if [ ! -f "$zipname" ] + then + gdown "$modelurl" + else + echo "$zipname exists" + fi + unzip "$zipname" + echo "Deleting $zipname" && rm "$zipname" +else + echo "$modelname exists" +fi + +# modify checkpoint path manually +cd "$modelname" +if [ ! -f checkpoint.orig ]; then +mv checkpoint checkpoint.orig +cat > checkpoint <<-EOF +model_checkpoint_path: "model.ckpt-49491" +all_model_checkpoint_paths: "model.ckpt-49491" +EOF +fi + +# required for training +cd "$root/models/" +ckptname="resnet_v1_50.ckpt" +if [ ! -f "$ckptname" ] +then + echo "Downloading $ckptname" + ckpturl='http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz' + tarname="resnet_v1_50_2016_08_28.tar.gz" + if [ ! 
-f "$tarname" ] + then + wget "$ckpturl" + else + echo "$tarname exists" + fi + tar -xvzf "$tarname" + echo "Deleting $tarname" && rm "$tarname" +else + echo "$ckptname exists" +fi + +# compile lanms +cd "$root/lanms" +mkdir -p build +cd build/ +cmake .. +make +cp adaptor.cpython-* .. + +# install python requirements +cd "$root" +pip3 install -r test_requirements.txt + +echo DONE