diff --git a/README.md b/README.md
index 72660df2..da6a1ba0 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-
+
Bayesian Active Learning (Baal)
@@ -92,7 +92,7 @@ The framework consists of four main parts, as demonstrated in the flowchart belo
- ActiveLearningLoop
-
+
To get started, wrap your dataset in our _[**ActiveLearningDataset**](baal/active/dataset.py)_ class. This will ensure
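
Not part of the diff, but since the hunk above points reviewers at the ActiveLearningDataset API, here is a minimal usage sketch of that wrapping step. CIFAR10 and the 100-item random seed set are illustrative assumptions; `ActiveLearningDataset`, `label_randomly`, and the `pool` attribute follow Baal's documented API.

```python
# Minimal sketch: wrap a torch Dataset so Baal can track labelled vs. pool items.
from baal.active import ActiveLearningDataset
from torchvision.datasets import CIFAR10
from torchvision.transforms import ToTensor

train_set = CIFAR10(".", train=True, download=True, transform=ToTensor())
al_dataset = ActiveLearningDataset(train_set)  # illustrative dataset choice

al_dataset.label_randomly(100)  # seed the labelled set with 100 random items
print(len(al_dataset))          # labelled examples available for training
print(len(al_dataset.pool))     # unlabelled pool that heuristics query from
```
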
diff --git a/docs/Makefile b/docs/Makefile
index a16605d4..554dcade 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -1,23 +1,10 @@
-# Minimal makefile for Sphinx documentation
+# Minimal makefile for MkDocs documentation
#
-# You can set these variables from the command line.
-SPHINXOPTS =
-SPHINXBUILD = sphinx-build
-SOURCEDIR = .
-BUILDDIR = _build
-
-# Put it first so that "make" without argument is like "make help".
+# Put it first so that "make" without argument is like "make build".
-help:
- @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-
-.PHONY: help Makefile
-
-# Catch-all target: route all unknown targets to Sphinx using the new
-# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
-%: Makefile
- @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+build:
+ mkdocs build
server:
- open _build/html/index.html
\ No newline at end of file
+	mkdocs serve
\ No newline at end of file
diff --git a/docs/_static/css/default.css b/docs/_static/css/default.css
deleted file mode 100644
index 18dba0dd..00000000
--- a/docs/_static/css/default.css
+++ /dev/null
@@ -1,5116 +0,0 @@
-/*CSS from divio-docs-theme*/
-@charset "UTF-8";
-@import url("https://fonts.googleapis.com/css?family=Nunito:400:600");
-* {
- -webkit-box-sizing: border-box;
- -moz-box-sizing: border-box;
- box-sizing: border-box; }
-
-article, aside, details, figcaption, figure, footer, header, hgroup, nav, section {
- display: block; }
-
-audio, canvas, video {
- display: inline-block;
- *display: inline;
- *zoom: 1; }
-
-audio:not([controls]) {
- display: none; }
-
-[hidden] {
- display: none; }
-
-* {
- -webkit-box-sizing: border-box;
- -moz-box-sizing: border-box;
- box-sizing: border-box; }
-
-html {
- font-size: 100%;
- -webkit-text-size-adjust: 100%;
- -ms-text-size-adjust: 100%; }
-
-body {
- margin: 0;
- -webkit-font-smoothing: antialiased;
- -moz-osx-font-smoothing: grayscale; }
-
-a:hover, a:active {
- outline: 0; }
-
-abbr[title] {
- border-bottom: 1px dotted; }
-
-b, strong {
- font-weight: 600; }
-
-blockquote {
- margin: 0; }
-
-dfn {
- font-style: italic; }
-
-ins {
- background: #ff9;
- color: #000;
- text-decoration: none; }
-
-mark {
- background: #ff0;
- color: #000;
- font-style: italic;
- font-weight: 600; }
-
-pre, code, .rst-content tt, .rst-content code, kbd, samp {
- font-family: monospace, serif;
- _font-family: "courier new", monospace;
- font-size: 1em; }
-
-pre {
- white-space: pre; }
-
-q {
- quotes: none; }
-
-q:before, q:after {
- content: "";
- content: none; }
-
-small {
- font-size: 85%; }
-
-sub, sup {
- font-size: 75%;
- line-height: 0;
- position: relative;
- vertical-align: baseline; }
-
-sup {
- top: -0.5em; }
-
-sub {
- bottom: -0.25em; }
-
-ul, ol, dl {
- padding-left:20px;
- list-style-type: circle;
-}
-
-li {
- list-style: none; }
-
-dd {
- margin: 0; }
-
-img {
- border: 0;
- -ms-interpolation-mode: bicubic;
- vertical-align: middle;
- max-width: 100%; }
-
-svg:not(:root) {
- overflow: hidden; }
-
-figure {
- margin: 0; }
-
-form {
- margin: 0; }
-
-fieldset {
- border: 0;
- margin: 0;
- padding: 0; }
-
-label {
- cursor: pointer; }
-
-legend {
- border: 0;
- *margin-left: -7px;
- padding: 0;
- white-space: normal; }
-
-button, input, select, textarea {
- font-size: 100%;
- margin: 0;
- vertical-align: baseline;
- *vertical-align: middle; }
-
-button, input {
- line-height: normal; }
-
-button, input[type="button"], input[type="reset"], input[type="submit"] {
- cursor: pointer;
- -webkit-appearance: button;
- *overflow: visible; }
-
-button[disabled], input[disabled] {
- cursor: default; }
-
-input[type="checkbox"], input[type="radio"] {
- box-sizing: border-box;
- padding: 0;
- *width: 13px;
- *height: 13px; }
-
-input[type="search"] {
- -webkit-appearance: textfield;
- -moz-box-sizing: content-box;
- -webkit-box-sizing: content-box;
- box-sizing: content-box; }
-
-input[type="search"]::-webkit-search-decoration, input[type="search"]::-webkit-search-cancel-button {
- -webkit-appearance: none; }
-
-button::-moz-focus-inner, input::-moz-focus-inner {
- border: 0;
- padding: 0; }
-
-textarea {
- overflow: auto;
- vertical-align: top;
- resize: vertical; }
-
-table {
- border-collapse: collapse;
- border-spacing: 0; }
-
-td {
- vertical-align: top; }
-
-.chromeframe {
- margin: 0.2em 0;
- background: #ccc;
- color: black;
- padding: 0.2em 0; }
-
-.ir {
- display: block;
- border: 0;
- text-indent: -999em;
- overflow: hidden;
- background-color: transparent;
- background-repeat: no-repeat;
- text-align: left;
- direction: ltr;
- *line-height: 0; }
-
-.ir br {
- display: none; }
-
-.hidden {
- display: none !important;
- visibility: hidden; }
-
-.visuallyhidden {
- border: 0;
- clip: rect(0 0 0 0);
- height: 1px;
- margin: -1px;
- overflow: hidden;
- padding: 0;
- position: absolute;
- width: 1px; }
-
-.visuallyhidden.focusable:active, .visuallyhidden.focusable:focus {
- clip: auto;
- height: auto;
- margin: 0;
- overflow: visible;
- position: static;
- width: auto; }
-
-.invisible {
- visibility: hidden; }
-
-.relative {
- position: relative; }
-
-big, small {
- font-size: 100%; }
-
-@media print {
- html, body, section {
- background: none !important; }
- * {
- box-shadow: none !important;
- text-shadow: none !important;
- filter: none !important;
- -ms-filter: none !important; }
- a, a:visited {
- text-decoration: underline; }
- .ir a:after, a[href^="javascript:"]:after, a[href^="#"]:after {
- content: ""; }
- pre, blockquote {
- page-break-inside: avoid; }
- thead {
- display: table-header-group; }
- tr, img {
- page-break-inside: avoid; }
- img {
- max-width: 100% !important; }
- @page {
- margin: 0.5cm; }
- p, h2, .rst-content .toctree-wrapper p.caption, h3 {
- orphans: 3;
- widows: 3; }
- h2, .rst-content .toctree-wrapper p.caption, h3 {
- page-break-after: avoid; } }
-
-.fa:before, .wy-menu-vertical li span.toctree-expand:before, .wy-menu-vertical li.on a span.toctree-expand:before, .wy-menu-vertical li.current > a span.toctree-expand:before, .rst-content .admonition-title:before, .rst-content h1 .headerlink:before, .rst-content h2 .headerlink:before, .rst-content h3 .headerlink:before, .rst-content h4 .headerlink:before, .rst-content h5 .headerlink:before, .rst-content h6 .headerlink:before, .rst-content dl dt .headerlink:before, .rst-content p.caption .headerlink:before, .rst-content tt.download span:first-child:before, .rst-content code.download span:first-child:before, .icon:before, .wy-dropdown .caret:before, .wy-inline-validate.wy-inline-validate-success .wy-input-context:before, .wy-inline-validate.wy-inline-validate-danger .wy-input-context:before, .wy-inline-validate.wy-inline-validate-warning .wy-input-context:before, .wy-inline-validate.wy-inline-validate-info .wy-input-context:before, .wy-alert, .rst-content .note, .rst-content .attention, .rst-content .caution, .rst-content .danger, .rst-content .error, .rst-content .hint, .rst-content .important, .rst-content .tip, .rst-content .warning, .rst-content .seealso, .rst-content .admonition-todo, .rst-content .admonition, .btn, input[type="text"], input[type="password"], input[type="email"], input[type="url"], input[type="date"], input[type="month"], input[type="time"], input[type="datetime"], input[type="datetime-local"], input[type="week"], input[type="number"], input[type="search"], input[type="tel"], input[type="color"], select, textarea, .wy-menu-vertical li.on a, .wy-menu-vertical li.current > a, .wy-side-nav-search > a, .wy-side-nav-search .wy-dropdown > a, .wy-nav-top a {
- -webkit-font-smoothing: antialiased; }
-
-.clearfix {
- *zoom: 1; }
-
-.clearfix:before, .clearfix:after {
- display: table;
- content: ""; }
-
-.clearfix:after {
- clear: both; }
-
-/*!
- * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome
- * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
- */
-/* FONT PATH
- * -------------------------- */
-@font-face {
- font-family: 'FontAwesome';
- src: url("../fonts/fontawesome-webfont.eot?v=4.7.0");
- src: url("../fonts/fontawesome-webfont.eot?#iefix&v=4.7.0") format("embedded-opentype"), url("../fonts/fontawesome-webfont.woff2?v=4.7.0") format("woff2"), url("../fonts/fontawesome-webfont.woff?v=4.7.0") format("woff"), url("../fonts/fontawesome-webfont.ttf?v=4.7.0") format("truetype"), url("../fonts/fontawesome-webfont.svg?v=4.7.0#fontawesomeregular") format("svg");
- font-weight: normal;
- font-style: normal; }
-
-.fa, .wy-menu-vertical li span.toctree-expand, .wy-menu-vertical li.on a span.toctree-expand, .wy-menu-vertical li.current > a span.toctree-expand, .rst-content .admonition-title, .rst-content h1 .headerlink, .rst-content h2 .headerlink, .rst-content h3 .headerlink, .rst-content h4 .headerlink, .rst-content h5 .headerlink, .rst-content h6 .headerlink, .rst-content dl dt .headerlink, .rst-content p.caption .headerlink, .rst-content tt.download span:first-child, .rst-content code.download span:first-child, .icon {
- display: inline-block;
- font: normal normal normal 14px/1 FontAwesome;
- font-size: inherit;
- text-rendering: auto;
- -webkit-font-smoothing: antialiased;
- -moz-osx-font-smoothing: grayscale; }
-
-/* makes the font 33% larger relative to the icon container */
-.fa-lg {
- font-size: 1.3333333333em;
- line-height: 0.75em;
- vertical-align: -15%; }
-
-.fa-2x {
- font-size: 2em; }
-
-.fa-3x {
- font-size: 3em; }
-
-.fa-4x {
- font-size: 4em; }
-
-.fa-5x {
- font-size: 5em; }
-
-.fa-fw {
- width: 1.2857142857em;
- text-align: center; }
-
-.fa-ul {
- padding-left: 0;
- margin-left: 2.1428571429em;
- list-style-type: circle; }
-
-.fa-ul > li {
- position: relative; }
-
-.fa-li {
- position: absolute;
- left: -2.1428571429em;
- width: 2.1428571429em;
- top: 0.1428571429em;
- text-align: center; }
-
-.fa-li.fa-lg {
- left: -1.8571428571em; }
-
-.fa-border {
- padding: .2em .25em .15em;
- border: solid 0.08em #eee;
- border-radius: .1em; }
-
-.fa-pull-left {
- float: left; }
-
-.fa-pull-right {
- float: right; }
-
-.fa.fa-pull-left, .wy-menu-vertical li span.fa-pull-left.toctree-expand, .wy-menu-vertical li.on a span.fa-pull-left.toctree-expand, .wy-menu-vertical li.current > a span.fa-pull-left.toctree-expand, .rst-content .fa-pull-left.admonition-title, .rst-content h1 .fa-pull-left.headerlink, .rst-content h2 .fa-pull-left.headerlink, .rst-content h3 .fa-pull-left.headerlink, .rst-content h4 .fa-pull-left.headerlink, .rst-content h5 .fa-pull-left.headerlink, .rst-content h6 .fa-pull-left.headerlink, .rst-content dl dt .fa-pull-left.headerlink, .rst-content p.caption .fa-pull-left.headerlink, .rst-content tt.download span.fa-pull-left:first-child, .rst-content code.download span.fa-pull-left:first-child, .fa-pull-left.icon {
- margin-right: .3em; }
-
-.fa.fa-pull-right, .wy-menu-vertical li span.fa-pull-right.toctree-expand, .wy-menu-vertical li.on a span.fa-pull-right.toctree-expand, .wy-menu-vertical li.current > a span.fa-pull-right.toctree-expand, .rst-content .fa-pull-right.admonition-title, .rst-content h1 .fa-pull-right.headerlink, .rst-content h2 .fa-pull-right.headerlink, .rst-content h3 .fa-pull-right.headerlink, .rst-content h4 .fa-pull-right.headerlink, .rst-content h5 .fa-pull-right.headerlink, .rst-content h6 .fa-pull-right.headerlink, .rst-content dl dt .fa-pull-right.headerlink, .rst-content p.caption .fa-pull-right.headerlink, .rst-content tt.download span.fa-pull-right:first-child, .rst-content code.download span.fa-pull-right:first-child, .fa-pull-right.icon {
- margin-left: .3em; }
-
-/* Deprecated as of 4.4.0 */
-.pull-right {
- float: right; }
-
-.pull-left {
- float: left; }
-
-.fa.pull-left, .wy-menu-vertical li span.pull-left.toctree-expand, .wy-menu-vertical li.on a span.pull-left.toctree-expand, .wy-menu-vertical li.current > a span.pull-left.toctree-expand, .rst-content .pull-left.admonition-title, .rst-content h1 .pull-left.headerlink, .rst-content h2 .pull-left.headerlink, .rst-content h3 .pull-left.headerlink, .rst-content h4 .pull-left.headerlink, .rst-content h5 .pull-left.headerlink, .rst-content h6 .pull-left.headerlink, .rst-content dl dt .pull-left.headerlink, .rst-content p.caption .pull-left.headerlink, .rst-content tt.download span.pull-left:first-child, .rst-content code.download span.pull-left:first-child, .pull-left.icon {
- margin-right: .3em; }
-
-.fa.pull-right, .wy-menu-vertical li span.pull-right.toctree-expand, .wy-menu-vertical li.on a span.pull-right.toctree-expand, .wy-menu-vertical li.current > a span.pull-right.toctree-expand, .rst-content .pull-right.admonition-title, .rst-content h1 .pull-right.headerlink, .rst-content h2 .pull-right.headerlink, .rst-content h3 .pull-right.headerlink, .rst-content h4 .pull-right.headerlink, .rst-content h5 .pull-right.headerlink, .rst-content h6 .pull-right.headerlink, .rst-content dl dt .pull-right.headerlink, .rst-content p.caption .pull-right.headerlink, .rst-content tt.download span.pull-right:first-child, .rst-content code.download span.pull-right:first-child, .pull-right.icon {
- margin-left: .3em; }
-
-.fa-spin {
- -webkit-animation: fa-spin 2s infinite linear;
- animation: fa-spin 2s infinite linear; }
-
-.fa-pulse {
- -webkit-animation: fa-spin 1s infinite steps(8);
- animation: fa-spin 1s infinite steps(8); }
-
-@-webkit-keyframes fa-spin {
- 0% {
- -webkit-transform: rotate(0deg);
- transform: rotate(0deg); }
- 100% {
- -webkit-transform: rotate(359deg);
- transform: rotate(359deg); } }
-
-@keyframes fa-spin {
- 0% {
- -webkit-transform: rotate(0deg);
- transform: rotate(0deg); }
- 100% {
- -webkit-transform: rotate(359deg);
- transform: rotate(359deg); } }
-
-.fa-rotate-90 {
- -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";
- -webkit-transform: rotate(90deg);
- -ms-transform: rotate(90deg);
- transform: rotate(90deg); }
-
-.fa-rotate-180 {
- -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";
- -webkit-transform: rotate(180deg);
- -ms-transform: rotate(180deg);
- transform: rotate(180deg); }
-
-.fa-rotate-270 {
- -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";
- -webkit-transform: rotate(270deg);
- -ms-transform: rotate(270deg);
- transform: rotate(270deg); }
-
-.fa-flip-horizontal {
- -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";
- -webkit-transform: scale(-1, 1);
- -ms-transform: scale(-1, 1);
- transform: scale(-1, 1); }
-
-.fa-flip-vertical {
- -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";
- -webkit-transform: scale(1, -1);
- -ms-transform: scale(1, -1);
- transform: scale(1, -1); }
-
-:root .fa-rotate-90,
-:root .fa-rotate-180,
-:root .fa-rotate-270,
-:root .fa-flip-horizontal,
-:root .fa-flip-vertical {
- filter: none; }
-
-.fa-stack {
- position: relative;
- display: inline-block;
- width: 2em;
- height: 2em;
- line-height: 2em;
- vertical-align: middle; }
-
-.fa-stack-1x, .fa-stack-2x {
- position: absolute;
- left: 0;
- width: 100%;
- text-align: center; }
-
-.fa-stack-1x {
- line-height: inherit; }
-
-.fa-stack-2x {
- font-size: 2em; }
-
-.fa-inverse {
- color: #fff; }
-
-/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen
- readers do not read off random characters that represent icons */
-.fa-glass:before {
- content: ""; }
-
-.fa-music:before {
- content: ""; }
-
-.fa-search:before, .icon-search:before {
- content: ""; }
-
-.fa-envelope-o:before {
- content: ""; }
-
-.fa-heart:before {
- content: ""; }
-
-.fa-star:before {
- content: ""; }
-
-.fa-star-o:before {
- content: ""; }
-
-.fa-user:before {
- content: ""; }
-
-.fa-film:before {
- content: ""; }
-
-.fa-th-large:before {
- content: ""; }
-
-.fa-th:before {
- content: ""; }
-
-.fa-th-list:before {
- content: ""; }
-
-.fa-check:before {
- content: ""; }
-
-.fa-remove:before,
-.fa-close:before,
-.fa-times:before {
- content: ""; }
-
-.fa-search-plus:before {
- content: ""; }
-
-.fa-search-minus:before {
- content: ""; }
-
-.fa-power-off:before {
- content: ""; }
-
-.fa-signal:before {
- content: ""; }
-
-.fa-gear:before,
-.fa-cog:before {
- content: ""; }
-
-.fa-trash-o:before {
- content: ""; }
-
-.fa-home:before, .icon-home:before {
- content: ""; }
-
-.fa-file-o:before {
- content: ""; }
-
-.fa-clock-o:before {
- content: ""; }
-
-.fa-road:before {
- content: ""; }
-
-.fa-download:before, .rst-content tt.download span:first-child:before, .rst-content code.download span:first-child:before {
- content: ""; }
-
-.fa-arrow-circle-o-down:before {
- content: ""; }
-
-.fa-arrow-circle-o-up:before {
- content: ""; }
-
-.fa-inbox:before {
- content: ""; }
-
-.fa-play-circle-o:before {
- content: ""; }
-
-.fa-rotate-right:before,
-.fa-repeat:before {
- content: ""; }
-
-.fa-refresh:before {
- content: ""; }
-
-.fa-list-alt:before {
- content: ""; }
-
-.fa-lock:before {
- content: ""; }
-
-.fa-flag:before {
- content: ""; }
-
-.fa-headphones:before {
- content: ""; }
-
-.fa-volume-off:before {
- content: ""; }
-
-.fa-volume-down:before {
- content: ""; }
-
-.fa-volume-up:before {
- content: ""; }
-
-.fa-qrcode:before {
- content: ""; }
-
-.fa-barcode:before {
- content: ""; }
-
-.fa-tag:before {
- content: ""; }
-
-.fa-tags:before {
- content: ""; }
-
-.fa-book:before, .icon-book:before {
- content: ""; }
-
-.fa-bookmark:before {
- content: ""; }
-
-.fa-print:before {
- content: ""; }
-
-.fa-camera:before {
- content: ""; }
-
-.fa-font:before {
- content: ""; }
-
-.fa-bold:before {
- content: ""; }
-
-.fa-italic:before {
- content: ""; }
-
-.fa-text-height:before {
- content: ""; }
-
-.fa-text-width:before {
- content: ""; }
-
-.fa-align-left:before {
- content: ""; }
-
-.fa-align-center:before {
- content: ""; }
-
-.fa-align-right:before {
- content: ""; }
-
-.fa-align-justify:before {
- content: ""; }
-
-.fa-list:before {
- content: ""; }
-
-.fa-dedent:before,
-.fa-outdent:before {
- content: ""; }
-
-.fa-indent:before {
- content: ""; }
-
-.fa-video-camera:before {
- content: ""; }
-
-.fa-photo:before,
-.fa-image:before,
-.fa-picture-o:before {
- content: ""; }
-
-.fa-pencil:before {
- content: ""; }
-
-.fa-map-marker:before {
- content: ""; }
-
-.fa-adjust:before {
- content: ""; }
-
-.fa-tint:before {
- content: ""; }
-
-.fa-edit:before,
-.fa-pencil-square-o:before {
- content: ""; }
-
-.fa-share-square-o:before {
- content: ""; }
-
-.fa-check-square-o:before {
- content: ""; }
-
-.fa-arrows:before {
- content: ""; }
-
-.fa-step-backward:before {
- content: ""; }
-
-.fa-fast-backward:before {
- content: ""; }
-
-.fa-backward:before {
- content: ""; }
-
-.fa-play:before {
- content: ""; }
-
-.fa-pause:before {
- content: ""; }
-
-.fa-stop:before {
- content: ""; }
-
-.fa-forward:before {
- content: ""; }
-
-.fa-fast-forward:before {
- content: ""; }
-
-.fa-step-forward:before {
- content: ""; }
-
-.fa-eject:before {
- content: ""; }
-
-.fa-chevron-left:before {
- content: ""; }
-
-.fa-chevron-right:before,
-.wy-menu-vertical li span.toctree-expand:before {
- content: ""; }
-
-.fa-plus-circle:before {
- content: ""; }
-
-.fa-minus-circle:before {
- content: ""; }
-
-.fa-times-circle:before, .wy-inline-validate.wy-inline-validate-danger .wy-input-context:before {
- content: ""; }
-
-.fa-check-circle:before, .wy-inline-validate.wy-inline-validate-success .wy-input-context:before {
- content: ""; }
-
-.fa-question-circle:before {
- content: ""; }
-
-.fa-info-circle:before {
- content: ""; }
-
-.fa-crosshairs:before {
- content: ""; }
-
-.fa-times-circle-o:before {
- content: ""; }
-
-.fa-check-circle-o:before {
- content: ""; }
-
-.fa-ban:before {
- content: ""; }
-
-.fa-arrow-left:before {
- content: ""; }
-
-.fa-arrow-right:before {
- content: ""; }
-
-.fa-arrow-up:before {
- content: ""; }
-
-.fa-arrow-down:before {
- content: ""; }
-
-.fa-mail-forward:before,
-.fa-share:before {
- content: ""; }
-
-.fa-expand:before {
- content: ""; }
-
-.fa-compress:before {
- content: ""; }
-
-.fa-plus:before {
- content: ""; }
-
-.fa-minus:before {
- content: ""; }
-
-.fa-asterisk:before {
- content: ""; }
-
-.fa-exclamation-circle:before, .wy-inline-validate.wy-inline-validate-warning .wy-input-context:before, .wy-inline-validate.wy-inline-validate-info .wy-input-context:before, .rst-content .admonition-title:before {
- content: ""; }
-
-.fa-gift:before {
- content: ""; }
-
-.fa-leaf:before {
- content: ""; }
-
-.fa-fire:before, .icon-fire:before {
- content: ""; }
-
-.fa-eye:before {
- content: ""; }
-
-.fa-eye-slash:before {
- content: ""; }
-
-.fa-warning:before,
-.fa-exclamation-triangle:before {
- content: ""; }
-
-.fa-plane:before {
- content: ""; }
-
-.fa-calendar:before {
- content: ""; }
-
-.fa-random:before {
- content: ""; }
-
-.fa-comment:before {
- content: ""; }
-
-.fa-magnet:before {
- content: ""; }
-
-.fa-chevron-up:before {
- content: ""; }
-
-.fa-chevron-down:before,
-.wy-menu-vertical li.on a span.toctree-expand:before,
-.wy-menu-vertical li.current > a span.toctree-expand:before {
- content: ""; }
-
-.fa-retweet:before {
- content: ""; }
-
-.fa-shopping-cart:before {
- content: ""; }
-
-.fa-folder:before {
- content: ""; }
-
-.fa-folder-open:before {
- content: ""; }
-
-.fa-arrows-v:before {
- content: ""; }
-
-.fa-arrows-h:before {
- content: ""; }
-
-.fa-bar-chart-o:before,
-.fa-bar-chart:before {
- content: ""; }
-
-.fa-twitter-square:before {
- content: ""; }
-
-.fa-facebook-square:before {
- content: ""; }
-
-.fa-camera-retro:before {
- content: ""; }
-
-.fa-key:before {
- content: ""; }
-
-.fa-gears:before,
-.fa-cogs:before {
- content: ""; }
-
-.fa-comments:before {
- content: ""; }
-
-.fa-thumbs-o-up:before {
- content: ""; }
-
-.fa-thumbs-o-down:before {
- content: ""; }
-
-.fa-star-half:before {
- content: ""; }
-
-.fa-heart-o:before {
- content: ""; }
-
-.fa-sign-out:before {
- content: ""; }
-
-.fa-linkedin-square:before {
- content: ""; }
-
-.fa-thumb-tack:before {
- content: ""; }
-
-.fa-external-link:before {
- content: ""; }
-
-.fa-sign-in:before {
- content: ""; }
-
-.fa-trophy:before {
- content: ""; }
-
-.fa-github-square:before {
- content: ""; }
-
-.fa-upload:before {
- content: ""; }
-
-.fa-lemon-o:before {
- content: ""; }
-
-.fa-phone:before {
- content: ""; }
-
-.fa-square-o:before {
- content: ""; }
-
-.fa-bookmark-o:before {
- content: ""; }
-
-.fa-phone-square:before {
- content: ""; }
-
-.fa-twitter:before {
- content: ""; }
-
-.fa-facebook-f:before,
-.fa-facebook:before {
- content: ""; }
-
-.fa-github:before, .icon-github:before {
- content: ""; }
-
-.fa-unlock:before {
- content: ""; }
-
-.fa-credit-card:before {
- content: ""; }
-
-.fa-feed:before,
-.fa-rss:before {
- content: ""; }
-
-.fa-hdd-o:before {
- content: ""; }
-
-.fa-bullhorn:before {
- content: ""; }
-
-.fa-bell:before {
- content: ""; }
-
-.fa-certificate:before {
- content: ""; }
-
-.fa-hand-o-right:before {
- content: ""; }
-
-.fa-hand-o-left:before {
- content: ""; }
-
-.fa-hand-o-up:before {
- content: ""; }
-
-.fa-hand-o-down:before {
- content: ""; }
-
-.fa-arrow-circle-left:before, .icon-circle-arrow-left:before {
- margin: 2px 5px 1px 1px;
- content: ""; }
-
-.fa-arrow-circle-right:before, .icon-circle-arrow-right:before {
- margin: 2px 1px 1px 5px;
- content: ""; }
-
-.fa-arrow-circle-up:before {
- content: ""; }
-
-.fa-arrow-circle-down:before {
- content: ""; }
-
-.fa-globe:before {
- content: ""; }
-
-.fa-wrench:before {
- content: ""; }
-
-.fa-tasks:before {
- content: ""; }
-
-.fa-filter:before {
- content: ""; }
-
-.fa-briefcase:before {
- content: ""; }
-
-.fa-arrows-alt:before {
- content: ""; }
-
-.fa-group:before,
-.fa-users:before {
- content: ""; }
-
-.fa-chain:before,
-.fa-link:before,
-.icon-link:before {
- content: ""; }
-
-.fa-cloud:before {
- content: ""; }
-
-.fa-flask:before {
- content: ""; }
-
-.fa-cut:before,
-.fa-scissors:before {
- content: ""; }
-
-.fa-copy:before,
-.fa-files-o:before {
- content: ""; }
-
-.fa-paperclip:before {
- content: ""; }
-
-.fa-save:before,
-.fa-floppy-o:before {
- content: ""; }
-
-.fa-square:before {
- content: ""; }
-
-.fa-navicon:before,
-.fa-reorder:before,
-.fa-bars:before {
- content: ""; }
-
-.fa-list-ul:before {
- content: ""; }
-
-.fa-list-ol:before {
- content: ""; }
-
-.fa-strikethrough:before {
- content: ""; }
-
-.fa-underline:before {
- content: ""; }
-
-.fa-table:before {
- content: ""; }
-
-.fa-magic:before {
- content: ""; }
-
-.fa-truck:before {
- content: ""; }
-
-.fa-pinterest:before {
- content: ""; }
-
-.fa-pinterest-square:before {
- content: ""; }
-
-.fa-google-plus-square:before {
- content: ""; }
-
-.fa-google-plus:before {
- content: ""; }
-
-.fa-money:before {
- content: ""; }
-
-.fa-caret-down:before, .wy-dropdown .caret:before, .icon-caret-down:before {
- content: ""; }
-
-.fa-caret-up:before {
- content: ""; }
-
-.fa-caret-left:before {
- content: ""; }
-
-.fa-caret-right:before {
- content: ""; }
-
-.fa-columns:before {
- content: ""; }
-
-.fa-unsorted:before,
-.fa-sort:before {
- content: ""; }
-
-.fa-sort-down:before,
-.fa-sort-desc:before {
- content: ""; }
-
-.fa-sort-up:before,
-.fa-sort-asc:before {
- content: ""; }
-
-.fa-envelope:before {
- content: ""; }
-
-.fa-linkedin:before {
- content: ""; }
-
-.fa-rotate-left:before,
-.fa-undo:before {
- content: ""; }
-
-.fa-legal:before,
-.fa-gavel:before {
- content: ""; }
-
-.fa-dashboard:before,
-.fa-tachometer:before {
- content: ""; }
-
-.fa-comment-o:before {
- content: ""; }
-
-.fa-comments-o:before {
- content: ""; }
-
-.fa-flash:before,
-.fa-bolt:before {
- content: ""; }
-
-.fa-sitemap:before {
- content: ""; }
-
-.fa-umbrella:before {
- content: ""; }
-
-.fa-paste:before,
-.fa-clipboard:before {
- content: ""; }
-
-.fa-lightbulb-o:before {
- content: ""; }
-
-.fa-exchange:before {
- content: ""; }
-
-.fa-cloud-download:before {
- content: ""; }
-
-.fa-cloud-upload:before {
- content: ""; }
-
-.fa-user-md:before {
- content: ""; }
-
-.fa-stethoscope:before {
- content: ""; }
-
-.fa-suitcase:before {
- content: ""; }
-
-.fa-bell-o:before {
- content: ""; }
-
-.fa-coffee:before {
- content: ""; }
-
-.fa-cutlery:before {
- content: ""; }
-
-.fa-file-text-o:before {
- content: ""; }
-
-.fa-building-o:before {
- content: ""; }
-
-.fa-hospital-o:before {
- content: ""; }
-
-.fa-ambulance:before {
- content: ""; }
-
-.fa-medkit:before {
- content: ""; }
-
-.fa-fighter-jet:before {
- content: ""; }
-
-.fa-beer:before {
- content: ""; }
-
-.fa-h-square:before {
- content: ""; }
-
-.fa-plus-square:before {
- content: ""; }
-
-.fa-angle-double-left:before {
- content: ""; }
-
-.fa-angle-double-right:before {
- content: ""; }
-
-.fa-angle-double-up:before {
- content: ""; }
-
-.fa-angle-double-down:before {
- content: ""; }
-
-.fa-angle-left:before {
- content: ""; }
-
-.fa-angle-right:before {
- content: ""; }
-
-.fa-angle-up:before {
- content: ""; }
-
-.fa-angle-down:before {
- content: ""; }
-
-.fa-desktop:before {
- content: ""; }
-
-.fa-laptop:before {
- content: ""; }
-
-.fa-tablet:before {
- content: ""; }
-
-.fa-mobile-phone:before,
-.fa-mobile:before {
- content: ""; }
-
-.fa-circle-o:before {
- content: ""; }
-
-.fa-quote-left:before {
- content: ""; }
-
-.fa-quote-right:before {
- content: ""; }
-
-.fa-spinner:before {
- content: ""; }
-
-.fa-circle:before {
- content: ""; }
-
-.fa-mail-reply:before,
-.fa-reply:before {
- content: ""; }
-
-.fa-github-alt:before {
- content: ""; }
-
-.fa-folder-o:before {
- content: ""; }
-
-.fa-folder-open-o:before {
- content: ""; }
-
-.fa-smile-o:before {
- content: ""; }
-
-.fa-frown-o:before {
- content: ""; }
-
-.fa-meh-o:before {
- content: ""; }
-
-.fa-gamepad:before {
- content: ""; }
-
-.fa-keyboard-o:before {
- content: ""; }
-
-.fa-flag-o:before {
- content: ""; }
-
-.fa-flag-checkered:before {
- content: ""; }
-
-.fa-terminal:before {
- content: ""; }
-
-.fa-code:before {
- content: ""; }
-
-.fa-mail-reply-all:before,
-.fa-reply-all:before {
- content: ""; }
-
-.fa-star-half-empty:before,
-.fa-star-half-full:before,
-.fa-star-half-o:before {
- content: ""; }
-
-.fa-location-arrow:before {
- content: ""; }
-
-.fa-crop:before {
- content: ""; }
-
-.fa-code-fork:before {
- content: ""; }
-
-.fa-unlink:before,
-.fa-chain-broken:before {
- content: ""; }
-
-.fa-question:before {
- content: ""; }
-
-.fa-info:before {
- content: ""; }
-
-.fa-exclamation:before {
- content: ""; }
-
-.fa-superscript:before {
- content: ""; }
-
-.fa-subscript:before {
- content: ""; }
-
-.fa-eraser:before {
- content: ""; }
-
-.fa-puzzle-piece:before {
- content: ""; }
-
-.fa-microphone:before {
- content: ""; }
-
-.fa-microphone-slash:before {
- content: ""; }
-
-.fa-shield:before {
- content: ""; }
-
-.fa-calendar-o:before {
- content: ""; }
-
-.fa-fire-extinguisher:before {
- content: ""; }
-
-.fa-rocket:before {
- content: ""; }
-
-.fa-maxcdn:before {
- content: ""; }
-
-.fa-chevron-circle-left:before {
- content: ""; }
-
-.fa-chevron-circle-right:before {
- content: ""; }
-
-.fa-chevron-circle-up:before {
- content: ""; }
-
-.fa-chevron-circle-down:before {
- content: ""; }
-
-.fa-html5:before {
- content: ""; }
-
-.fa-css3:before {
- content: ""; }
-
-.fa-anchor:before {
- content: ""; }
-
-.fa-unlock-alt:before {
- content: ""; }
-
-.fa-bullseye:before {
- content: ""; }
-
-.fa-ellipsis-h:before {
- content: ""; }
-
-.fa-ellipsis-v:before {
- content: ""; }
-
-.fa-rss-square:before {
- content: ""; }
-
-.fa-play-circle:before {
- content: ""; }
-
-.fa-ticket:before {
- content: ""; }
-
-.fa-minus-square:before {
- content: ""; }
-
-.fa-minus-square-o:before {
- content: ""; }
-
-.fa-level-up:before {
- content: ""; }
-
-.fa-level-down:before {
- content: ""; }
-
-.fa-check-square:before {
- content: ""; }
-
-.fa-pencil-square:before {
- content: ""; }
-
-.fa-external-link-square:before {
- content: ""; }
-
-.fa-share-square:before {
- content: ""; }
-
-.fa-compass:before {
- content: ""; }
-
-.fa-toggle-down:before,
-.fa-caret-square-o-down:before {
- content: ""; }
-
-.fa-toggle-up:before,
-.fa-caret-square-o-up:before {
- content: ""; }
-
-.fa-toggle-right:before,
-.fa-caret-square-o-right:before {
- content: ""; }
-
-.fa-euro:before,
-.fa-eur:before {
- content: ""; }
-
-.fa-gbp:before {
- content: ""; }
-
-.fa-dollar:before,
-.fa-usd:before {
- content: ""; }
-
-.fa-rupee:before,
-.fa-inr:before {
- content: ""; }
-
-.fa-cny:before,
-.fa-rmb:before,
-.fa-yen:before,
-.fa-jpy:before {
- content: ""; }
-
-.fa-ruble:before,
-.fa-rouble:before,
-.fa-rub:before {
- content: ""; }
-
-.fa-won:before,
-.fa-krw:before {
- content: ""; }
-
-.fa-bitcoin:before,
-.fa-btc:before {
- content: ""; }
-
-.fa-file:before {
- content: ""; }
-
-.fa-file-text:before {
- content: ""; }
-
-.fa-sort-alpha-asc:before {
- content: ""; }
-
-.fa-sort-alpha-desc:before {
- content: ""; }
-
-.fa-sort-amount-asc:before {
- content: ""; }
-
-.fa-sort-amount-desc:before {
- content: ""; }
-
-.fa-sort-numeric-asc:before {
- content: ""; }
-
-.fa-sort-numeric-desc:before {
- content: ""; }
-
-.fa-thumbs-up:before {
- content: ""; }
-
-.fa-thumbs-down:before {
- content: ""; }
-
-.fa-youtube-square:before {
- content: ""; }
-
-.fa-youtube:before {
- content: ""; }
-
-.fa-xing:before {
- content: ""; }
-
-.fa-xing-square:before {
- content: ""; }
-
-.fa-youtube-play:before {
- content: ""; }
-
-.fa-dropbox:before {
- content: ""; }
-
-.fa-stack-overflow:before {
- content: ""; }
-
-.fa-instagram:before {
- content: ""; }
-
-.fa-flickr:before {
- content: ""; }
-
-.fa-adn:before {
- content: ""; }
-
-.fa-bitbucket:before, .icon-bitbucket:before {
- content: ""; }
-
-.fa-bitbucket-square:before {
- content: ""; }
-
-.fa-tumblr:before {
- content: ""; }
-
-.fa-tumblr-square:before {
- content: ""; }
-
-.fa-long-arrow-down:before {
- content: ""; }
-
-.fa-long-arrow-up:before {
- content: ""; }
-
-.fa-long-arrow-left:before {
- content: ""; }
-
-.fa-long-arrow-right:before {
- content: ""; }
-
-.fa-apple:before {
- content: ""; }
-
-.fa-windows:before {
- content: ""; }
-
-.fa-android:before {
- content: ""; }
-
-.fa-linux:before {
- content: ""; }
-
-.fa-dribbble:before {
- content: ""; }
-
-.fa-skype:before {
- content: ""; }
-
-.fa-foursquare:before {
- content: ""; }
-
-.fa-trello:before {
- content: ""; }
-
-.fa-female:before {
- content: ""; }
-
-.fa-male:before {
- content: ""; }
-
-.fa-gittip:before,
-.fa-gratipay:before {
- content: ""; }
-
-.fa-sun-o:before {
- content: ""; }
-
-.fa-moon-o:before {
- content: ""; }
-
-.fa-archive:before {
- content: ""; }
-
-.fa-bug:before {
- content: ""; }
-
-.fa-vk:before {
- content: ""; }
-
-.fa-weibo:before {
- content: ""; }
-
-.fa-renren:before {
- content: ""; }
-
-.fa-pagelines:before {
- content: ""; }
-
-.fa-stack-exchange:before {
- content: ""; }
-
-.fa-arrow-circle-o-right:before {
- content: ""; }
-
-.fa-arrow-circle-o-left:before {
- content: ""; }
-
-.fa-toggle-left:before,
-.fa-caret-square-o-left:before {
- content: ""; }
-
-.fa-dot-circle-o:before {
- content: ""; }
-
-.fa-wheelchair:before {
- content: ""; }
-
-.fa-vimeo-square:before {
- content: ""; }
-
-.fa-turkish-lira:before,
-.fa-try:before {
- content: ""; }
-
-.fa-plus-square-o:before {
- content: ""; }
-
-.fa-space-shuttle:before {
- content: ""; }
-
-.fa-slack:before {
- content: ""; }
-
-.fa-envelope-square:before {
- content: ""; }
-
-.fa-wordpress:before {
- content: ""; }
-
-.fa-openid:before {
- content: ""; }
-
-.fa-institution:before,
-.fa-bank:before,
-.fa-university:before {
- content: ""; }
-
-.fa-mortar-board:before,
-.fa-graduation-cap:before {
- content: ""; }
-
-.fa-yahoo:before {
- content: ""; }
-
-.fa-google:before {
- content: ""; }
-
-.fa-reddit:before {
- content: ""; }
-
-.fa-reddit-square:before {
- content: ""; }
-
-.fa-stumbleupon-circle:before {
- content: ""; }
-
-.fa-stumbleupon:before {
- content: ""; }
-
-.fa-delicious:before {
- content: ""; }
-
-.fa-digg:before {
- content: ""; }
-
-.fa-pied-piper-pp:before {
- content: ""; }
-
-.fa-pied-piper-alt:before {
- content: ""; }
-
-.fa-drupal:before {
- content: ""; }
-
-.fa-joomla:before {
- content: ""; }
-
-.fa-language:before {
- content: ""; }
-
-.fa-fax:before {
- content: ""; }
-
-.fa-building:before {
- content: ""; }
-
-.fa-child:before {
- content: ""; }
-
-.fa-paw:before {
- content: ""; }
-
-.fa-spoon:before {
- content: ""; }
-
-.fa-cube:before {
- content: ""; }
-
-.fa-cubes:before {
- content: ""; }
-
-.fa-behance:before {
- content: ""; }
-
-.fa-behance-square:before {
- content: ""; }
-
-.fa-steam:before {
- content: ""; }
-
-.fa-steam-square:before {
- content: ""; }
-
-.fa-recycle:before {
- content: ""; }
-
-.fa-automobile:before,
-.fa-car:before {
- content: ""; }
-
-.fa-cab:before,
-.fa-taxi:before {
- content: ""; }
-
-.fa-tree:before {
- content: ""; }
-
-.fa-spotify:before {
- content: ""; }
-
-.fa-deviantart:before {
- content: ""; }
-
-.fa-soundcloud:before {
- content: ""; }
-
-.fa-database:before {
- content: ""; }
-
-.fa-file-pdf-o:before {
- content: ""; }
-
-.fa-file-word-o:before {
- content: ""; }
-
-.fa-file-excel-o:before {
- content: ""; }
-
-.fa-file-powerpoint-o:before {
- content: ""; }
-
-.fa-file-photo-o:before,
-.fa-file-picture-o:before,
-.fa-file-image-o:before {
- content: ""; }
-
-.fa-file-zip-o:before,
-.fa-file-archive-o:before {
- content: ""; }
-
-.fa-file-sound-o:before,
-.fa-file-audio-o:before {
- content: ""; }
-
-.fa-file-movie-o:before,
-.fa-file-video-o:before {
- content: ""; }
-
-.fa-file-code-o:before {
- content: ""; }
-
-.fa-vine:before {
- content: ""; }
-
-.fa-codepen:before {
- content: ""; }
-
-.fa-jsfiddle:before {
- content: ""; }
-
-.fa-life-bouy:before,
-.fa-life-buoy:before,
-.fa-life-saver:before,
-.fa-support:before,
-.fa-life-ring:before {
- content: ""; }
-
-.fa-circle-o-notch:before {
- content: ""; }
-
-.fa-ra:before,
-.fa-resistance:before,
-.fa-rebel:before {
- content: ""; }
-
-.fa-ge:before,
-.fa-empire:before {
- content: ""; }
-
-.fa-git-square:before {
- content: ""; }
-
-.fa-git:before {
- content: ""; }
-
-.fa-y-combinator-square:before,
-.fa-yc-square:before,
-.fa-hacker-news:before {
- content: ""; }
-
-.fa-tencent-weibo:before {
- content: ""; }
-
-.fa-qq:before {
- content: ""; }
-
-.fa-wechat:before,
-.fa-weixin:before {
- content: ""; }
-
-.fa-send:before,
-.fa-paper-plane:before {
- content: ""; }
-
-.fa-send-o:before,
-.fa-paper-plane-o:before {
- content: ""; }
-
-.fa-history:before {
- content: ""; }
-
-.fa-circle-thin:before {
- content: ""; }
-
-.fa-header:before {
- content: ""; }
-
-.fa-paragraph:before {
- content: ""; }
-
-.fa-sliders:before {
- content: ""; }
-
-.fa-share-alt:before {
- content: ""; }
-
-.fa-share-alt-square:before {
- content: ""; }
-
-.fa-bomb:before {
- content: ""; }
-
-.fa-soccer-ball-o:before,
-.fa-futbol-o:before {
- content: ""; }
-
-.fa-tty:before {
- content: ""; }
-
-.fa-binoculars:before {
- content: ""; }
-
-.fa-plug:before {
- content: ""; }
-
-.fa-slideshare:before {
- content: ""; }
-
-.fa-twitch:before {
- content: ""; }
-
-.fa-yelp:before {
- content: ""; }
-
-.fa-newspaper-o:before {
- content: ""; }
-
-.fa-wifi:before {
- content: ""; }
-
-.fa-calculator:before {
- content: ""; }
-
-.fa-paypal:before {
- content: ""; }
-
-.fa-google-wallet:before {
- content: ""; }
-
-.fa-cc-visa:before {
- content: ""; }
-
-.fa-cc-mastercard:before {
- content: ""; }
-
-.fa-cc-discover:before {
- content: ""; }
-
-.fa-cc-amex:before {
- content: ""; }
-
-.fa-cc-paypal:before {
- content: ""; }
-
-.fa-cc-stripe:before {
- content: ""; }
-
-.fa-bell-slash:before {
- content: ""; }
-
-.fa-bell-slash-o:before {
- content: ""; }
-
-.fa-trash:before {
- content: ""; }
-
-.fa-copyright:before {
- content: ""; }
-
-.fa-at:before {
- content: ""; }
-
-.fa-eyedropper:before {
- content: ""; }
-
-.fa-paint-brush:before {
- content: ""; }
-
-.fa-birthday-cake:before {
- content: ""; }
-
-.fa-area-chart:before {
- content: ""; }
-
-.fa-pie-chart:before {
- content: ""; }
-
-.fa-line-chart:before {
- content: ""; }
-
-.fa-lastfm:before {
- content: ""; }
-
-.fa-lastfm-square:before {
- content: ""; }
-
-.fa-toggle-off:before {
- content: ""; }
-
-.fa-toggle-on:before {
- content: ""; }
-
-.fa-bicycle:before {
- content: ""; }
-
-.fa-bus:before {
- content: ""; }
-
-.fa-ioxhost:before {
- content: ""; }
-
-.fa-angellist:before {
- content: ""; }
-
-.fa-cc:before {
- content: ""; }
-
-.fa-shekel:before,
-.fa-sheqel:before,
-.fa-ils:before {
- content: ""; }
-
-.fa-meanpath:before {
- content: ""; }
-
-.fa-buysellads:before {
- content: ""; }
-
-.fa-connectdevelop:before {
- content: ""; }
-
-.fa-dashcube:before {
- content: ""; }
-
-.fa-forumbee:before {
- content: ""; }
-
-.fa-leanpub:before {
- content: ""; }
-
-.fa-sellsy:before {
- content: ""; }
-
-.fa-shirtsinbulk:before {
- content: ""; }
-
-.fa-simplybuilt:before {
- content: ""; }
-
-.fa-skyatlas:before {
- content: ""; }
-
-.fa-cart-plus:before {
- content: ""; }
-
-.fa-cart-arrow-down:before {
- content: ""; }
-
-.fa-diamond:before {
- content: ""; }
-
-.fa-ship:before {
- content: ""; }
-
-.fa-user-secret:before {
- content: ""; }
-
-.fa-motorcycle:before {
- content: ""; }
-
-.fa-street-view:before {
- content: ""; }
-
-.fa-heartbeat:before {
- content: ""; }
-
-.fa-venus:before {
- content: ""; }
-
-.fa-mars:before {
- content: ""; }
-
-.fa-mercury:before {
- content: ""; }
-
-.fa-intersex:before,
-.fa-transgender:before {
- content: ""; }
-
-.fa-transgender-alt:before {
- content: ""; }
-
-.fa-venus-double:before {
- content: ""; }
-
-.fa-mars-double:before {
- content: ""; }
-
-.fa-venus-mars:before {
- content: ""; }
-
-.fa-mars-stroke:before {
- content: ""; }
-
-.fa-mars-stroke-v:before {
- content: ""; }
-
-.fa-mars-stroke-h:before {
- content: ""; }
-
-.fa-neuter:before {
- content: ""; }
-
-.fa-genderless:before {
- content: ""; }
-
-.fa-facebook-official:before {
- content: ""; }
-
-.fa-pinterest-p:before {
- content: ""; }
-
-.fa-whatsapp:before {
- content: ""; }
-
-.fa-server:before {
- content: ""; }
-
-.fa-user-plus:before {
- content: ""; }
-
-.fa-user-times:before {
- content: ""; }
-
-.fa-hotel:before,
-.fa-bed:before {
- content: ""; }
-
-.fa-viacoin:before {
- content: ""; }
-
-.fa-train:before {
- content: ""; }
-
-.fa-subway:before {
- content: ""; }
-
-.fa-medium:before {
- content: ""; }
-
-.fa-yc:before,
-.fa-y-combinator:before {
- content: ""; }
-
-.fa-optin-monster:before {
- content: ""; }
-
-.fa-opencart:before {
- content: ""; }
-
-.fa-expeditedssl:before {
- content: ""; }
-
-.fa-battery-4:before,
-.fa-battery:before,
-.fa-battery-full:before {
- content: ""; }
-
-.fa-battery-3:before,
-.fa-battery-three-quarters:before {
- content: ""; }
-
-.fa-battery-2:before,
-.fa-battery-half:before {
- content: ""; }
-
-.fa-battery-1:before,
-.fa-battery-quarter:before {
- content: ""; }
-
-.fa-battery-0:before,
-.fa-battery-empty:before {
- content: ""; }
-
-.fa-mouse-pointer:before {
- content: ""; }
-
-.fa-i-cursor:before {
- content: ""; }
-
-.fa-object-group:before {
- content: ""; }
-
-.fa-object-ungroup:before {
- content: ""; }
-
-.fa-sticky-note:before {
- content: ""; }
-
-.fa-sticky-note-o:before {
- content: ""; }
-
-.fa-cc-jcb:before {
- content: ""; }
-
-.fa-cc-diners-club:before {
- content: ""; }
-
-.fa-clone:before {
- content: ""; }
-
-.fa-balance-scale:before {
- content: ""; }
-
-.fa-hourglass-o:before {
- content: ""; }
-
-.fa-hourglass-1:before,
-.fa-hourglass-start:before {
- content: ""; }
-
-.fa-hourglass-2:before,
-.fa-hourglass-half:before {
- content: ""; }
-
-.fa-hourglass-3:before,
-.fa-hourglass-end:before {
- content: ""; }
-
-.fa-hourglass:before {
- content: ""; }
-
-.fa-hand-grab-o:before,
-.fa-hand-rock-o:before {
- content: ""; }
-
-.fa-hand-stop-o:before,
-.fa-hand-paper-o:before {
- content: ""; }
-
-.fa-hand-scissors-o:before {
- content: ""; }
-
-.fa-hand-lizard-o:before {
- content: ""; }
-
-.fa-hand-spock-o:before {
- content: ""; }
-
-.fa-hand-pointer-o:before {
- content: ""; }
-
-.fa-hand-peace-o:before {
- content: ""; }
-
-.fa-trademark:before {
- content: ""; }
-
-.fa-registered:before {
- content: ""; }
-
-.fa-creative-commons:before {
- content: ""; }
-
-.fa-gg:before {
- content: ""; }
-
-.fa-gg-circle:before {
- content: ""; }
-
-.fa-tripadvisor:before {
- content: ""; }
-
-.fa-odnoklassniki:before {
- content: ""; }
-
-.fa-odnoklassniki-square:before {
- content: ""; }
-
-.fa-get-pocket:before {
- content: ""; }
-
-.fa-wikipedia-w:before {
- content: ""; }
-
-.fa-safari:before {
- content: ""; }
-
-.fa-chrome:before {
- content: ""; }
-
-.fa-firefox:before {
- content: ""; }
-
-.fa-opera:before {
- content: ""; }
-
-.fa-internet-explorer:before {
- content: ""; }
-
-.fa-tv:before,
-.fa-television:before {
- content: ""; }
-
-.fa-contao:before {
- content: ""; }
-
-.fa-500px:before {
- content: ""; }
-
-.fa-amazon:before {
- content: ""; }
-
-.fa-calendar-plus-o:before {
- content: ""; }
-
-.fa-calendar-minus-o:before {
- content: ""; }
-
-.fa-calendar-times-o:before {
- content: ""; }
-
-.fa-calendar-check-o:before {
- content: ""; }
-
-.fa-industry:before {
- content: ""; }
-
-.fa-map-pin:before {
- content: ""; }
-
-.fa-map-signs:before {
- content: ""; }
-
-.fa-map-o:before {
- content: ""; }
-
-.fa-map:before {
- content: ""; }
-
-.fa-commenting:before {
- content: ""; }
-
-.fa-commenting-o:before {
- content: ""; }
-
-.fa-houzz:before {
- content: ""; }
-
-.fa-vimeo:before {
- content: ""; }
-
-.fa-black-tie:before {
- content: ""; }
-
-.fa-fonticons:before {
- content: ""; }
-
-.fa-reddit-alien:before {
- content: ""; }
-
-.fa-edge:before {
- content: ""; }
-
-.fa-credit-card-alt:before {
- content: ""; }
-
-.fa-codiepie:before {
- content: ""; }
-
-.fa-modx:before {
- content: ""; }
-
-.fa-fort-awesome:before {
- content: ""; }
-
-.fa-usb:before {
- content: ""; }
-
-.fa-product-hunt:before {
- content: ""; }
-
-.fa-mixcloud:before {
- content: ""; }
-
-.fa-scribd:before {
- content: ""; }
-
-.fa-pause-circle:before {
- content: ""; }
-
-.fa-pause-circle-o:before {
- content: ""; }
-
-.fa-stop-circle:before {
- content: ""; }
-
-.fa-stop-circle-o:before {
- content: ""; }
-
-.fa-shopping-bag:before {
- content: ""; }
-
-.fa-shopping-basket:before {
- content: ""; }
-
-.fa-hashtag:before {
- content: ""; }
-
-.fa-bluetooth:before {
- content: ""; }
-
-.fa-bluetooth-b:before {
- content: ""; }
-
-.fa-percent:before {
- content: ""; }
-
-.fa-gitlab:before, .icon-gitlab:before {
- content: ""; }
-
-.fa-wpbeginner:before {
- content: ""; }
-
-.fa-wpforms:before {
- content: ""; }
-
-.fa-envira:before {
- content: ""; }
-
-.fa-universal-access:before {
- content: ""; }
-
-.fa-wheelchair-alt:before {
- content: ""; }
-
-.fa-question-circle-o:before {
- content: ""; }
-
-.fa-blind:before {
- content: ""; }
-
-.fa-audio-description:before {
- content: ""; }
-
-.fa-volume-control-phone:before {
- content: ""; }
-
-.fa-braille:before {
- content: ""; }
-
-.fa-assistive-listening-systems:before {
- content: ""; }
-
-.fa-asl-interpreting:before,
-.fa-american-sign-language-interpreting:before {
- content: ""; }
-
-.fa-deafness:before,
-.fa-hard-of-hearing:before,
-.fa-deaf:before {
- content: ""; }
-
-.fa-glide:before {
- content: ""; }
-
-.fa-glide-g:before {
- content: ""; }
-
-.fa-signing:before,
-.fa-sign-language:before {
- content: ""; }
-
-.fa-low-vision:before {
- content: ""; }
-
-.fa-viadeo:before {
- content: ""; }
-
-.fa-viadeo-square:before {
- content: ""; }
-
-.fa-snapchat:before {
- content: ""; }
-
-.fa-snapchat-ghost:before {
- content: ""; }
-
-.fa-snapchat-square:before {
- content: ""; }
-
-.fa-pied-piper:before {
- content: ""; }
-
-.fa-first-order:before {
- content: ""; }
-
-.fa-yoast:before {
- content: ""; }
-
-.fa-themeisle:before {
- content: ""; }
-
-.fa-google-plus-circle:before,
-.fa-google-plus-official:before {
- content: ""; }
-
-.fa-fa:before,
-.fa-font-awesome:before {
- content: ""; }
-
-.fa-handshake-o:before {
- content: ""; }
-
-.fa-envelope-open:before {
- content: ""; }
-
-.fa-envelope-open-o:before {
- content: ""; }
-
-.fa-linode:before {
- content: ""; }
-
-.fa-address-book:before {
- content: ""; }
-
-.fa-address-book-o:before {
- content: ""; }
-
-.fa-vcard:before,
-.fa-address-card:before {
- content: ""; }
-
-.fa-vcard-o:before,
-.fa-address-card-o:before {
- content: ""; }
-
-.fa-user-circle:before {
- content: ""; }
-
-.fa-user-circle-o:before {
- content: ""; }
-
-.fa-user-o:before {
- content: ""; }
-
-.fa-id-badge:before {
- content: ""; }
-
-.fa-drivers-license:before,
-.fa-id-card:before {
- content: ""; }
-
-.fa-drivers-license-o:before,
-.fa-id-card-o:before {
- content: ""; }
-
-.fa-quora:before {
- content: ""; }
-
-.fa-free-code-camp:before {
- content: ""; }
-
-.fa-telegram:before {
- content: ""; }
-
-.fa-thermometer-4:before,
-.fa-thermometer:before,
-.fa-thermometer-full:before {
- content: ""; }
-
-.fa-thermometer-3:before,
-.fa-thermometer-three-quarters:before {
- content: ""; }
-
-.fa-thermometer-2:before,
-.fa-thermometer-half:before {
- content: ""; }
-
-.fa-thermometer-1:before,
-.fa-thermometer-quarter:before {
- content: ""; }
-
-.fa-thermometer-0:before,
-.fa-thermometer-empty:before {
- content: ""; }
-
-.fa-shower:before {
- content: ""; }
-
-.fa-bathtub:before,
-.fa-s15:before,
-.fa-bath:before {
- content: ""; }
-
-.fa-podcast:before {
- content: ""; }
-
-.fa-window-maximize:before {
- content: ""; }
-
-.fa-window-minimize:before {
- content: ""; }
-
-.fa-window-restore:before {
- content: ""; }
-
-.fa-times-rectangle:before,
-.fa-window-close:before {
- content: ""; }
-
-.fa-times-rectangle-o:before,
-.fa-window-close-o:before {
- content: ""; }
-
-.fa-bandcamp:before {
- content: ""; }
-
-.fa-grav:before {
- content: ""; }
-
-.fa-etsy:before {
- content: ""; }
-
-.fa-imdb:before {
- content: ""; }
-
-.fa-ravelry:before {
- content: ""; }
-
-.fa-eercast:before {
- content: ""; }
-
-.fa-microchip:before {
- content: ""; }
-
-.fa-snowflake-o:before {
- content: ""; }
-
-.fa-superpowers:before {
- content: ""; }
-
-.fa-wpexplorer:before {
- content: ""; }
-
-.fa-meetup:before {
- content: ""; }
-
-.sr-only {
- position: absolute;
- width: 1px;
- height: 1px;
- padding: 0;
- margin: -1px;
- overflow: hidden;
- clip: rect(0, 0, 0, 0);
- border: 0; }
-
-.sr-only-focusable:active, .sr-only-focusable:focus {
- position: static;
- width: auto;
- height: auto;
- margin: 0;
- overflow: visible;
- clip: auto; }
-
-.fa, .wy-menu-vertical li span.toctree-expand, .wy-menu-vertical li.on a span.toctree-expand, .wy-menu-vertical li.current > a span.toctree-expand, .rst-content .admonition-title, .rst-content h1 .headerlink, .rst-content h2 .headerlink, .rst-content h3 .headerlink, .rst-content h4 .headerlink, .rst-content h5 .headerlink, .rst-content h6 .headerlink, .rst-content dl dt .headerlink, .rst-content p.caption .headerlink, .rst-content tt.download span:first-child, .rst-content code.download span:first-child, .icon, .wy-dropdown .caret, .wy-inline-validate.wy-inline-validate-success .wy-input-context, .wy-inline-validate.wy-inline-validate-danger .wy-input-context, .wy-inline-validate.wy-inline-validate-warning .wy-input-context, .wy-inline-validate.wy-inline-validate-info .wy-input-context {
- font-family: inherit; }
-
-.fa:before, .wy-menu-vertical li span.toctree-expand:before, .wy-menu-vertical li.on a span.toctree-expand:before, .wy-menu-vertical li.current > a span.toctree-expand:before, .rst-content .admonition-title:before, .rst-content h1 .headerlink:before, .rst-content h2 .headerlink:before, .rst-content h3 .headerlink:before, .rst-content h4 .headerlink:before, .rst-content h5 .headerlink:before, .rst-content h6 .headerlink:before, .rst-content dl dt .headerlink:before, .rst-content p.caption .headerlink:before, .rst-content tt.download span:first-child:before, .rst-content code.download span:first-child:before, .icon:before, .wy-dropdown .caret:before, .wy-inline-validate.wy-inline-validate-success .wy-input-context:before, .wy-inline-validate.wy-inline-validate-danger .wy-input-context:before, .wy-inline-validate.wy-inline-validate-warning .wy-input-context:before, .wy-inline-validate.wy-inline-validate-info .wy-input-context:before {
- font-family: "FontAwesome";
- display: inline-block;
- font-style: normal;
- font-weight: normal;
- line-height: 1;
- text-decoration: inherit; }
-
-a .fa, a .wy-menu-vertical li span.toctree-expand, .wy-menu-vertical li a span.toctree-expand, .wy-menu-vertical li.on a span.toctree-expand, .wy-menu-vertical li.current > a span.toctree-expand, a .rst-content .admonition-title, .rst-content a .admonition-title, a .rst-content h1 .headerlink, .rst-content h1 a .headerlink, a .rst-content h2 .headerlink, .rst-content h2 a .headerlink, a .rst-content h3 .headerlink, .rst-content h3 a .headerlink, a .rst-content h4 .headerlink, .rst-content h4 a .headerlink, a .rst-content h5 .headerlink, .rst-content h5 a .headerlink, a .rst-content h6 .headerlink, .rst-content h6 a .headerlink, a .rst-content dl dt .headerlink, .rst-content dl dt a .headerlink, a .rst-content p.caption .headerlink, .rst-content p.caption a .headerlink, a .rst-content tt.download span:first-child, .rst-content tt.download a span:first-child, a .rst-content code.download span:first-child, .rst-content code.download a span:first-child, a .icon {
- display: inline-block;
- text-decoration: inherit; }
-
-.btn .fa, .btn .wy-menu-vertical li span.toctree-expand, .wy-menu-vertical li .btn span.toctree-expand, .btn .wy-menu-vertical li.on a span.toctree-expand, .wy-menu-vertical li.on a .btn span.toctree-expand, .btn .wy-menu-vertical li.current > a span.toctree-expand, .wy-menu-vertical li.current > a .btn span.toctree-expand, .btn .rst-content .admonition-title, .rst-content .btn .admonition-title, .btn .rst-content h1 .headerlink, .rst-content h1 .btn .headerlink, .btn .rst-content h2 .headerlink, .rst-content h2 .btn .headerlink, .btn .rst-content h3 .headerlink, .rst-content h3 .btn .headerlink, .btn .rst-content h4 .headerlink, .rst-content h4 .btn .headerlink, .btn .rst-content h5 .headerlink, .rst-content h5 .btn .headerlink, .btn .rst-content h6 .headerlink, .rst-content h6 .btn .headerlink, .btn .rst-content dl dt .headerlink, .rst-content dl dt .btn .headerlink, .btn .rst-content p.caption .headerlink, .rst-content p.caption .btn .headerlink, .btn .rst-content tt.download span:first-child, .rst-content tt.download .btn span:first-child, .btn .rst-content code.download span:first-child, .rst-content code.download .btn span:first-child, .btn .icon, .nav .fa, .nav .wy-menu-vertical li span.toctree-expand, .wy-menu-vertical li .nav span.toctree-expand, .nav .wy-menu-vertical li.on a span.toctree-expand, .wy-menu-vertical li.on a .nav span.toctree-expand, .nav .wy-menu-vertical li.current > a span.toctree-expand, .wy-menu-vertical li.current > a .nav span.toctree-expand, .nav .rst-content .admonition-title, .rst-content .nav .admonition-title, .nav .rst-content h1 .headerlink, .rst-content h1 .nav .headerlink, .nav .rst-content h2 .headerlink, .rst-content h2 .nav .headerlink, .nav .rst-content h3 .headerlink, .rst-content h3 .nav .headerlink, .nav .rst-content h4 .headerlink, .rst-content h4 .nav .headerlink, .nav .rst-content h5 .headerlink, .rst-content h5 .nav .headerlink, .nav .rst-content h6 .headerlink, .rst-content h6 .nav .headerlink, .nav .rst-content dl dt .headerlink, .rst-content dl dt .nav .headerlink, .nav .rst-content p.caption .headerlink, .rst-content p.caption .nav .headerlink, .nav .rst-content tt.download span:first-child, .rst-content tt.download .nav span:first-child, .nav .rst-content code.download span:first-child, .rst-content code.download .nav span:first-child, .nav .icon {
- display: inline; }
-
-.btn .fa.fa-large, .btn .wy-menu-vertical li span.fa-large.toctree-expand, .wy-menu-vertical li .btn span.fa-large.toctree-expand, .btn .rst-content .fa-large.admonition-title, .rst-content .btn .fa-large.admonition-title, .btn .rst-content h1 .fa-large.headerlink, .rst-content h1 .btn .fa-large.headerlink, .btn .rst-content h2 .fa-large.headerlink, .rst-content h2 .btn .fa-large.headerlink, .btn .rst-content h3 .fa-large.headerlink, .rst-content h3 .btn .fa-large.headerlink, .btn .rst-content h4 .fa-large.headerlink, .rst-content h4 .btn .fa-large.headerlink, .btn .rst-content h5 .fa-large.headerlink, .rst-content h5 .btn .fa-large.headerlink, .btn .rst-content h6 .fa-large.headerlink, .rst-content h6 .btn .fa-large.headerlink, .btn .rst-content dl dt .fa-large.headerlink, .rst-content dl dt .btn .fa-large.headerlink, .btn .rst-content p.caption .fa-large.headerlink, .rst-content p.caption .btn .fa-large.headerlink, .btn .rst-content tt.download span.fa-large:first-child, .rst-content tt.download .btn span.fa-large:first-child, .btn .rst-content code.download span.fa-large:first-child, .rst-content code.download .btn span.fa-large:first-child, .btn .fa-large.icon, .nav .fa.fa-large, .nav .wy-menu-vertical li span.fa-large.toctree-expand, .wy-menu-vertical li .nav span.fa-large.toctree-expand, .nav .rst-content .fa-large.admonition-title, .rst-content .nav .fa-large.admonition-title, .nav .rst-content h1 .fa-large.headerlink, .rst-content h1 .nav .fa-large.headerlink, .nav .rst-content h2 .fa-large.headerlink, .rst-content h2 .nav .fa-large.headerlink, .nav .rst-content h3 .fa-large.headerlink, .rst-content h3 .nav .fa-large.headerlink, .nav .rst-content h4 .fa-large.headerlink, .rst-content h4 .nav .fa-large.headerlink, .nav .rst-content h5 .fa-large.headerlink, .rst-content h5 .nav .fa-large.headerlink, .nav .rst-content h6 .fa-large.headerlink, .rst-content h6 .nav .fa-large.headerlink, .nav .rst-content dl dt .fa-large.headerlink, .rst-content dl dt .nav .fa-large.headerlink, .nav .rst-content p.caption .fa-large.headerlink, .rst-content p.caption .nav .fa-large.headerlink, .nav .rst-content tt.download span.fa-large:first-child, .rst-content tt.download .nav span.fa-large:first-child, .nav .rst-content code.download span.fa-large:first-child, .rst-content code.download .nav span.fa-large:first-child, .nav .fa-large.icon {
- line-height: 0.9em; }
-
-.btn .fa.fa-spin, .btn .wy-menu-vertical li span.fa-spin.toctree-expand, .wy-menu-vertical li .btn span.fa-spin.toctree-expand, .btn .rst-content .fa-spin.admonition-title, .rst-content .btn .fa-spin.admonition-title, .btn .rst-content h1 .fa-spin.headerlink, .rst-content h1 .btn .fa-spin.headerlink, .btn .rst-content h2 .fa-spin.headerlink, .rst-content h2 .btn .fa-spin.headerlink, .btn .rst-content h3 .fa-spin.headerlink, .rst-content h3 .btn .fa-spin.headerlink, .btn .rst-content h4 .fa-spin.headerlink, .rst-content h4 .btn .fa-spin.headerlink, .btn .rst-content h5 .fa-spin.headerlink, .rst-content h5 .btn .fa-spin.headerlink, .btn .rst-content h6 .fa-spin.headerlink, .rst-content h6 .btn .fa-spin.headerlink, .btn .rst-content dl dt .fa-spin.headerlink, .rst-content dl dt .btn .fa-spin.headerlink, .btn .rst-content p.caption .fa-spin.headerlink, .rst-content p.caption .btn .fa-spin.headerlink, .btn .rst-content tt.download span.fa-spin:first-child, .rst-content tt.download .btn span.fa-spin:first-child, .btn .rst-content code.download span.fa-spin:first-child, .rst-content code.download .btn span.fa-spin:first-child, .btn .fa-spin.icon, .nav .fa.fa-spin, .nav .wy-menu-vertical li span.fa-spin.toctree-expand, .wy-menu-vertical li .nav span.fa-spin.toctree-expand, .nav .rst-content .fa-spin.admonition-title, .rst-content .nav .fa-spin.admonition-title, .nav .rst-content h1 .fa-spin.headerlink, .rst-content h1 .nav .fa-spin.headerlink, .nav .rst-content h2 .fa-spin.headerlink, .rst-content h2 .nav .fa-spin.headerlink, .nav .rst-content h3 .fa-spin.headerlink, .rst-content h3 .nav .fa-spin.headerlink, .nav .rst-content h4 .fa-spin.headerlink, .rst-content h4 .nav .fa-spin.headerlink, .nav .rst-content h5 .fa-spin.headerlink, .rst-content h5 .nav .fa-spin.headerlink, .nav .rst-content h6 .fa-spin.headerlink, .rst-content h6 .nav .fa-spin.headerlink, .nav .rst-content dl dt .fa-spin.headerlink, .rst-content dl dt .nav .fa-spin.headerlink, .nav .rst-content p.caption .fa-spin.headerlink, .rst-content p.caption .nav .fa-spin.headerlink, .nav .rst-content tt.download span.fa-spin:first-child, .rst-content tt.download .nav span.fa-spin:first-child, .nav .rst-content code.download span.fa-spin:first-child, .rst-content code.download .nav span.fa-spin:first-child, .nav .fa-spin.icon {
- display: inline-block; }
-
-.btn.fa:before, .wy-menu-vertical li span.btn.toctree-expand:before, .rst-content .btn.admonition-title:before, .rst-content h1 .btn.headerlink:before, .rst-content h2 .btn.headerlink:before, .rst-content h3 .btn.headerlink:before, .rst-content h4 .btn.headerlink:before, .rst-content h5 .btn.headerlink:before, .rst-content h6 .btn.headerlink:before, .rst-content dl dt .btn.headerlink:before, .rst-content p.caption .btn.headerlink:before, .rst-content tt.download span.btn:first-child:before, .rst-content code.download span.btn:first-child:before, .btn.icon:before {
- opacity: 0.5;
- -webkit-transition: opacity 0.05s ease-in;
- -moz-transition: opacity 0.05s ease-in;
- transition: opacity 0.05s ease-in; }
-
-.btn.fa:hover:before, .wy-menu-vertical li span.btn.toctree-expand:hover:before, .rst-content .btn.admonition-title:hover:before, .rst-content h1 .btn.headerlink:hover:before, .rst-content h2 .btn.headerlink:hover:before, .rst-content h3 .btn.headerlink:hover:before, .rst-content h4 .btn.headerlink:hover:before, .rst-content h5 .btn.headerlink:hover:before, .rst-content h6 .btn.headerlink:hover:before, .rst-content dl dt .btn.headerlink:hover:before, .rst-content p.caption .btn.headerlink:hover:before, .rst-content tt.download span.btn:first-child:hover:before, .rst-content code.download span.btn:first-child:hover:before, .btn.icon:hover:before {
- opacity: 1; }
-
-.btn-mini .fa:before, .btn-mini .wy-menu-vertical li span.toctree-expand:before, .wy-menu-vertical li .btn-mini span.toctree-expand:before, .btn-mini .rst-content .admonition-title:before, .rst-content .btn-mini .admonition-title:before, .btn-mini .rst-content h1 .headerlink:before, .rst-content h1 .btn-mini .headerlink:before, .btn-mini .rst-content h2 .headerlink:before, .rst-content h2 .btn-mini .headerlink:before, .btn-mini .rst-content h3 .headerlink:before, .rst-content h3 .btn-mini .headerlink:before, .btn-mini .rst-content h4 .headerlink:before, .rst-content h4 .btn-mini .headerlink:before, .btn-mini .rst-content h5 .headerlink:before, .rst-content h5 .btn-mini .headerlink:before, .btn-mini .rst-content h6 .headerlink:before, .rst-content h6 .btn-mini .headerlink:before, .btn-mini .rst-content dl dt .headerlink:before, .rst-content dl dt .btn-mini .headerlink:before, .btn-mini .rst-content p.caption .headerlink:before, .rst-content p.caption .btn-mini .headerlink:before, .btn-mini .rst-content tt.download span:first-child:before, .rst-content tt.download .btn-mini span:first-child:before, .btn-mini .rst-content code.download span:first-child:before, .rst-content code.download .btn-mini span:first-child:before, .btn-mini .icon:before {
- font-size: 14px;
- vertical-align: -15%; }
-
-.wy-alert, .rst-content .note, .rst-content .attention, .rst-content .caution, .rst-content .danger, .rst-content .error, .rst-content .hint, .rst-content .important, .rst-content .tip, .rst-content .warning, .rst-content .seealso, .rst-content .admonition-todo, .rst-content .admonition {
- padding: 12px;
- line-height: 24px;
- margin-bottom: 24px;
- background: #e7f2fa;
- border-radius: 5px;
- overflow: hidden; }
-
-.wy-alert-title, .rst-content .admonition-title {
- color: #fff;
- font-weight: 600;
- display: block;
- background: #6ab0de;
- margin: -12px;
- padding: 6px 12px;
- margin-bottom: 12px; }
-
-.wy-alert.wy-alert-danger, .rst-content .wy-alert-danger.note, .rst-content .wy-alert-danger.attention, .rst-content .wy-alert-danger.caution, .rst-content .danger, .rst-content .error, .rst-content .wy-alert-danger.hint, .rst-content .wy-alert-danger.important, .rst-content .wy-alert-danger.tip, .rst-content .wy-alert-danger.warning, .rst-content .wy-alert-danger.seealso, .rst-content .wy-alert-danger.admonition-todo, .rst-content .wy-alert-danger.admonition {
- background: #fdf3f2; }
-
-.wy-alert.wy-alert-danger .wy-alert-title, .rst-content .wy-alert-danger.note .wy-alert-title, .rst-content .wy-alert-danger.attention .wy-alert-title, .rst-content .wy-alert-danger.caution .wy-alert-title, .rst-content .danger .wy-alert-title, .rst-content .error .wy-alert-title, .rst-content .wy-alert-danger.hint .wy-alert-title, .rst-content .wy-alert-danger.important .wy-alert-title, .rst-content .wy-alert-danger.tip .wy-alert-title, .rst-content .wy-alert-danger.warning .wy-alert-title, .rst-content .wy-alert-danger.seealso .wy-alert-title, .rst-content .wy-alert-danger.admonition-todo .wy-alert-title, .rst-content .wy-alert-danger.admonition .wy-alert-title, .wy-alert.wy-alert-danger .rst-content .admonition-title, .rst-content .wy-alert.wy-alert-danger .admonition-title, .rst-content .wy-alert-danger.note .admonition-title, .rst-content .wy-alert-danger.attention .admonition-title, .rst-content .wy-alert-danger.caution .admonition-title, .rst-content .danger .admonition-title, .rst-content .error .admonition-title, .rst-content .wy-alert-danger.hint .admonition-title, .rst-content .wy-alert-danger.important .admonition-title, .rst-content .wy-alert-danger.tip .admonition-title, .rst-content .wy-alert-danger.warning .admonition-title, .rst-content .wy-alert-danger.seealso .admonition-title, .rst-content .wy-alert-danger.admonition-todo .admonition-title, .rst-content .wy-alert-danger.admonition .admonition-title {
- background: #f29f97; }
-
-.wy-alert.wy-alert-warning, .rst-content .wy-alert-warning.note, .rst-content .attention, .rst-content .caution, .rst-content .wy-alert-warning.danger, .rst-content .wy-alert-warning.error, .rst-content .wy-alert-warning.hint, .rst-content .wy-alert-warning.important, .rst-content .wy-alert-warning.tip, .rst-content .warning, .rst-content .wy-alert-warning.seealso, .rst-content .admonition-todo, .rst-content .wy-alert-warning.admonition {
- background: #ffedcc; }
-
-.wy-alert.wy-alert-warning .wy-alert-title, .rst-content .wy-alert-warning.note .wy-alert-title, .rst-content .attention .wy-alert-title, .rst-content .caution .wy-alert-title, .rst-content .wy-alert-warning.danger .wy-alert-title, .rst-content .wy-alert-warning.error .wy-alert-title, .rst-content .wy-alert-warning.hint .wy-alert-title, .rst-content .wy-alert-warning.important .wy-alert-title, .rst-content .wy-alert-warning.tip .wy-alert-title, .rst-content .warning .wy-alert-title, .rst-content .wy-alert-warning.seealso .wy-alert-title, .rst-content .admonition-todo .wy-alert-title, .rst-content .wy-alert-warning.admonition .wy-alert-title, .wy-alert.wy-alert-warning .rst-content .admonition-title, .rst-content .wy-alert.wy-alert-warning .admonition-title, .rst-content .wy-alert-warning.note .admonition-title, .rst-content .attention .admonition-title, .rst-content .caution .admonition-title, .rst-content .wy-alert-warning.danger .admonition-title, .rst-content .wy-alert-warning.error .admonition-title, .rst-content .wy-alert-warning.hint .admonition-title, .rst-content .wy-alert-warning.important .admonition-title, .rst-content .wy-alert-warning.tip .admonition-title, .rst-content .warning .admonition-title, .rst-content .wy-alert-warning.seealso .admonition-title, .rst-content .admonition-todo .admonition-title, .rst-content .wy-alert-warning.admonition .admonition-title {
- background: #f0b37e; }
-
-.wy-alert.wy-alert-info, .rst-content .note, .rst-content .wy-alert-info.attention, .rst-content .wy-alert-info.caution, .rst-content .wy-alert-info.danger, .rst-content .wy-alert-info.error, .rst-content .wy-alert-info.hint, .rst-content .wy-alert-info.important, .rst-content .wy-alert-info.tip, .rst-content .wy-alert-info.warning, .rst-content .seealso, .rst-content .wy-alert-info.admonition-todo, .rst-content .wy-alert-info.admonition {
- background: #e7f2fa; }
-
-.wy-alert.wy-alert-info .wy-alert-title, .rst-content .note .wy-alert-title, .rst-content .wy-alert-info.attention .wy-alert-title, .rst-content .wy-alert-info.caution .wy-alert-title, .rst-content .wy-alert-info.danger .wy-alert-title, .rst-content .wy-alert-info.error .wy-alert-title, .rst-content .wy-alert-info.hint .wy-alert-title, .rst-content .wy-alert-info.important .wy-alert-title, .rst-content .wy-alert-info.tip .wy-alert-title, .rst-content .wy-alert-info.warning .wy-alert-title, .rst-content .seealso .wy-alert-title, .rst-content .wy-alert-info.admonition-todo .wy-alert-title, .rst-content .wy-alert-info.admonition .wy-alert-title, .wy-alert.wy-alert-info .rst-content .admonition-title, .rst-content .wy-alert.wy-alert-info .admonition-title, .rst-content .note .admonition-title, .rst-content .wy-alert-info.attention .admonition-title, .rst-content .wy-alert-info.caution .admonition-title, .rst-content .wy-alert-info.danger .admonition-title, .rst-content .wy-alert-info.error .admonition-title, .rst-content .wy-alert-info.hint .admonition-title, .rst-content .wy-alert-info.important .admonition-title, .rst-content .wy-alert-info.tip .admonition-title, .rst-content .wy-alert-info.warning .admonition-title, .rst-content .seealso .admonition-title, .rst-content .wy-alert-info.admonition-todo .admonition-title, .rst-content .wy-alert-info.admonition .admonition-title {
- background: #6ab0de; }
-
-.wy-alert.wy-alert-success, .rst-content .wy-alert-success.note, .rst-content .wy-alert-success.attention, .rst-content .wy-alert-success.caution, .rst-content .wy-alert-success.danger, .rst-content .wy-alert-success.error, .rst-content .hint, .rst-content .important, .rst-content .tip, .rst-content .wy-alert-success.warning, .rst-content .wy-alert-success.seealso, .rst-content .wy-alert-success.admonition-todo, .rst-content .wy-alert-success.admonition {
- background: #dbfaf4; }
-
-.wy-alert.wy-alert-success .wy-alert-title, .rst-content .wy-alert-success.note .wy-alert-title, .rst-content .wy-alert-success.attention .wy-alert-title, .rst-content .wy-alert-success.caution .wy-alert-title, .rst-content .wy-alert-success.danger .wy-alert-title, .rst-content .wy-alert-success.error .wy-alert-title, .rst-content .hint .wy-alert-title, .rst-content .important .wy-alert-title, .rst-content .tip .wy-alert-title, .rst-content .wy-alert-success.warning .wy-alert-title, .rst-content .wy-alert-success.seealso .wy-alert-title, .rst-content .wy-alert-success.admonition-todo .wy-alert-title, .rst-content .wy-alert-success.admonition .wy-alert-title, .wy-alert.wy-alert-success .rst-content .admonition-title, .rst-content .wy-alert.wy-alert-success .admonition-title, .rst-content .wy-alert-success.note .admonition-title, .rst-content .wy-alert-success.attention .admonition-title, .rst-content .wy-alert-success.caution .admonition-title, .rst-content .wy-alert-success.danger .admonition-title, .rst-content .wy-alert-success.error .admonition-title, .rst-content .hint .admonition-title, .rst-content .important .admonition-title, .rst-content .tip .admonition-title, .rst-content .wy-alert-success.warning .admonition-title, .rst-content .wy-alert-success.seealso .admonition-title, .rst-content .wy-alert-success.admonition-todo .admonition-title, .rst-content .wy-alert-success.admonition .admonition-title {
- background: #1abc9c; }
-
-.wy-alert.wy-alert-neutral, .rst-content .wy-alert-neutral.note, .rst-content .wy-alert-neutral.attention, .rst-content .wy-alert-neutral.caution, .rst-content .wy-alert-neutral.danger, .rst-content .wy-alert-neutral.error, .rst-content .wy-alert-neutral.hint, .rst-content .wy-alert-neutral.important, .rst-content .wy-alert-neutral.tip, .rst-content .wy-alert-neutral.warning, .rst-content .wy-alert-neutral.seealso, .rst-content .wy-alert-neutral.admonition-todo, .rst-content .wy-alert-neutral.admonition {
- background: #f3f6f6; }
-
-.wy-alert.wy-alert-neutral .wy-alert-title, .rst-content .wy-alert-neutral.note .wy-alert-title, .rst-content .wy-alert-neutral.attention .wy-alert-title, .rst-content .wy-alert-neutral.caution .wy-alert-title, .rst-content .wy-alert-neutral.danger .wy-alert-title, .rst-content .wy-alert-neutral.error .wy-alert-title, .rst-content .wy-alert-neutral.hint .wy-alert-title, .rst-content .wy-alert-neutral.important .wy-alert-title, .rst-content .wy-alert-neutral.tip .wy-alert-title, .rst-content .wy-alert-neutral.warning .wy-alert-title, .rst-content .wy-alert-neutral.seealso .wy-alert-title, .rst-content .wy-alert-neutral.admonition-todo .wy-alert-title, .rst-content .wy-alert-neutral.admonition .wy-alert-title, .wy-alert.wy-alert-neutral .rst-content .admonition-title, .rst-content .wy-alert.wy-alert-neutral .admonition-title, .rst-content .wy-alert-neutral.note .admonition-title, .rst-content .wy-alert-neutral.attention .admonition-title, .rst-content .wy-alert-neutral.caution .admonition-title, .rst-content .wy-alert-neutral.danger .admonition-title, .rst-content .wy-alert-neutral.error .admonition-title, .rst-content .wy-alert-neutral.hint .admonition-title, .rst-content .wy-alert-neutral.important .admonition-title, .rst-content .wy-alert-neutral.tip .admonition-title, .rst-content .wy-alert-neutral.warning .admonition-title, .rst-content .wy-alert-neutral.seealso .admonition-title, .rst-content .wy-alert-neutral.admonition-todo .admonition-title, .rst-content .wy-alert-neutral.admonition .admonition-title {
- color: #404040;
- background: #e1e4e5; }
-
-.wy-alert.wy-alert-neutral a, .rst-content .wy-alert-neutral.note a, .rst-content .wy-alert-neutral.attention a, .rst-content .wy-alert-neutral.caution a, .rst-content .wy-alert-neutral.danger a, .rst-content .wy-alert-neutral.error a, .rst-content .wy-alert-neutral.hint a, .rst-content .wy-alert-neutral.important a, .rst-content .wy-alert-neutral.tip a, .rst-content .wy-alert-neutral.warning a, .rst-content .wy-alert-neutral.seealso a, .rst-content .wy-alert-neutral.admonition-todo a, .rst-content .wy-alert-neutral.admonition a {
- color: #2980B9; }
-
-.wy-alert p:last-child, .rst-content .note p:last-child, .rst-content .attention p:last-child, .rst-content .caution p:last-child, .rst-content .danger p:last-child, .rst-content .error p:last-child, .rst-content .hint p:last-child, .rst-content .important p:last-child, .rst-content .tip p:last-child, .rst-content .warning p:last-child, .rst-content .seealso p:last-child, .rst-content .admonition-todo p:last-child, .rst-content .admonition p:last-child {
- margin-bottom: 0; }
-
-.wy-tray-container {
- position: fixed;
- bottom: 0px;
- left: 0;
- z-index: 600; }
-
-.wy-tray-container li {
- display: block;
- width: 300px;
- background: transparent;
- color: #fff;
- text-align: center;
- box-shadow: 0 5px 5px 0 rgba(0, 0, 0, 0.1);
- padding: 0 24px;
- min-width: 20%;
- opacity: 0;
- height: 0;
- line-height: 56px;
- overflow: hidden;
- -webkit-transition: all 0.3s ease-in;
- -moz-transition: all 0.3s ease-in;
- transition: all 0.3s ease-in; }
-
-.wy-tray-container li.wy-tray-item-success {
- background: #27AE60; }
-
-.wy-tray-container li.wy-tray-item-info {
- background: #2980B9; }
-
-.wy-tray-container li.wy-tray-item-warning {
- background: #E67E22; }
-
-.wy-tray-container li.wy-tray-item-danger {
- background: #E74C3C; }
-
-.wy-tray-container li.on {
- opacity: 1;
- height: 56px; }
-
-@media screen and (max-width: 768px) {
- .wy-tray-container {
- bottom: auto;
- top: 0;
- width: 100%; }
- .wy-tray-container li {
- width: 100%; } }
-
-button {
- font-size: 100%;
- margin: 0;
- vertical-align: baseline;
- *vertical-align: middle;
- cursor: pointer;
- line-height: normal;
- -webkit-appearance: button;
- *overflow: visible; }
-
-button::-moz-focus-inner, input::-moz-focus-inner {
- border: 0;
- padding: 0; }
-
-button[disabled] {
- cursor: default; }
-
-.btn {
- /* Structure */
- display: inline-flex;
- align-items: center;
- line-height: normal;
- white-space: nowrap;
- text-align: center;
- cursor: pointer;
- font-size: 100%;
- padding: 6px 12px 6px 12px;
- color: #fff;
- background-color: #27AE60;
- text-decoration: none;
- border-radius: 5px;
- font-weight: normal;
-  outline: none;
- vertical-align: middle;
- *display: inline;
- zoom: 1;
- -webkit-user-drag: none;
- -webkit-user-select: none;
- -moz-user-select: none;
- -ms-user-select: none;
- user-select: none;
- -webkit-transition: all 0.1s linear;
- -moz-transition: all 0.1s linear;
- transition: all 0.1s linear; }
-
-.btn-hover {
- background: #2e8ece;
- color: #fff; }
-
-.btn:hover {
- background: #2cc36b;
- color: #fff; }
-
-.btn:focus {
- background: #2cc36b;
- outline: 0; }
-
-.btn:active {
- box-shadow: 0px -1px 0px 0px rgba(0, 0, 0, 0.05) inset, 0px 2px 0px 0px rgba(0, 0, 0, 0.1) inset; }
-
-.btn:visited {
- color: #fff; }
-
-.btn:disabled {
- background-image: none;
- filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
- filter: alpha(opacity=40);
- opacity: 0.4;
- cursor: not-allowed;
- box-shadow: none; }
-
-.btn-disabled {
- background-image: none;
- filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
- filter: alpha(opacity=40);
- opacity: 0.4;
- cursor: not-allowed;
- box-shadow: none; }
-
-.btn-disabled:hover, .btn-disabled:focus, .btn-disabled:active {
- background-image: none;
- filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
- filter: alpha(opacity=40);
- opacity: 0.4;
- cursor: not-allowed;
- box-shadow: none; }
-
-.btn::-moz-focus-inner {
- padding: 0;
- border: 0; }
-
-.btn-small {
- font-size: 80%; }
-
-.btn-info {
- background-color: #2980B9 !important; }
-
-.btn-info:hover {
- background-color: #2e8ece !important; }
-
-.btn-neutral {
- background-color: #f5f7f9 !important;
- color: #039bee !important; }
-
-.btn-neutral:hover {
- background-color: #057eb6 !important;
- color: #fff !important; }
-
-.btn-success {
- background-color: #27AE60 !important; }
-
-.btn-success:hover {
- background-color: #229955 !important; }
-
-.btn-danger {
- background-color: #E74C3C !important; }
-
-.btn-danger:hover {
- background-color: #ea6153 !important; }
-
-.btn-warning {
- background-color: #E67E22 !important; }
-
-.btn-warning:hover {
- background-color: #e98b39 !important; }
-
-.btn-invert {
- background-color: #222; }
-
-.btn-invert:hover {
- background-color: #2f2f2f !important; }
-
-.btn-link {
- background-color: transparent !important;
- color: #2980B9;
- box-shadow: none;
- border-color: transparent !important; }
-
-.btn-link:hover {
- background-color: transparent !important;
- color: #409ad5 !important;
- box-shadow: none; }
-
-.btn-link:active {
- background-color: transparent !important;
- color: #409ad5 !important;
- box-shadow: none; }
-
-.btn-link:visited {
- color: #9B59B6; }
-
-.wy-btn-group .btn, .wy-control .btn {
- vertical-align: middle; }
-
-.wy-btn-group {
- margin-bottom: 24px;
- *zoom: 1; }
-
-.wy-btn-group:before, .wy-btn-group:after {
- display: table;
- content: ""; }
-
-.wy-btn-group:after {
- clear: both; }
-
-.wy-dropdown {
- position: relative;
- display: inline-block; }
-
-.wy-dropdown-active .wy-dropdown-menu {
- display: block; }
-
-.wy-dropdown-menu {
- position: absolute;
- left: 0;
- display: none;
- float: left;
- top: 100%;
- min-width: 100%;
- background: #fcfcfc;
- z-index: 100;
- border: solid 1px #cfd7dd;
- box-shadow: 0 2px 2px 0 rgba(0, 0, 0, 0.1);
- padding: 12px; }
-
-.wy-dropdown-menu > dd > a {
- display: block;
- clear: both;
- color: #404040;
- white-space: nowrap;
- font-size: 90%;
- padding: 0 12px;
- cursor: pointer; }
-
-.wy-dropdown-menu > dd > a:hover {
- background: #2980B9;
- color: #fff; }
-
-.wy-dropdown-menu > dd.divider {
- border-top: solid 1px #cfd7dd;
- margin: 6px 0; }
-
-.wy-dropdown-menu > dd.search {
- padding-bottom: 12px; }
-
-.wy-dropdown-menu > dd.search input[type="search"] {
- width: 100%; }
-
-.wy-dropdown-menu > dd.call-to-action {
- background: #e3e3e3;
- text-transform: uppercase;
- font-weight: 500;
- font-size: 80%; }
-
-.wy-dropdown-menu > dd.call-to-action:hover {
- background: #e3e3e3; }
-
-.wy-dropdown-menu > dd.call-to-action .btn {
- color: #fff; }
-
-.wy-dropdown.wy-dropdown-up .wy-dropdown-menu {
- bottom: 100%;
- top: auto;
- left: auto;
- right: 0; }
-
-.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu {
- background: #fcfcfc;
- margin-top: 2px; }
-
-.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a {
- padding: 6px 12px; }
-
-.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover {
- background: #2980B9;
- color: #fff; }
-
-.wy-dropdown.wy-dropdown-left .wy-dropdown-menu {
- right: 0;
- left: auto;
- text-align: right; }
-
-.wy-dropdown-arrow:before {
- content: " ";
- border-bottom: 5px solid whitesmoke;
- border-left: 5px solid transparent;
- border-right: 5px solid transparent;
- position: absolute;
- display: block;
- top: -4px;
- left: 50%;
- margin-left: -3px; }
-
-.wy-dropdown-arrow.wy-dropdown-arrow-left:before {
- left: 11px; }
-
-.wy-form-stacked select {
- display: block;
-  padding-top: 30px; }
-
-.wy-form-aligned input, .wy-form-aligned textarea, .wy-form-aligned select, .wy-form-aligned .wy-help-inline, .wy-form-aligned label {
- display: inline-block;
- *display: inline;
- *zoom: 1;
- vertical-align: middle; }
-
-.wy-form-aligned .wy-control-group > label {
- display: inline-block;
- vertical-align: middle;
- width: 10em;
- margin: 6px 12px 0 0;
- float: left; }
-
-.wy-form-aligned .wy-control {
- float: left; }
-
-.wy-form-aligned .wy-control label {
- display: block; }
-
-.wy-form-aligned .wy-control select {
- margin-top: 6px; }
-
-fieldset {
- border: 0;
- margin: 0;
- padding: 0; }
-
-legend {
- display: block;
- width: 100%;
- border: 0;
- padding: 0;
- white-space: normal;
- margin-bottom: 24px;
- font-size: 150%;
- *margin-left: -7px; }
-
-label {
- display: block;
- margin: 0 0 0.3125em 0;
- color: #333;
- font-size: 90%; }
-
-input, select, textarea {
- font-size: 100%;
- margin: 0;
- vertical-align: baseline;
- *vertical-align: middle; }
-
-.wy-control-group {
- margin-bottom: 24px;
- max-width: 68em;
- margin-left: auto;
- margin-right: auto;
- *zoom: 1; }
-
-.wy-control-group:before, .wy-control-group:after {
- display: table;
- content: ""; }
-
-.wy-control-group:after {
- clear: both; }
-
-.wy-control-group.wy-control-group-required > label:after {
- content: " *";
- color: #E74C3C; }
-
-.wy-control-group .wy-form-full, .wy-control-group .wy-form-halves, .wy-control-group .wy-form-thirds {
- padding-bottom: 12px; }
-
-.wy-control-group .wy-form-full select, .wy-control-group .wy-form-halves select, .wy-control-group .wy-form-thirds select {
- width: 100%; }
-
-.wy-control-group .wy-form-full input[type="text"], .wy-control-group .wy-form-full input[type="password"], .wy-control-group .wy-form-full input[type="email"], .wy-control-group .wy-form-full input[type="url"], .wy-control-group .wy-form-full input[type="date"], .wy-control-group .wy-form-full input[type="month"], .wy-control-group .wy-form-full input[type="time"], .wy-control-group .wy-form-full input[type="datetime"], .wy-control-group .wy-form-full input[type="datetime-local"], .wy-control-group .wy-form-full input[type="week"], .wy-control-group .wy-form-full input[type="number"], .wy-control-group .wy-form-full input[type="search"], .wy-control-group .wy-form-full input[type="tel"], .wy-control-group .wy-form-full input[type="color"], .wy-control-group .wy-form-halves input[type="text"], .wy-control-group .wy-form-halves input[type="password"], .wy-control-group .wy-form-halves input[type="email"], .wy-control-group .wy-form-halves input[type="url"], .wy-control-group .wy-form-halves input[type="date"], .wy-control-group .wy-form-halves input[type="month"], .wy-control-group .wy-form-halves input[type="time"], .wy-control-group .wy-form-halves input[type="datetime"], .wy-control-group .wy-form-halves input[type="datetime-local"], .wy-control-group .wy-form-halves input[type="week"], .wy-control-group .wy-form-halves input[type="number"], .wy-control-group .wy-form-halves input[type="search"], .wy-control-group .wy-form-halves input[type="tel"], .wy-control-group .wy-form-halves input[type="color"], .wy-control-group .wy-form-thirds input[type="text"], .wy-control-group .wy-form-thirds input[type="password"], .wy-control-group .wy-form-thirds input[type="email"], .wy-control-group .wy-form-thirds input[type="url"], .wy-control-group .wy-form-thirds input[type="date"], .wy-control-group .wy-form-thirds input[type="month"], .wy-control-group .wy-form-thirds input[type="time"], .wy-control-group .wy-form-thirds input[type="datetime"], .wy-control-group .wy-form-thirds input[type="datetime-local"], .wy-control-group .wy-form-thirds input[type="week"], .wy-control-group .wy-form-thirds input[type="number"], .wy-control-group .wy-form-thirds input[type="search"], .wy-control-group .wy-form-thirds input[type="tel"], .wy-control-group .wy-form-thirds input[type="color"] {
- width: 100%; }
-
-.wy-control-group .wy-form-full {
- float: left;
- display: block;
- width: 100%;
- margin-right: 0; }
-
-.wy-control-group .wy-form-full:last-child {
- margin-right: 0; }
-
-.wy-control-group .wy-form-halves {
- float: left;
- display: block;
- margin-right: 2.3576515979%;
- width: 48.821174201%; }
-
-.wy-control-group .wy-form-halves:last-child {
- margin-right: 0; }
-
-.wy-control-group .wy-form-halves:nth-of-type(2n) {
- margin-right: 0; }
-
-.wy-control-group .wy-form-halves:nth-of-type(2n+1) {
- clear: left; }
-
-.wy-control-group .wy-form-thirds {
- float: left;
- display: block;
- margin-right: 2.3576515979%;
- width: 31.7615656014%; }
-
-.wy-control-group .wy-form-thirds:last-child {
- margin-right: 0; }
-
-.wy-control-group .wy-form-thirds:nth-of-type(3n) {
- margin-right: 0; }
-
-.wy-control-group .wy-form-thirds:nth-of-type(3n+1) {
- clear: left; }
-
-.wy-control-group.wy-control-group-no-input .wy-control {
- margin: 6px 0 0 0;
- font-size: 90%; }
-
-.wy-control-no-input {
- display: inline-block;
- margin: 6px 0 0 0;
- font-size: 90%; }
-
-.wy-control-group.fluid-input input[type="text"], .wy-control-group.fluid-input input[type="password"], .wy-control-group.fluid-input input[type="email"], .wy-control-group.fluid-input input[type="url"], .wy-control-group.fluid-input input[type="date"], .wy-control-group.fluid-input input[type="month"], .wy-control-group.fluid-input input[type="time"], .wy-control-group.fluid-input input[type="datetime"], .wy-control-group.fluid-input input[type="datetime-local"], .wy-control-group.fluid-input input[type="week"], .wy-control-group.fluid-input input[type="number"], .wy-control-group.fluid-input input[type="search"], .wy-control-group.fluid-input input[type="tel"], .wy-control-group.fluid-input input[type="color"] {
- width: 100%; }
-
-.wy-form-message-inline {
- display: inline-block;
- padding-left: 0.3em;
- color: #666;
- vertical-align: middle;
- font-size: 90%; }
-
-.wy-form-message {
- display: block;
- color: #999;
- font-size: 70%;
- margin-top: 0.3125em;
- font-style: italic; }
-
-.wy-form-message p {
- font-size: inherit;
- font-style: italic;
- margin-bottom: 6px; }
-
-.wy-form-message p:last-child {
- margin-bottom: 0; }
-
-input {
- line-height: normal; }
-
-input[type="button"], input[type="reset"], input[type="submit"] {
- -webkit-appearance: button;
- cursor: pointer;
- font-family: "Nunito", Arial, sans-serif;
- *overflow: visible; }
-
-input[type="text"], input[type="password"], input[type="email"], input[type="url"], input[type="date"], input[type="month"], input[type="time"], input[type="datetime"], input[type="datetime-local"], input[type="week"], input[type="number"], input[type="search"], input[type="tel"], input[type="color"] {
- -webkit-appearance: none;
- padding: 6px;
- display: inline-block;
- border: 1px solid #ccc;
- font-size: 80%;
- font-family: "Nunito", Arial, sans-serif;
- box-shadow: inset 0 1px 3px #ddd;
- border-radius: 0;
- -webkit-transition: border 0.3s linear;
- -moz-transition: border 0.3s linear;
- transition: border 0.3s linear; }
-
-input[type="datetime-local"] {
- padding: 0.34375em 0.625em; }
-
-input[disabled] {
- cursor: default; }
-
-input[type="checkbox"], input[type="radio"] {
- -webkit-box-sizing: border-box;
- -moz-box-sizing: border-box;
- box-sizing: border-box;
- padding: 0;
- margin-right: 0.3125em;
- *height: 13px;
- *width: 13px; }
-
-input[type="search"] {
- -webkit-box-sizing: border-box;
- -moz-box-sizing: border-box;
- box-sizing: border-box; }
-
-input[type="search"]::-webkit-search-cancel-button, input[type="search"]::-webkit-search-decoration {
- -webkit-appearance: none; }
-
-input[type="text"]:focus, input[type="password"]:focus, input[type="email"]:focus, input[type="url"]:focus, input[type="date"]:focus, input[type="month"]:focus, input[type="time"]:focus, input[type="datetime"]:focus, input[type="datetime-local"]:focus, input[type="week"]:focus, input[type="number"]:focus, input[type="search"]:focus, input[type="tel"]:focus, input[type="color"]:focus {
- outline: 0;
- outline: thin dotted \9;
- border-color: #333; }
-
-input.no-focus:focus {
- border-color: #ccc !important; }
-
-input[type="file"]:focus, input[type="radio"]:focus, input[type="checkbox"]:focus {
- outline: thin dotted #333;
- outline: 1px auto #129FEA; }
-
-input[type="text"][disabled], input[type="password"][disabled], input[type="email"][disabled], input[type="url"][disabled], input[type="date"][disabled], input[type="month"][disabled], input[type="time"][disabled], input[type="datetime"][disabled], input[type="datetime-local"][disabled], input[type="week"][disabled], input[type="number"][disabled], input[type="search"][disabled], input[type="tel"][disabled], input[type="color"][disabled] {
- cursor: not-allowed;
- background-color: #fafafa; }
-
-input:focus:invalid, textarea:focus:invalid, select:focus:invalid {
- color: #E74C3C;
- border: 1px solid #E74C3C; }
-
-input:focus:invalid:focus, textarea:focus:invalid:focus, select:focus:invalid:focus {
- border-color: #E74C3C; }
-
-input[type="file"]:focus:invalid:focus, input[type="radio"]:focus:invalid:focus, input[type="checkbox"]:focus:invalid:focus {
- outline-color: #E74C3C; }
-
-input.wy-input-large {
- padding: 12px;
- font-size: 100%; }
-
-textarea {
- overflow: auto;
- vertical-align: top;
- width: 100%;
- font-family: "Nunito", Arial, sans-serif; }
-
-select, textarea {
- padding: 0.5em 0.625em;
- display: inline-block;
- border: 1px solid #ccc;
- font-size: 80%;
- box-shadow: inset 0 1px 3px #ddd;
- -webkit-transition: border 0.3s linear;
- -moz-transition: border 0.3s linear;
- transition: border 0.3s linear; }
-
-select {
- border: 1px solid #ccc;
- background-color: #fff; }
-
-select[multiple] {
- height: auto; }
-
-select:focus, textarea:focus {
- outline: 0; }
-
-select[disabled], textarea[disabled], input[readonly], select[readonly], textarea[readonly] {
- cursor: not-allowed;
- background-color: #fafafa; }
-
-input[type="radio"][disabled], input[type="checkbox"][disabled] {
- cursor: not-allowed; }
-
-.wy-checkbox, .wy-radio {
- margin: 6px 0;
- color: #404040;
- display: block; }
-
-.wy-checkbox input, .wy-radio input {
- vertical-align: baseline; }
-
-.wy-form-message-inline {
- display: inline-block;
- *display: inline;
- *zoom: 1;
- vertical-align: middle; }
-
-.wy-input-prefix, .wy-input-suffix {
- white-space: nowrap;
- padding: 6px; }
-
-.wy-input-prefix .wy-input-context, .wy-input-suffix .wy-input-context {
- line-height: 27px;
- padding: 0 8px;
- display: inline-block;
- font-size: 80%;
- background-color: #f3f6f6;
- border: solid 1px #ccc;
- color: #999; }
-
-.wy-input-suffix .wy-input-context {
- border-left: 0; }
-
-.wy-input-prefix .wy-input-context {
- border-right: 0; }
-
-.wy-switch {
- position: relative;
- display: block;
- height: 24px;
- margin-top: 12px;
- cursor: pointer; }
-
-.wy-switch:before {
- position: absolute;
- content: "";
- display: block;
- left: 0;
- top: 0;
- width: 36px;
- height: 12px;
- border-radius: 4px;
- background: #ccc;
- -webkit-transition: all 0.2s ease-in-out;
- -moz-transition: all 0.2s ease-in-out;
- transition: all 0.2s ease-in-out; }
-
-.wy-switch:after {
- position: absolute;
- content: "";
- display: block;
- width: 18px;
- height: 18px;
- border-radius: 4px;
- background: #999;
- left: -3px;
- top: -3px;
- -webkit-transition: all 0.2s ease-in-out;
- -moz-transition: all 0.2s ease-in-out;
- transition: all 0.2s ease-in-out; }
-
-.wy-switch span {
- position: absolute;
- left: 48px;
- display: block;
- font-size: 12px;
- color: #ccc;
- line-height: 1; }
-
-.wy-switch.active:before {
- background: #1e8449; }
-
-.wy-switch.active:after {
- left: 24px;
- background: #27AE60; }
-
-.wy-switch.disabled {
- cursor: not-allowed;
- opacity: 0.8; }
-
-.wy-control-group.wy-control-group-error .wy-form-message, .wy-control-group.wy-control-group-error > label {
- color: #E74C3C; }
-
-.wy-control-group.wy-control-group-error input[type="text"], .wy-control-group.wy-control-group-error input[type="password"], .wy-control-group.wy-control-group-error input[type="email"], .wy-control-group.wy-control-group-error input[type="url"], .wy-control-group.wy-control-group-error input[type="date"], .wy-control-group.wy-control-group-error input[type="month"], .wy-control-group.wy-control-group-error input[type="time"], .wy-control-group.wy-control-group-error input[type="datetime"], .wy-control-group.wy-control-group-error input[type="datetime-local"], .wy-control-group.wy-control-group-error input[type="week"], .wy-control-group.wy-control-group-error input[type="number"], .wy-control-group.wy-control-group-error input[type="search"], .wy-control-group.wy-control-group-error input[type="tel"], .wy-control-group.wy-control-group-error input[type="color"] {
- border: solid 1px #E74C3C; }
-
-.wy-control-group.wy-control-group-error textarea {
- border: solid 1px #E74C3C; }
-
-.wy-inline-validate {
- white-space: nowrap; }
-
-.wy-inline-validate .wy-input-context {
- padding: 0.5em 0.625em;
- display: inline-block;
- font-size: 80%; }
-
-.wy-inline-validate.wy-inline-validate-success .wy-input-context {
- color: #27AE60; }
-
-.wy-inline-validate.wy-inline-validate-danger .wy-input-context {
- color: #E74C3C; }
-
-.wy-inline-validate.wy-inline-validate-warning .wy-input-context {
- color: #E67E22; }
-
-.wy-inline-validate.wy-inline-validate-info .wy-input-context {
- color: #2980B9; }
-
-.rotate-90 {
- -webkit-transform: rotate(90deg);
- -moz-transform: rotate(90deg);
- -ms-transform: rotate(90deg);
- -o-transform: rotate(90deg);
- transform: rotate(90deg); }
-
-.rotate-180 {
- -webkit-transform: rotate(180deg);
- -moz-transform: rotate(180deg);
- -ms-transform: rotate(180deg);
- -o-transform: rotate(180deg);
- transform: rotate(180deg); }
-
-.rotate-270 {
- -webkit-transform: rotate(270deg);
- -moz-transform: rotate(270deg);
- -ms-transform: rotate(270deg);
- -o-transform: rotate(270deg);
- transform: rotate(270deg); }
-
-.mirror {
- -webkit-transform: scaleX(-1);
- -moz-transform: scaleX(-1);
- -ms-transform: scaleX(-1);
- -o-transform: scaleX(-1);
- transform: scaleX(-1); }
-
-.mirror.rotate-90 {
- -webkit-transform: scaleX(-1) rotate(90deg);
- -moz-transform: scaleX(-1) rotate(90deg);
- -ms-transform: scaleX(-1) rotate(90deg);
- -o-transform: scaleX(-1) rotate(90deg);
- transform: scaleX(-1) rotate(90deg); }
-
-.mirror.rotate-180 {
- -webkit-transform: scaleX(-1) rotate(180deg);
- -moz-transform: scaleX(-1) rotate(180deg);
- -ms-transform: scaleX(-1) rotate(180deg);
- -o-transform: scaleX(-1) rotate(180deg);
- transform: scaleX(-1) rotate(180deg); }
-
-.mirror.rotate-270 {
- -webkit-transform: scaleX(-1) rotate(270deg);
- -moz-transform: scaleX(-1) rotate(270deg);
- -ms-transform: scaleX(-1) rotate(270deg);
- -o-transform: scaleX(-1) rotate(270deg);
- transform: scaleX(-1) rotate(270deg); }
-
-@media only screen and (max-width: 480px) {
- .wy-form button[type="submit"] {
- margin: 0.7em 0 0; }
- .wy-form input[type="text"], .wy-form input[type="password"], .wy-form input[type="email"], .wy-form input[type="url"], .wy-form input[type="date"], .wy-form input[type="month"], .wy-form input[type="time"], .wy-form input[type="datetime"], .wy-form input[type="datetime-local"], .wy-form input[type="week"], .wy-form input[type="number"], .wy-form input[type="search"], .wy-form input[type="tel"], .wy-form input[type="color"] {
- margin-bottom: 0.3em;
- display: block; }
- .wy-form label {
- margin-bottom: 0.3em;
- display: block; }
- .wy-form input[type="password"], .wy-form input[type="email"], .wy-form input[type="url"], .wy-form input[type="date"], .wy-form input[type="month"], .wy-form input[type="time"], .wy-form input[type="datetime"], .wy-form input[type="datetime-local"], .wy-form input[type="week"], .wy-form input[type="number"], .wy-form input[type="search"], .wy-form input[type="tel"], .wy-form input[type="color"] {
- margin-bottom: 0; }
- .wy-form-aligned .wy-control-group label {
- margin-bottom: 0.3em;
- text-align: left;
- display: block;
- width: 100%; }
- .wy-form-aligned .wy-control {
- margin: 1.5em 0 0 0; }
- .wy-form .wy-help-inline, .wy-form-message-inline, .wy-form-message {
- display: block;
- font-size: 80%;
- padding: 6px 0; } }
-
-@media screen and (max-width: 768px) {
- .tablet-hide {
- display: none; } }
-
-@media screen and (max-width: 480px) {
- .mobile-hide {
- display: none; } }
-
-.float-left {
- float: left; }
-
-.float-right {
- float: right; }
-
-.full-width {
- width: 100%; }
-
-.wy-table, .rst-content table.docutils, .rst-content table.field-list {
- border-collapse: collapse;
- border-spacing: 0;
- empty-cells: show;
- margin-bottom: 24px; }
-
-.wy-table caption, .rst-content table.docutils caption, .rst-content table.field-list caption {
- color: #000;
- font: italic 85%/1 arial, sans-serif;
- padding: 1em 0;
- text-align: center; }
-
-.wy-table td, .rst-content table.docutils td, .rst-content table.field-list td, .wy-table th, .rst-content table.docutils th, .rst-content table.field-list th {
- font-size: 90%;
- margin: 0;
- overflow: visible;
- padding: 8px 16px; }
-
-.wy-table td:first-child, .rst-content table.docutils td:first-child, .rst-content table.field-list td:first-child, .wy-table th:first-child, .rst-content table.docutils th:first-child, .rst-content table.field-list th:first-child {
- border-left-width: 0; }
-
-.wy-table thead, .rst-content table.docutils thead, .rst-content table.field-list thead {
- color: #000;
- text-align: left;
- vertical-align: bottom;
- white-space: nowrap; }
-
-.wy-table thead th, .rst-content table.docutils thead th, .rst-content table.field-list thead th {
- font-weight: 600;
- border-bottom: solid 2px #e1e4e5; }
-
-.wy-table td, .rst-content table.docutils td, .rst-content table.field-list td {
- background-color: transparent;
- vertical-align: middle; }
-
-.wy-table td p, .rst-content table.docutils td p, .rst-content table.field-list td p {
- line-height: 18px; }
-
-.wy-table td p:last-child, .rst-content table.docutils td p:last-child, .rst-content table.field-list td p:last-child {
- margin-bottom: 0; }
-
-.wy-table .wy-table-cell-min, .rst-content table.docutils .wy-table-cell-min, .rst-content table.field-list .wy-table-cell-min {
- width: 1%;
- padding-right: 0; }
-
-.wy-table .wy-table-cell-min input[type=checkbox], .rst-content table.docutils .wy-table-cell-min input[type=checkbox], .rst-content table.field-list .wy-table-cell-min input[type=checkbox] {
- margin: 0; }
-
-.wy-table-secondary {
- color: gray;
- font-size: 90%; }
-
-.wy-table-tertiary {
- color: gray;
- font-size: 80%; }
-
-.wy-table-odd td, .wy-table-striped tr:nth-child(2n-1) td, .rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td {
- background-color: #f3f6f6; }
-
-.wy-table-backed {
- background-color: #f3f6f6; }
-
-/* BORDERED TABLES */
-.wy-table-bordered-all, .rst-content table.docutils {
- border: 1px solid #e1e4e5; }
-
-.wy-table-bordered-all td, .rst-content table.docutils td {
- border-bottom: 1px solid #e1e4e5;
- border-left: 1px solid #e1e4e5; }
-
-.wy-table-bordered-all tbody > tr:last-child td, .rst-content table.docutils tbody > tr:last-child td {
- border-bottom-width: 0; }
-
-.wy-table-bordered {
- border: 1px solid #e1e4e5; }
-
-.wy-table-bordered-rows td {
- border-bottom: 1px solid #e1e4e5; }
-
-.wy-table-bordered-rows tbody > tr:last-child td {
- border-bottom-width: 0; }
-
-.wy-table-horizontal tbody > tr:last-child td {
- border-bottom-width: 0; }
-
-.wy-table-horizontal td, .wy-table-horizontal th {
- border-width: 0 0 1px 0;
- border-bottom: 1px solid #e1e4e5; }
-
-/* RESPONSIVE TABLES */
-.wy-table-responsive {
- margin-bottom: 24px;
- max-width: 100%;
- overflow: auto; }
-
-.wy-table-responsive table {
- margin-bottom: 0 !important; }
-
-.wy-table-responsive table td, .wy-table-responsive table th {
- white-space: nowrap; }
-
-a {
- color: #0099ee;
- text-decoration: none;
- cursor: pointer; }
-
-a:hover {
- color: #057eb6; }
-
-a:visited {
- color: #007ba8; }
-
-html {
- height: 100%;
- overflow-x: hidden; }
-
-body {
- font-family: "Nunito", Arial, sans-serif;
- font-weight: normal;
- color: #404040;
- min-height: 100%;
- overflow-x: hidden;
- background: #edf0f2; }
-
-.wy-text-left {
- text-align: left; }
-
-.wy-text-center {
- text-align: center; }
-
-.wy-text-right {
- text-align: right; }
-
-.wy-text-large {
- font-size: 120%; }
-
-.wy-text-normal {
- font-size: 100%; }
-
-.wy-text-small, small {
- font-size: 80%; }
-
-.wy-text-strike {
- text-decoration: line-through; }
-
-.wy-text-warning {
- color: #E67E22 !important; }
-
-a.wy-text-warning:hover {
- color: #eb9950 !important; }
-
-.wy-text-info {
- color: #2980B9 !important; }
-
-a.wy-text-info:hover {
- color: #409ad5 !important; }
-
-.wy-text-success {
- color: #27AE60 !important; }
-
-a.wy-text-success:hover {
- color: #36d278 !important; }
-
-.wy-text-danger {
- color: #E74C3C !important; }
-
-a.wy-text-danger:hover {
- color: #ed7669 !important; }
-
-.wy-text-neutral {
- color: #404040 !important; }
-
-a.wy-text-neutral:hover {
- color: #595959 !important; }
-
-h1, h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend {
- margin-top: 0;
- font-weight: 700;
- font-family: "Nunito", Arial, sans-serif; }
-
-p {
- line-height: 24px;
- margin: 0;
- font-size: 16px;
- margin-bottom: 24px; }
-
-h1 {
- font-size: 36px; }
-
-h2, .rst-content .toctree-wrapper p.caption {
- font-size: 28px; }
-
-h3 {
- font-size: 20px; }
-
-h4 {
- font-size: 16px; }
-
-h5 {
- font-size: 16px; }
-
-h6 {
- font-size: 16px; }
-
-hr {
- display: block;
- height: 1px;
- border: 0;
- border-top: 1px solid #e1e4e5;
- margin: 24px 0;
- padding: 0; }
-
-code, .rst-content tt, .rst-content code {
- white-space: nowrap;
- max-width: 100%;
- background: #fff;
- border: solid 1px #e1e4e5;
- font-size: 90%;
- padding: 0 5px;
- font-family: "Operator mono", "Hack", "Menlo", Consolas, "Andale Mono WT", "Andale Mono", "Lucida Console", "Lucida Sans Typewriter", "DejaVu Sans Mono", "Bitstream Vera Sans Mono", "Liberation Mono", "Nimbus Mono L", Monaco, "Courier New", Courier, monospace;
- color: #E74C3C;
- overflow-x: auto; }
-
-code.code-large, .rst-content tt.code-large {
- font-size: 90%; }
-
-.wy-plain-list-disc, .rst-content .section ul, .rst-content .toctree-wrapper ul, article ul {
- list-style: disc;
- line-height: 24px;
- margin-bottom: 24px; }
-
-.wy-plain-list-disc li, .rst-content .section ul li, .rst-content .toctree-wrapper ul li, article ul li {
- list-style: disc;
- margin-left: 24px; }
-
-.wy-plain-list-disc li p:last-child, .rst-content .section ul li p:last-child, .rst-content .toctree-wrapper ul li p:last-child, article ul li p:last-child {
- margin-bottom: 0; }
-
-.wy-plain-list-disc li ul, .rst-content .section ul li ul, .rst-content .toctree-wrapper ul li ul, article ul li ul {
- margin-bottom: 0; }
-
-.wy-plain-list-disc li li, .rst-content .section ul li li, .rst-content .toctree-wrapper ul li li, article ul li li {
- list-style: circle; }
-
-.wy-plain-list-disc li li li, .rst-content .section ul li li li, .rst-content .toctree-wrapper ul li li li, article ul li li li {
- list-style: square; }
-
-.wy-plain-list-disc li ol li, .rst-content .section ul li ol li, .rst-content .toctree-wrapper ul li ol li, article ul li ol li {
- list-style: decimal; }
-
-.wy-plain-list-decimal, .rst-content .section ol, .rst-content ol.arabic, article ol {
- list-style: decimal;
- line-height: 24px;
- margin-bottom: 24px; }
-
-.wy-plain-list-decimal li, .rst-content .section ol li, .rst-content ol.arabic li, article ol li {
- list-style: decimal;
- margin-left: 24px; }
-
-.wy-plain-list-decimal li p:last-child, .rst-content .section ol li p:last-child, .rst-content ol.arabic li p:last-child, article ol li p:last-child {
- margin-bottom: 0; }
-
-.wy-plain-list-decimal li ul, .rst-content .section ol li ul, .rst-content ol.arabic li ul, article ol li ul {
- margin-bottom: 0; }
-
-.wy-plain-list-decimal li ul li, .rst-content .section ol li ul li, .rst-content ol.arabic li ul li, article ol li ul li {
- list-style: disc; }
-
-.codeblock-example {
- border: 1px solid #e1e4e5;
- border-bottom: none;
- padding: 24px;
- padding-top: 48px;
- font-weight: 500;
- background: #fff;
- position: relative; }
-
-.codeblock-example:after {
- content: "Example";
- position: absolute;
- top: 0px;
- left: 0px;
- background: #9B59B6;
- color: white;
- padding: 6px 12px; }
-
-.codeblock-example.prettyprint-example-only {
- border: 1px solid #e1e4e5;
- margin-bottom: 24px; }
-
-.codeblock, pre.literal-block, .rst-content .literal-block, .rst-content pre.literal-block, div[class^='highlight'] {
- padding: 0px;
- overflow-x: auto;
- background: #fff;
- margin: 1px 0 24px 0; }
-
-.codeblock div[class^='highlight'], pre.literal-block div[class^='highlight'], .rst-content .literal-block div[class^='highlight'], div[class^='highlight'] div[class^='highlight'] {
- border: none;
- background: #F5F7F9;
- margin: 0; }
-
-div[class^='highlight'] td.code {
- width: 100%; }
-
-.linenodiv pre {
- border-right: solid 1px #e6e9ea;
- margin: 0;
- padding: 12px 12px;
- font-family: "Operator mono", "Hack", "Menlo", Consolas, "Andale Mono WT", "Andale Mono", "Lucida Console", "Lucida Sans Typewriter", "DejaVu Sans Mono", "Bitstream Vera Sans Mono", "Liberation Mono", "Nimbus Mono L", Monaco, "Courier New", Courier, monospace;
- font-size: 14px;
- line-height: 1.5;
- color: #d9d9d9; }
-
-div[class^='highlight'] pre {
- white-space: pre;
- margin: 0;
- padding: 12px 12px;
- font-family: "Operator mono", "Hack", "Menlo", Consolas, "Andale Mono WT", "Andale Mono", "Lucida Console", "Lucida Sans Typewriter", "DejaVu Sans Mono", "Bitstream Vera Sans Mono", "Liberation Mono", "Nimbus Mono L", Monaco, "Courier New", Courier, monospace;
- font-size: 14px;
- line-height: 1.5;
- display: block;
- overflow: auto;
- color: #404040; }
-
-@media print {
- .codeblock, pre.literal-block, .rst-content .literal-block, .rst-content pre.literal-block, div[class^='highlight'], div[class^='highlight'] pre {
- white-space: pre-wrap; } }
-
-.hll {
- background-color: #ffffcc;
- margin: 0 -12px;
- padding: 0 12px;
- display: block; }
-
-.c {
- color: #999988;
- font-style: italic; }
-
-.err {
- color: #a61717;
- background-color: #e3d2d2; }
-
-.k {
- font-weight: bold; }
-
-.o {
- font-weight: bold; }
-
-.cm {
- color: #999988;
- font-style: italic; }
-
-.cp {
- color: #999999;
- font-weight: bold; }
-
-.c1 {
- color: #999988;
- font-style: italic; }
-
-.cs {
- color: #999999;
- font-weight: bold;
- font-style: italic; }
-
-.gd {
- color: #000000;
- background-color: #ffdddd; }
-
-.gd .x {
- color: #000000;
- background-color: #ffaaaa; }
-
-.ge {
- font-style: italic; }
-
-.gr {
- color: #aa0000; }
-
-.gh {
- color: #999999; }
-
-.gi {
- color: #000000;
- background-color: #ddffdd; }
-
-.gi .x {
- color: #000000;
- background-color: #aaffaa; }
-
-.go {
- color: #888888; }
-
-.gp {
- color: #555555; }
-
-.gs {
- font-weight: bold; }
-
-.gu {
- color: #800080;
- font-weight: bold; }
-
-.gt {
- color: #aa0000; }
-
-.kc {
- font-weight: bold; }
-
-.kd {
- font-weight: bold; }
-
-.kn {
- font-weight: bold; }
-
-.kp {
- font-weight: bold; }
-
-.kr {
- font-weight: bold; }
-
-.kt {
- color: #445588;
- font-weight: bold; }
-
-.m {
- color: #009999; }
-
-.s {
- color: #dd1144; }
-
-.n {
- color: #333333; }
-
-.na {
- color: teal; }
-
-.nb {
- color: #0086b3; }
-
-.nc {
- color: #445588;
- font-weight: bold; }
-
-.no {
- color: teal; }
-
-.ni {
- color: purple; }
-
-.ne {
- color: #990000;
- font-weight: bold; }
-
-.nf {
- color: #990000;
- font-weight: bold; }
-
-.nn {
- color: #555555; }
-
-.nt {
- color: navy; }
-
-.nv {
- color: teal; }
-
-.ow {
- font-weight: bold; }
-
-.w {
- color: #bbbbbb; }
-
-.mf {
- color: #009999; }
-
-.mh {
- color: #009999; }
-
-.mi {
- color: #009999; }
-
-.mo {
- color: #009999; }
-
-.sb {
- color: #dd1144; }
-
-.sc {
- color: #dd1144; }
-
-.sd {
- color: #dd1144; }
-
-.s2 {
- color: #dd1144; }
-
-.se {
- color: #dd1144; }
-
-.sh {
- color: #dd1144; }
-
-.si {
- color: #dd1144; }
-
-.sx {
- color: #dd1144; }
-
-.sr {
- color: #009926; }
-
-.s1 {
- color: #dd1144; }
-
-.ss {
- color: #990073; }
-
-.bp {
- color: #999999; }
-
-.vc {
- color: teal; }
-
-.vg {
- color: teal; }
-
-.vi {
- color: teal; }
-
-.il {
- color: #009999; }
-
-.gc {
- color: #999;
- background-color: #EAF2F5; }
-
-.wy-breadcrumbs li {
- display: inline-block; }
-
-.wy-breadcrumbs li.wy-breadcrumbs-aside {
- float: right; }
-
-.wy-breadcrumbs li a {
- display: inline-block;
- padding: 5px; }
-
-.wy-breadcrumbs li a:first-child {
- padding-left: 0; }
-
-.wy-breadcrumbs li code, .wy-breadcrumbs li .rst-content tt, .rst-content .wy-breadcrumbs li tt {
- padding: 5px;
- border: none;
- background: none; }
-
-.wy-breadcrumbs li code.literal, .wy-breadcrumbs li .rst-content tt.literal, .rst-content .wy-breadcrumbs li tt.literal {
- color: #404040; }
-
-.wy-breadcrumbs-extra {
- margin-bottom: 0;
- color: #b3b3b3;
- font-size: 80%;
- display: inline-block; }
-
-@media screen and (max-width: 480px) {
- .wy-breadcrumbs-extra {
- display: none; }
- .wy-breadcrumbs li.wy-breadcrumbs-aside {
- display: none; } }
-
-@media print {
- .wy-breadcrumbs li.wy-breadcrumbs-aside {
- display: none; } }
-
-.wy-affix {
- position: fixed;
- top: 0; }
-
-.wy-menu a:hover {
- text-decoration: none; }
-
-.wy-menu-horiz {
- *zoom: 1; }
-
-.wy-menu-horiz:before, .wy-menu-horiz:after {
- display: table;
- content: ""; }
-
-.wy-menu-horiz:after {
- clear: both; }
-
-.wy-menu-horiz ul, .wy-menu-horiz li {
- display: inline-block; }
-
-.wy-menu-horiz li:hover {
- background: rgba(255, 255, 255, 0.1); }
-
-.wy-menu-horiz li.divide-left {
- border-left: solid 1px #404040; }
-
-.wy-menu-horiz li.divide-right {
- border-right: solid 1px #404040; }
-
-.wy-menu-horiz a {
- height: 32px;
- display: inline-block;
- line-height: 32px;
- padding: 0 16px; }
-
-.wy-menu-vertical {
- width: 300px; }
-
-.wy-menu-vertical header, .wy-menu-vertical p.caption {
- height: 32px;
- line-height: 32px;
- padding: 0 36px;
- margin-bottom: 0;
- display: block;
- font-weight: 600;
- text-transform: uppercase;
- font-size: 80%;
- color: #6f6f6f;
- white-space: nowrap; }
-
-.wy-menu-vertical ul {
- margin-bottom: 0; }
-
-.wy-menu-vertical li.current a {
- color: gray;
- padding: 0.4045em 20px 0.4045em 50px; }
-
-.wy-menu-vertical li code, .wy-menu-vertical li .rst-content tt, .rst-content .wy-menu-vertical li tt {
- border: none;
- background: inherit;
- color: inherit;
- padding-left: 0;
- padding-right: 0; }
-
-.wy-menu-vertical li span.toctree-expand {
- display: block;
- float: left;
- margin-left: -1.2em;
- font-size: 0.8em;
- line-height: 1.6em;
- color: #4d4d4d; }
-
-.wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
- color: #404040;
- padding: 0.4045em 36px;
- font-weight: 600;
- position: relative;
- border: none; }
-
-.wy-menu-vertical li.on a:hover span.toctree-expand, .wy-menu-vertical li.current > a:hover span.toctree-expand {
- color: gray; }
-
-.wy-menu-vertical li.on a span.toctree-expand, .wy-menu-vertical li.current > a span.toctree-expand {
- display: block;
- font-size: 0.8em;
- line-height: 1.6em;
- color: #333333; }
-
-.wy-menu-vertical li.toctree-l1.current li.toctree-l2 > ul, .wy-menu-vertical li.toctree-l2.current li.toctree-l3 > ul {
- display: none; }
-
-.wy-menu-vertical li.toctree-l1.current li.toctree-l2.current > ul, .wy-menu-vertical li.toctree-l2.current li.toctree-l3.current > ul {
- display: block; }
-
-.toctree-l2 {
- font-size: 14px; }
-
-.wy-menu-vertical li.toctree-l2.current > a {
- padding: 0.4045em 20px 0.4045em 52px;
- font-size: 14px; }
-
-.wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
- display: block;
- padding: 0.4045em 20px 0.4045em 72px; }
-
-.wy-menu-vertical li.toctree-l2 a:hover span.toctree-expand {
- color: gray; }
-
-.wy-menu-vertical li.toctree-l2 span.toctree-expand {
- color: #a3a3a3; }
-
-.wy-menu-vertical .current > a > span.toctree-expand:before {
- margin-left: -2px; }
-
-.toctree-expand:before {
- margin-top: 1px; }
-
-.wy-menu-vertical li.toctree-l3 {
- font-size: 14px; }
-
-.wy-menu-vertical li.toctree-l3.current > a {
- padding: 0.4045em 20px 0.4045em 72px; }
-
-.wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
- display: block;
- padding: 0.4045em 20px 0.4045em 92px;
- border-top: none;
- border-bottom: none; }
-
-.wy-menu-vertical li.toctree-l3 a:hover span.toctree-expand {
- color: gray; }
-
-.wy-menu-vertical li.toctree-l3 span.toctree-expand {
- color: #969696; }
-
-.wy-menu-vertical li.toctree-l4 {
- font-size: 14px; }
-
-.wy-menu-vertical li.current ul {
- display: block; }
-
-.wy-menu-vertical li ul {
- margin-bottom: 0;
- display: none; }
-
-.wy-menu-vertical .local-toc li ul {
- display: block; }
-
-.wy-menu-vertical li ul li a {
- margin-bottom: 0;
- color: #b3b3b3;
- font-weight: normal; }
-
-.wy-menu-vertical a {
- padding: 0.4045em 36px;
- display: block;
- position: relative;
- color: #b3b3b3; }
-
-.wy-menu-vertical a:hover {
- cursor: pointer; }
-
-.wy-menu-vertical a:hover span.toctree-expand {
- color: #b3b3b3; }
-
-.wy-menu-vertical a:active {
- cursor: pointer; }
-
-.wy-side-nav-search {
- display: block;
- width: 100%;
- z-index: 200;
- text-align: center;
- color: #fcfcfc; }
-
-.wy-side-nav-search input[type=text] {
- width: 100%;
- border-radius: 5px;
- padding: 6px 33px 6px 12px;
- border: none;
- height: auto;
- font-size: 14px;
- box-shadow: none; }
-
-.wy-side-nav-search img {
- display: block;
- margin: auto auto 0.809em auto;
- height: 45px;
- width: 45px;
- background-color: #2980B9;
- padding: 5px;
- border-radius: 100%; }
-
-.wy-side-nav-search > a, .wy-side-nav-search .wy-dropdown > a {
- color: black;
- width: 75%;
- font-size: 100%;
- font-weight: 600;
- display: inline-block;
- padding: 4px 6px;
- margin-bottom: 0.809em; }
-
-.wy-side-nav-search > a:hover, .wy-side-nav-search .wy-dropdown > a:hover {
- background: rgba(255, 255, 255, 0.1); }
-
-.wy-side-nav-search > a img.logo, .wy-side-nav-search .wy-dropdown > a img.logo {
- display: block;
- margin: 0;
- height: 100%;
- width: 100%;
- border-radius: 0;
- background: transparent; }
-
-.wy-side-nav-search > a.icon img.logo, .wy-side-nav-search .wy-dropdown > a.icon img.logo {
- margin: 0; }
-
-.wy-side-nav-search > div.version {
- margin-top: -0.4045em;
- margin-bottom: 0.809em;
- font-weight: normal;
- color: rgba(255, 255, 255, 0.3); }
-
-.wy-nav .wy-menu-vertical header {
- color: #2980B9; }
-
-.wy-nav .wy-menu-vertical a {
- color: #b3b3b3; }
-
-.wy-nav .wy-menu-vertical a:hover {
- background-color: #2980B9;
- color: #fff; }
-
-[data-menu-wrap] {
- -webkit-transition: all 0.2s ease-in;
- -moz-transition: all 0.2s ease-in;
- transition: all 0.2s ease-in;
- position: absolute;
- width: 100%;
- opacity: 0; }
-
-[data-menu-wrap].move-center {
- left: 0;
- right: auto;
- opacity: 1; }
-
-[data-menu-wrap].move-left {
- right: auto;
- left: -100%;
- opacity: 0; }
-
-[data-menu-wrap].move-right {
- right: -100%;
- left: auto;
- opacity: 0; }
-
-.wy-grid-for-nav {
- position: absolute;
- width: 100%;
- height: 100%; }
-
-.wy-nav-side {
- position: fixed;
- bottom: 0;
- left: 0;
- top: 0;
- padding-bottom: 60px;
- width: 300px;
- overflow-x: hidden;
- overflow-y: hidden;
- min-height: 100%;
- background: #343131;
- z-index: 200; }
- @media (min-width: 768px) {
- .wy-nav-side {
- top: 0px;
- min-height: calc(100% - 60px); } }
-
-.wy-side-scroll {
- width: 320px;
- position: relative;
- overflow-x: hidden;
- overflow-y: scroll;
- padding-bottom: 10px;
- height: 100%; }
-
-.wy-nav-top {
- display: none;
- background: #2980B9;
- color: #fff;
- padding: 0.4045em 0.809em;
- position: relative;
- line-height: 50px;
- text-align: center;
- font-size: 100%;
- *zoom: 1; }
-
-.wy-nav-top:before, .wy-nav-top:after {
- display: table;
- content: ""; }
-
-.wy-nav-top:after {
- clear: both; }
-
-.wy-nav-top a {
- color: #fff;
- font-weight: 600; }
-
-.wy-nav-top img {
- margin-right: 12px;
- height: 45px;
- width: 45px;
- background-color: #2980B9;
- padding: 5px;
- border-radius: 100%; }
-
-.wy-nav-top i {
- font-size: 30px;
- float: left;
- cursor: pointer;
- padding-top: inherit; }
-
-.wy-nav-content-wrap {
- margin-left: 300px;
- min-height: 100%; }
-
-.wy-nav-content {
- padding: 24px 46px 18px;
- height: 100%;
- max-width: 850px; }
-
-.wy-body-mask {
- position: fixed;
- width: 100%;
- height: 100%;
- background: rgba(0, 0, 0, 0.2);
- display: none;
- z-index: 499; }
-
-.wy-body-mask.on {
- display: block; }
-
-footer {
- color: gray; }
-
-footer p {
- margin-bottom: 12px; }
-
-footer span.commit code, footer span.commit .rst-content tt, .rst-content footer span.commit tt {
- padding: 0px;
- font-family: "Operator mono", "Hack", "Menlo", Consolas, "Andale Mono WT", "Andale Mono", "Lucida Console", "Lucida Sans Typewriter", "DejaVu Sans Mono", "Bitstream Vera Sans Mono", "Liberation Mono", "Nimbus Mono L", Monaco, "Courier New", Courier, monospace;
- font-size: 1em;
- background: none;
- border: none;
- color: gray; }
-
-.rst-footer-buttons {
- *zoom: 1; }
-
-.rst-footer-buttons:before, .rst-footer-buttons:after {
- width: 100%; }
-
-.rst-footer-buttons:before, .rst-footer-buttons:after {
- display: table;
- content: ""; }
-
-.rst-footer-buttons:after {
- clear: both; }
-
-.rst-breadcrumbs-buttons {
- margin-top: 12px;
- *zoom: 1; }
-
-.rst-breadcrumbs-buttons:before, .rst-breadcrumbs-buttons:after {
- display: table;
- content: ""; }
-
-.rst-breadcrumbs-buttons:after {
- clear: both; }
-
-#search-results .search li {
- margin-bottom: 24px;
- border-bottom: solid 1px #e1e4e5;
- padding-bottom: 24px; }
-
-#search-results .search li:first-child {
- border-top: solid 1px #e1e4e5;
- padding-top: 24px; }
-
-#search-results .search li a {
- font-size: 120%;
- margin-bottom: 12px;
- display: inline-block; }
-
-#search-results .context {
- color: gray;
- font-size: 90%; }
-
-@media screen and (max-width: 768px) {
- .wy-body-for-nav {
- background: #fcfcfc; }
- .wy-nav-top {
- display: block; }
- .wy-nav-side {
- left: -300px; }
- .wy-nav-side.shift {
- width: 85%;
- left: 0; }
- .wy-side-scroll {
- width: auto; }
- .wy-side-nav-search {
- width: auto; }
- .wy-menu.wy-menu-vertical {
- width: auto; }
- .wy-nav-content-wrap {
- margin-left: 0; }
- .wy-nav-content-wrap .wy-nav-content {
- padding: 26px 20px; }
- .wy-nav-content-wrap.shift {
- position: fixed;
- min-width: 100%;
- left: 85%;
- height: 100%;
- overflow: hidden;
- top: 0; } }
- @media screen and (max-width: 768px) and (min-width: 768px) {
- .wy-nav-content-wrap.shift {
- top: 60px; } }
-
-@media screen and (min-width: 1400px) {
- .wy-nav-content {
- margin: 0; } }
-
-@media print {
- .rst-versions, footer, .wy-nav-side {
- display: none; }
- .wy-nav-content-wrap {
- margin-left: 0; } }
-
-.rst-versions {
- position: fixed;
- bottom: 0;
- left: 0;
- width: 300px;
- color: black;
- background: white;
- z-index: 400;
- border-top-left-radius: 5px;
- border-top-right-radius: 5px;
- box-shadow: rgba(0, 0, 0, 0.25) 0px 2px 4px; }
-
-.rst-versions dt {
- color: black;
- font-weight: 600; }
-
-.rst-versions a {
- color: #2980B9;
- text-decoration: none; }
-
-.rst-versions .rst-badge-small {
- display: none; }
-
-.rst-versions .rst-current-version {
- padding: 20px;
- display: flex;
- text-align: right;
- font-size: 90%;
- cursor: pointer;
- color: black;
- *zoom: 1;
- justify-content: space-between;
- z-index: 999;
- position: relative; }
- .rst-versions .rst-current-version .fa {
- font-size: 12px; }
-
-.rst-versions .rst-current-version .fa, .rst-versions .rst-current-version .wy-menu-vertical li span.toctree-expand, .wy-menu-vertical li .rst-versions .rst-current-version span.toctree-expand, .rst-versions .rst-current-version .rst-content .admonition-title, .rst-content .rst-versions .rst-current-version .admonition-title, .rst-versions .rst-current-version .rst-content h1 .headerlink, .rst-content h1 .rst-versions .rst-current-version .headerlink, .rst-versions .rst-current-version .rst-content h2 .headerlink, .rst-content h2 .rst-versions .rst-current-version .headerlink, .rst-versions .rst-current-version .rst-content h3 .headerlink, .rst-content h3 .rst-versions .rst-current-version .headerlink, .rst-versions .rst-current-version .rst-content h4 .headerlink, .rst-content h4 .rst-versions .rst-current-version .headerlink, .rst-versions .rst-current-version .rst-content h5 .headerlink, .rst-content h5 .rst-versions .rst-current-version .headerlink, .rst-versions .rst-current-version .rst-content h6 .headerlink, .rst-content h6 .rst-versions .rst-current-version .headerlink, .rst-versions .rst-current-version .rst-content dl dt .headerlink, .rst-content dl dt .rst-versions .rst-current-version .headerlink, .rst-versions .rst-current-version .rst-content p.caption .headerlink, .rst-content p.caption .rst-versions .rst-current-version .headerlink, .rst-versions .rst-current-version .rst-content tt.download span:first-child, .rst-content tt.download .rst-versions .rst-current-version span:first-child, .rst-versions .rst-current-version .rst-content code.download span:first-child, .rst-content code.download .rst-versions .rst-current-version span:first-child, .rst-versions .rst-current-version .icon {
- color: black; }
-
-.rst-versions .rst-current-version.rst-out-of-date {
- background-color: #E74C3C;
- color: #fff; }
-
-.rst-versions .rst-current-version.rst-active-old-version {
- background-color: #F1C40F;
- color: #000; }
-
-.rst-versions.shift-up .rst-other-versions {
- display: block; }
-
-.rst-versions .rst-other-versions {
- font-size: 14px;
- color: gray;
- display: none;
- font: 0/0 a;
- padding: 0 !important; }
- .rst-versions .rst-other-versions .rst-other-versions {
- padding: 20px !important;
- margin-bottom: 40px; }
- .rst-versions .rst-other-versions .rst-current-version {
- display: none !important; }
- .rst-versions .rst-other-versions dl {
- font-family: "Nunito", sans-serif;
- font-size: 14px;
- line-height: 20px; }
- .rst-versions .rst-other-versions hr {
- display: none; }
-
-.rst-versions .rst-other-versions dd {
- display: inline-block;
- margin: 0; }
-
-.rst-versions .rst-other-versions dt + dd {
- margin-left: -6px; }
-
-.rst-versions .rst-other-versions dt + strong {
- margin-left: -6px; }
-
-.rst-versions .rst-other-versions dd a {
- display: inline-block;
- padding: 6px;
- color: black; }
-
-@media screen and (max-width: 768px) {
- .rst-versions {
- width: 85%;
- display: none; }
- .rst-versions.shift {
- display: block; } }
-
-.rst-content img {
- max-width: 100%;
- height: auto !important; }
-
-.rst-content div.figure {
- margin-bottom: 24px; }
-
-.rst-content div.figure p.caption {
- font-style: italic; }
-
-.rst-content div.figure.align-center {
- text-align: center; }
-
-.rst-content .section > img, .rst-content .section > a > img {
- margin-bottom: 24px; }
-
-.rst-content blockquote {
- padding-left: 20px;
- line-height: 24px;
- margin-bottom: 24px;
- border-left: 4px solid #E1E4E5; }
-
-.rst-content .note .last, .rst-content .attention .last, .rst-content .caution .last, .rst-content .danger .last, .rst-content .error .last, .rst-content .hint .last, .rst-content .important .last, .rst-content .tip .last, .rst-content .warning .last, .rst-content .seealso .last, .rst-content .admonition-todo .last, .rst-content .admonition .last {
- margin-bottom: 0; }
-
-.rst-content .admonition-title:before {
- margin-right: 4px; }
-
-.rst-content .admonition table {
- border-color: rgba(0, 0, 0, 0.1); }
-
-.rst-content .admonition table td, .rst-content .admonition table th {
- background: transparent !important;
- border-color: rgba(0, 0, 0, 0.1) !important; }
-
-.rst-content .section ol.loweralpha, .rst-content .section ol.loweralpha li {
- list-style: lower-alpha; }
-
-.rst-content .section ol.upperalpha, .rst-content .section ol.upperalpha li {
- list-style: upper-alpha; }
-
-.rst-content .section ol p, .rst-content .section ul p {
- margin-bottom: 12px; }
-
-.rst-content .line-block {
- margin-left: 24px; }
-
-.rst-content .topic-title {
- font-weight: 600;
- margin-bottom: 12px; }
-
-.rst-content .toc-backref {
- color: #404040; }
-
-.rst-content .align-right {
- float: right;
- margin: 0px 0px 24px 24px; }
-
-.rst-content .align-left {
- float: left;
- margin: 0px 24px 24px 0px; }
-
-.rst-content .align-center {
- margin: auto;
- display: block; }
-
-.rst-content h1 .headerlink, .rst-content h2 .headerlink, .rst-content .toctree-wrapper p.caption .headerlink, .rst-content h3 .headerlink, .rst-content h4 .headerlink, .rst-content h5 .headerlink, .rst-content h6 .headerlink, .rst-content dl dt .headerlink, .rst-content p.caption .headerlink {
- display: none;
- visibility: hidden;
- font-size: 14px; }
-
-.rst-content h1 .headerlink:after, .rst-content h2 .headerlink:after, .rst-content .toctree-wrapper p.caption .headerlink:after, .rst-content h3 .headerlink:after, .rst-content h4 .headerlink:after, .rst-content h5 .headerlink:after, .rst-content h6 .headerlink:after, .rst-content dl dt .headerlink:after, .rst-content p.caption .headerlink:after {
- visibility: visible;
- content: "";
- font-family: FontAwesome;
- display: inline-block; }
-
-.rst-content h1:hover .headerlink, .rst-content h2:hover .headerlink, .rst-content .toctree-wrapper p.caption:hover .headerlink, .rst-content h3:hover .headerlink, .rst-content h4:hover .headerlink, .rst-content h5:hover .headerlink, .rst-content h6:hover .headerlink, .rst-content dl dt:hover .headerlink, .rst-content p.caption:hover .headerlink {
- display: inline-block; }
-
-.rst-content .centered {
- text-align: center; }
-
-.rst-content .sidebar {
- float: right;
- width: 40%;
- display: block;
- margin: 0 0 24px 24px;
- padding: 24px;
- background: #f3f6f6;
- border: solid 1px #e1e4e5; }
-
-.rst-content .sidebar p, .rst-content .sidebar ul, .rst-content .sidebar dl {
- font-size: 90%; }
-
-.rst-content .sidebar .last {
- margin-bottom: 0; }
-
-.rst-content .sidebar .sidebar-title {
- display: block;
- font-family: "Nunito", Arial, sans-serif;
- font-weight: 600;
- background: #e1e4e5;
- padding: 6px 12px;
- margin: -24px;
- margin-bottom: 24px;
- font-size: 100%; }
-
-.rst-content .highlighted {
- background: #F1C40F;
- display: inline-block;
- font-weight: 600;
- padding: 0 6px; }
-
-.rst-content .footnote-reference, .rst-content .citation-reference {
- vertical-align: super;
- font-size: 90%; }
-
-.rst-content table.docutils.citation, .rst-content table.docutils.footnote {
- background: none;
- border: none;
- color: gray; }
-
-.rst-content table.docutils.citation td, .rst-content table.docutils.citation tr, .rst-content table.docutils.footnote td, .rst-content table.docutils.footnote tr {
- border: none;
- background-color: transparent !important;
- white-space: normal; }
-
-.rst-content table.docutils.citation td.label, .rst-content table.docutils.footnote td.label {
- padding-left: 0;
- padding-right: 0;
- vertical-align: top; }
-
-.rst-content table.docutils.citation tt, .rst-content table.docutils.citation code, .rst-content table.docutils.footnote tt, .rst-content table.docutils.footnote code {
- color: #555; }
-
-.rst-content table.field-list {
- border: none; }
-
-.rst-content table.field-list td {
- border: none; }
-
-.rst-content table.field-list td > strong {
- display: inline-block; }
-
-.rst-content table.field-list .field-name {
- padding-right: 10px;
- text-align: left;
- white-space: nowrap; }
-
-.rst-content table.field-list .field-body {
- text-align: left; }
-
-.rst-content tt, .rst-content tt, .rst-content code {
- color: #000;
- padding: 2px 5px; }
-
-.rst-content tt big, .rst-content tt em, .rst-content tt big, .rst-content code big, .rst-content tt em, .rst-content code em {
- font-size: 100% !important;
- line-height: normal; }
-
-.rst-content tt.literal, .rst-content tt.literal, .rst-content code.literal {
- color: #E74C3C; }
-
-.rst-content tt.xref, a .rst-content tt, .rst-content tt.xref, .rst-content code.xref, a .rst-content tt, a .rst-content code {
- font-weight: 600;
- color: #404040; }
-
-.rst-content a tt, .rst-content a tt, .rst-content a code {
- color: #2980B9; }
-
-.rst-content dl {
- margin-bottom: 24px; }
-
-.rst-content dl dt {
- font-weight: 600; }
-
-.rst-content dl p, .rst-content dl table, .rst-content dl ul, .rst-content dl ol {
- margin-bottom: 12px !important; }
-
-.rst-content dl dd {
- margin: 0 0 12px 24px; }
-
-.rst-content dl:not(.docutils) {
- margin-bottom: 24px; }
-
-.rst-content dl:not(.docutils) dt {
- display: table;
- margin: 6px 0;
- font-size: 90%;
- line-height: normal;
- background: #e7f2fa;
- color: #2980B9;
- border-top: solid 3px #6ab0de;
- padding: 6px;
- position: relative; }
-
-.rst-content dl:not(.docutils) dt:before {
- color: #6ab0de; }
-
-.rst-content dl:not(.docutils) dt .headerlink {
- color: #404040;
- font-size: 100% !important; }
-
-.rst-content dl:not(.docutils) dl dt {
- margin-bottom: 6px;
- border: none;
- border-left: solid 3px #cccccc;
- background: #f0f0f0;
- color: #555; }
-
-.rst-content dl:not(.docutils) dl dt .headerlink {
- color: #404040;
- font-size: 100% !important; }
-
-.rst-content dl:not(.docutils) dt:first-child {
- margin-top: 0; }
-
-.rst-content dl:not(.docutils) tt, .rst-content dl:not(.docutils) tt, .rst-content dl:not(.docutils) code {
- font-weight: 600; }
-
-.rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) tt.descclassname, .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) code.descname, .rst-content dl:not(.docutils) tt.descclassname, .rst-content dl:not(.docutils) code.descclassname {
- background-color: transparent;
- border: none;
- padding: 0;
- font-size: 100% !important; }
-
-.rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) code.descname {
- font-weight: 600; }
-
-.rst-content dl:not(.docutils) .optional {
- display: inline-block;
- padding: 0 4px;
- color: #000;
- font-weight: 600; }
-
-.rst-content dl:not(.docutils) .property {
- display: inline-block;
- padding-right: 8px; }
-
-.rst-content .viewcode-link, .rst-content .viewcode-back {
- display: inline-block;
- color: #27AE60;
- font-size: 80%;
- padding-left: 24px; }
-
-.rst-content .viewcode-back {
- display: block;
- float: right; }
-
-.rst-content p.rubric {
- margin-bottom: 12px;
- font-weight: 600; }
-
-.rst-content tt.download, .rst-content code.download {
- background: inherit;
- padding: inherit;
- font-weight: normal;
- font-family: inherit;
- font-size: inherit;
- color: inherit;
- border: inherit;
- white-space: inherit; }
-
-.rst-content tt.download span:first-child:before, .rst-content code.download span:first-child:before {
- margin-right: 4px; }
-
-.rst-content .guilabel {
- border: 1px solid #7fbbe3;
- background: #e7f2fa;
- font-size: 80%;
- font-weight: 700;
- border-radius: 4px;
- padding: 2.4px 6px;
- margin: auto 2px; }
-
-.rst-content .versionmodified {
- font-style: italic; }
-
-@media screen and (max-width: 480px) {
- .rst-content .sidebar {
- width: 100%; } }
-
-span[id*='MathJax-Span'] {
- color: #404040; }
-
-.math {
- text-align: center; }
-
-@media (min-width: 768px) {
- body {
- padding-top: 0px; } }
-
-.wy-side-nav-search {
- color: black; }
-
-.wy-side-nav-search > div.version {
- color: rgba(0, 0, 0, 0.3); }
-
-.wy-nav-side,
-.wy-side-nav-search {
- background-color: #f5f7f9; }
-
-.wy-nav-content-wrap {
- background-color: white; }
-
-.wy-menu-vertical li a,
-.wy-menu-vertical li ul li a,
-.wy-menu-vertical li.current a {
- color: black; }
-
-.header {
- display: flex;
- background-color: white;
- width: 100%;
- border-bottom: 1px solid #e1e4e5;
- z-index: 999;
- height: 60px;
- padding: 0 20px;
- flex-direction: row;
- align-items: center;
- transform: translateZ(0);
- left: 0;
- top: 0;
- position: absolute; }
- @media (min-width: 768px) {
- .header {
- position: fixed; } }
- .header .logo-link {
- display: block;
- margin-left: auto; }
- @media (max-width: 767px) {
- .header .logo-link {
- display: none; } }
- .header .fa-bars {
- cursor: pointer;
- color: black;
- display: none;
- font-size: 30px;
- margin-right: 20px; }
- @media (max-width: 767px) {
- .header .fa-bars {
- display: block; } }
-
-.header-title-wrap {
- width: 280px; }
-
-.header-title {
- font-size: 20px; }
- .header-title, .header-title:hover, .header-title:visited, .header-title:focus {
- color: black;
- text-decoration: none; }
-
-.logo {
- height: 16px; }
-
-.wy-breadcrumbs {
- font-size: 16px;
- font-weight: 600;
- min-height: 30px;
- margin-bottom: 20px; }
- .wy-breadcrumbs li a {
- padding: 0; }
-
-.wy-side-nav-search {
- height: auto;
- border-bottom: 1px solid #e1e4e5;
- padding-left: 20px;
- padding-right: 20px;
- display: block;
- align-items: flex-start;
- margin-bottom: 20px; }
- @media (max-width: 767px) {
- .wy-side-nav-search {
- height: 60px; } }
- .wy-side-nav-search .search-form,
- .wy-side-nav-search .search-form form {
- width: 100%;
- position: relative; }
-
-.search-btn {
- -webkit-appearance: none;
- -moz-appearance: none;
- appearance: none;
- background: transparent;
- border: none;
- width: auto;
- height: auto;
- padding: 0;
- margin: 0;
- position: absolute;
- right: 10px;
- top: 4px; }
-
-.rst-footer-buttons {
- margin-top: 50px; }
-
-footer > hr {
- margin-bottom: 14px; }
-
-.footer-info {
- color: #8392a4;
- display: flex;
- flex-direction: column; }
- @media (min-width: 768px) {
- .footer-info {
- flex-direction: row;
- justify-content: space-between; } }
- .footer-info p {
- font-size: 12px; }
- @media (min-width: 768px) {
- .footer-info p {
- margin-bottom: 0px; } }
- .footer-info a {
- color: #8392a4 !important;
- text-decoration: underline; }
-
-.divio-cloud {
- margin: 20px 20px 10px;
- border: 1px solid #e0e0e0;
- background: white;
- border-radius: 5px;
- padding: 20px 16px; }
- @media (max-width: 767px) {
- .divio-cloud {
- display: none; } }
-
-.divio-cloud-caption {
- font-size: 12px;
- text-transform: uppercase;
- line-height: 16px;
- margin-bottom: 16px;
- color: rgba(0, 0, 0, 0.5);
- display: block; }
-
-.divio-cloud-heading {
- font-size: 22px;
- font-weight: normal;
- line-height: 26px;
- padding-right: 10px;
- margin-bottom: 18px; }
-
-.divio-cloud-features {
- padding-left: 15px; }
-
-.divio-cloud-features li {
- margin-bottom: 8px;
- font-size: 14px;
- line-height: 18px;
- position: relative; }
-
-.divio-cloud-features li:before {
- content: "";
- width: 7px;
- height: 7px;
- background: #0bf;
- border-radius: 7px;
- display: block;
- position: absolute;
- left: -14px;
- top: 5px; }
-
-.divio-cloud-btn {
- display: block;
- text-align: center;
- height: 36px;
- line-height: 36px;
- color: white;
- background: #0bf;
- padding: 0 !important;
- border-radius: 5px;
- margin-top: 20px;
- font-size: 14px; }
- .divio-cloud-btn:visited {
- background-color: #0bf !important; }
-
-.wy-menu.wy-menu-vertical ~ div {
- display: none !important; }
-
-.rst-content h1 code,
-.rst-content h2 code,
-.rst-content h3 code,
-.rst-content h4 code,
-.rst-content h5 code,
-.rst-content h6 code {
- border: none;
- padding-left: 0px;
- padding-right: 0px; }
-
-.tabs__nav {
- display: block; }
-
-.tabs__link {
- display: inline-block;
- padding: 10px 15px;
- border-bottom: 2px solid transparent;
- color: black !important; }
- .tabs__link:hover, .tabs__link:visited, .tabs__link:active {
- color: black !important; }
-
-.tabs__link--active {
- color: #0bf !important;
- font-weight: bold;
- border-bottom: 2px solid #0bf !important; }
- .tabs__link--active:hover, .tabs__link--active:visited, .tabs__link--active:active {
- color: #0bf !important; }
-
-.tabs__content {
- padding-top: 20px; }
-
-.tabs-pane {
- display: none; }
-
-.rst-content .sidebar {
- background: #f5f7f9;
-}
-
-.rst-content .sidebar .sidebar-title {
- background: #057eb6;
- color: #fff;
-}
-
-.rst-content .sidebar .sidebar-subtitle {
- font-weight: bold;
-}
\ No newline at end of file
diff --git a/docs/_static/images/BAAL-horizontal-logo-black.png b/docs/_static/images/BAAL-horizontal-logo-black.png
new file mode 100644
index 00000000..6e4a9520
Binary files /dev/null and b/docs/_static/images/BAAL-horizontal-logo-black.png differ
diff --git a/docs/_static/images/logo-horizontal-transparent.png b/docs/_static/images/logo-horizontal-transparent.png
new file mode 100644
index 00000000..4f5f7bd0
Binary files /dev/null and b/docs/_static/images/logo-horizontal-transparent.png differ
diff --git a/docs/_static/images/logo-transparent.png b/docs/_static/images/logo-transparent.png
index b3773368..9400f543 100644
Binary files a/docs/_static/images/logo-transparent.png and b/docs/_static/images/logo-transparent.png differ
diff --git a/docs/_static/images/logo-vertical.png b/docs/_static/images/logo-vertical.png
new file mode 100644
index 00000000..36800051
Binary files /dev/null and b/docs/_static/images/logo-vertical.png differ
diff --git a/docs/_static/images/logo-with-bg-solid.png b/docs/_static/images/logo-with-bg-solid.png
new file mode 100644
index 00000000..abe5389f
Binary files /dev/null and b/docs/_static/images/logo-with-bg-solid.png differ
diff --git a/docs/_static/images/logo-with-bg.jpg b/docs/_static/images/logo-with-bg.jpg
new file mode 100644
index 00000000..1c2a9d81
Binary files /dev/null and b/docs/_static/images/logo-with-bg.jpg differ
diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html
deleted file mode 100644
index 462ed4c0..00000000
--- a/docs/_templates/layout.html
+++ /dev/null
@@ -1,6 +0,0 @@
-{% extends "!layout.html" %}
-
-{%- block extrahead %}
-
-
-{% endblock %}
\ No newline at end of file
diff --git a/docs/api/bayesian.md b/docs/api/bayesian.md
index 73441544..e0e7902e 100644
--- a/docs/api/bayesian.md
+++ b/docs/api/bayesian.md
@@ -22,11 +22,10 @@ model = MCDropoutConnectModule(model, layers=["Linear"], weight_dropout=0.5)
## API
-```eval_rst
-.. autoclass:: baal.bayesian.dropout.MCDropoutModule
- :members: __init__
+### baal.bayesian.dropout.MCDropoutModule
-..autoclass:: baal.bayesian.weight_drop.MCDropoutConnectModule
- :members: __init__
+::: baal.bayesian.dropout.MCDropoutModule
-```
\ No newline at end of file
+### baal.bayesian.weight_drop.MCDropoutConnectModule
+
+::: baal.bayesian.weight_drop.MCDropoutConnectModule
\ No newline at end of file
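
To make the wrappers above concrete, here is a minimal sketch of Monte Carlo sampling through `MCDropoutModule`; the toy model, batch size, and iteration count are illustrative assumptions, not part of the documented API.

```python
# Minimal sketch: MC-Dropout sampling with MCDropoutModule.
# The wrapper keeps dropout stochastic even in eval mode, so repeated
# forward passes yield different samples from the predictive distribution.
import torch
from baal.bayesian.dropout import MCDropoutModule

model = torch.nn.Sequential(          # toy classifier (illustrative)
    torch.nn.Linear(16, 32),
    torch.nn.ReLU(),
    torch.nn.Dropout(p=0.5),
    torch.nn.Linear(32, 2),
)
mc_model = MCDropoutModule(model)
mc_model.eval()                       # dropout stays active despite eval()

x = torch.randn(4, 16)
samples = torch.stack([mc_model(x) for _ in range(20)])  # [20, 4, 2]
mean_prediction = samples.mean(dim=0)                    # average over passes
```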
diff --git a/docs/api/calibration.md b/docs/api/calibration.md
index 6501afa3..5ba08511 100644
--- a/docs/api/calibration.md
+++ b/docs/api/calibration.md
@@ -1,6 +1,5 @@
# Calibration Wrapper
-```eval_rst
-.. autoclass:: baal.calibration.DirichletCalibrator
- :members:
-```
+### baal.calibration.DirichletCalibrator
+
+::: baal.calibration.DirichletCalibrator
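
A rough usage sketch of the calibrator documented above; the argument names (`wrapper`, `num_classes`, `lr`, `reg_factor`) and the `calibrate` signature are assumptions to be checked against the generated API page, and `model`, `criterion`, `train_set`, and `valid_set` are placeholders.

```python
# Hedged sketch: fitting the Dirichlet calibration layer on a held-out set.
from baal.calibration import DirichletCalibrator
from baal.modelwrapper import ModelWrapper

wrapper = ModelWrapper(model, criterion)          # your trained model
calibrator = DirichletCalibrator(
    wrapper=wrapper, num_classes=10, lr=0.001, reg_factor=0.01
)
# Train only the added linear (calibration) layer on held-out data.
calibrator.calibrate(
    train_set, valid_set, batch_size=32, epochs=5,
    use_cuda=False, double_fit=False
)
calibrated_model = calibrator.calibrated_model
```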
diff --git a/docs/api/compatibility/huggingface.md b/docs/api/compatibility/huggingface.md
index 3d1e6b6b..757667f6 100644
--- a/docs/api/compatibility/huggingface.md
+++ b/docs/api/compatibility/huggingface.md
@@ -1,10 +1,7 @@
## HuggingFace Compatibility
- ```eval_rst
-.. autoclass:: baal.transformers_trainer_wrapper.BaalTransformersTrainer
- :members: predict_on_dataset, predict_on_dataset_generator
+**baal.transformers_trainer_wrapper.BaalTransformersTrainer**
+::: baal.transformers_trainer_wrapper.BaalTransformersTrainer
-.. autoclass:: baal.active.nlp_datasets.HuggingFaceDatasets
- :members:
-
-```
\ No newline at end of file
+**baal.active.dataset.nlp_datasets.HuggingFaceDatasets**
+::: baal.active.dataset.nlp_datasets.HuggingFaceDatasets
\ No newline at end of file
diff --git a/docs/api/compatibility/pytorch-lightning.md b/docs/api/compatibility/pytorch-lightning.md
index f1fffb4d..d9f5a27e 100644
--- a/docs/api/compatibility/pytorch-lightning.md
+++ b/docs/api/compatibility/pytorch-lightning.md
@@ -1,12 +1,10 @@
## Pytorch Lightning Compatibility
- ```eval_rst
-.. autoclass:: baal.utils.pytorch_lightning.ResetCallback
- :members: on_train_start
+**baal.utils.pytorch_lightning.ResetCallback**
+::: baal.utils.pytorch_lightning.ResetCallback
-.. autoclass:: baal.utils.pytorch_lightning.BaalTrainer
- :members: predict_on_dataset, predict_on_dataset_generator
+**baal.utils.pytorch_lightning.BaalTrainer**
+::: baal.utils.pytorch_lightning.BaalTrainer
-.. autoclass:: baal.utils.pytorch_lightning.BaaLDataModule
- :members: pool_dataloader
-```
\ No newline at end of file
+**baal.utils.pytorch_lightning.BaaLDataModule**
+::: baal.utils.pytorch_lightning.BaaLDataModule
\ No newline at end of file
diff --git a/docs/api/dataset_management.md b/docs/api/dataset_management.md
index 342be9bc..0f9c56e0 100644
--- a/docs/api/dataset_management.md
+++ b/docs/api/dataset_management.md
@@ -36,13 +36,11 @@ assert al_dataset.pool.transform is None
### API
-```eval_rst
-.. autoclass:: baal.active.ActiveLearningDataset
- :members:
+### baal.active.ActiveLearningDataset
+::: baal.active.ActiveLearningDataset
-.. autoclass:: baal.active.ActiveLearningLoop
- :members:
+### baal.active.ActiveLearningLoop
+::: baal.active.ActiveLearningLoop
-.. autoclass:: baal.active.FileDataset
- :members:
-```
\ No newline at end of file
+### baal.active.FileDataset
+::: baal.active.FileDataset
\ No newline at end of file
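
A minimal sketch of the labelling workflow around `ActiveLearningDataset`; the dataset choice and label counts are arbitrary.

```python
# Minimal sketch: seeding and growing the labelled set.
from baal.active import ActiveLearningDataset
from torchvision.datasets import CIFAR10
from torchvision.transforms import ToTensor

ds = CIFAR10(".", train=True, download=True, transform=ToTensor())
al_dataset = ActiveLearningDataset(ds)

al_dataset.label_randomly(100)     # seed the labelled set
print(len(al_dataset))             # 100 labelled samples
print(al_dataset.n_unlabelled)     # size of the remaining pool

# After scoring `al_dataset.pool`, label specific pool indices.
al_dataset.label([0, 3, 7])
```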
diff --git a/docs/api/heuristics.md b/docs/api/heuristics.md
index 635847cb..8172e239 100644
--- a/docs/api/heuristics.md
+++ b/docs/api/heuristics.md
@@ -33,13 +33,14 @@ BALD(reduction="mean")
### API
-```eval_rst
-.. autoclass:: baal.active.heuristics.AbstractHeuristic
- :members:
+### baal.active.heuristics.AbstractHeuristic
+::: baal.active.heuristics.AbstractHeuristic
-.. autoclass:: baal.active.heuristics.BALD
+### baal.active.heuristics.BALD
+::: baal.active.heuristics.BALD
-.. autoclass:: baal.active.heuristics.Random
+### baal.active.heuristics.Random
+::: baal.active.heuristics.Random
-.. autoclass:: baal.active.heuristics.Entropy
-```
\ No newline at end of file
+### baal.active.heuristics.Entropy
+::: baal.active.heuristics.Entropy
\ No newline at end of file
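
A small sketch of ranking a pool with `BALD`; the random predictions stand in for real MC-Dropout outputs and follow the `[n_samples, n_classes, n_iterations]` shape convention assumed here.

```python
# Minimal sketch: ranking pool items by BALD.
import numpy as np
from baal.active.heuristics import BALD

n_samples, n_classes, n_iterations = 1000, 10, 20
rng = np.random.default_rng(0)
# Fake softmax samples, reshaped to [N, classes, iterations].
predictions = rng.dirichlet(
    np.ones(n_classes), size=(n_samples, n_iterations)
).transpose(0, 2, 1)

heuristic = BALD()
ranks = heuristic(predictions)   # indices, most informative first
to_label = ranks[:50]            # query the top 50 for annotation
```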
diff --git a/docs/api/index.md b/docs/api/index.md
index 3d28b609..1c5464d5 100644
--- a/docs/api/index.md
+++ b/docs/api/index.md
@@ -1,25 +1,18 @@
# API Reference
-```eval_rst
-.. toctree::
- :caption: API Definition
- :maxdepth: 1
-
- baal.modelwrapper.ModelWrapper <./modelwrapper>
- baal.bayesian <./bayesian>
- baal.active <./dataset_management>
- baal.active.heuristics <./heuristics>
- baal.calibration <./calibration>
- baal.utils <./utils>
-
-.. toctree::
- :caption: Compatibility
- :maxdepth: 1
-
- baal.utils.pytorch_lightning <./compatibility/pytorch-lightning>
- baal.transformers_trainer_wrapper <./compatibility/huggingface>
-
-```
+### :material-file-tree: API Definition
+
+* [baal.modelwrapper.ModelWrapper](./modelwrapper.md)
+* [baal.bayesian](./bayesian.md)
+* [baal.active](./dataset_management.md)
+* [baal.active.heuristics](./heuristics.md)
+* [baal.calibration](./calibration.md)
+* [baal.utils](./utils.md)
+
+### :material-file-tree: Compatibility
+
+* [baal.utils.pytorch_lightning](./compatibility/pytorch-lightning.md)
+* [baal.transformers_trainer_wrapper](./compatibility/huggingface.md)
diff --git a/docs/api/modelwrapper.md b/docs/api/modelwrapper.md
index e8ef4eeb..72811d32 100644
--- a/docs/api/modelwrapper.md
+++ b/docs/api/modelwrapper.md
@@ -32,7 +32,6 @@ predictions.shape
### API
-```eval_rst
-.. autoclass:: baal.ModelWrapper
- :members:
-```
\ No newline at end of file
+### baal.ModelWrapper
+
+::: baal.ModelWrapper
\ No newline at end of file
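
For context, a hedged sketch of producing the `predictions` array used above; `pool_dataset` is a placeholder and the shapes are assumptions.

```python
# Minimal sketch: MC sampling over a pool through ModelWrapper.
import torch
from baal.bayesian.dropout import MCDropoutModule
from baal.modelwrapper import ModelWrapper

model = MCDropoutModule(torch.nn.Sequential(
    torch.nn.Linear(16, 32),
    torch.nn.Dropout(p=0.5),
    torch.nn.Linear(32, 10),
))
wrapper = ModelWrapper(model, criterion=torch.nn.CrossEntropyLoss())

predictions = wrapper.predict_on_dataset(
    pool_dataset, batch_size=32, iterations=20, use_cuda=False
)
# predictions.shape == (len(pool_dataset), 10, 20)
```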
diff --git a/docs/api/utils.md b/docs/api/utils.md
index 0278e929..becc99a8 100644
--- a/docs/api/utils.md
+++ b/docs/api/utils.md
@@ -47,7 +47,6 @@ print(wrapper.active_learning_metrics)
"""
```
-```eval_rst
-.. automodule:: baal.utils.metrics
- :members:
-```
\ No newline at end of file
+### baal.utils.metrics
+
+::: baal.utils.metrics
\ No newline at end of file
diff --git a/docs/conf.py b/docs/conf.py
deleted file mode 100644
index 14c88552..00000000
--- a/docs/conf.py
+++ /dev/null
@@ -1,231 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Configuration file for the Sphinx documentation builder.
-#
-# This file does only contain a selection of the most common options. For a
-# full list see the documentation:
-# http://www.sphinx-doc.org/en/master/config
-
-# -- Path setup --------------------------------------------------------------
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#
-import os
-import pathlib
-import shutil
-import sys
-
-from recommonmark.transform import AutoStructify
-import sphinx_rtd_theme
-from recommonmark.parser import CommonMarkParser
-
-pjoin = os.path.join
-parent_dir = pathlib.Path(__file__).resolve().parents[1]
-sys.path.insert(0, os.path.abspath('./../'))
-
-shutil.rmtree('notebooks', ignore_errors=True)
-shutil.copytree(pjoin(parent_dir, 'notebooks'), 'notebooks')
-
-# -- Project information -----------------------------------------------------
-
-# Disable notebook execution
-nbsphinx_execute = 'never'
-
-project = 'baal'
-copyright = '2019, Parmida Atighehchian, Frédéric Branchaud-Charron, Jan Freyberg'
-author = 'Parmida Atighehchian, Frédéric Branchaud-Charron, Jan Freyberg'
-
-# The short X.Y version
-version = ''
-# The full version, including alpha/beta/rc tags
-release = '1.6.0'
-
-# -- General configuration ---------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-#
-# needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
-# ones.
-extensions = [
- 'sphinx.ext.autodoc',
- 'sphinx.ext.autosummary',
- 'sphinx.ext.doctest',
- 'sphinx.ext.todo',
- 'sphinx.ext.mathjax',
- 'sphinx.ext.viewcode',
- 'sphinx_copybutton',
- "sphinx_automodapi.automodapi",
- "nbsphinx",
- "recommonmark",
- "numpydoc",
- "sphinx.ext.napoleon"
-]
-
-# We need to mock these packages to compile without deps.
-autodoc_mock_imports = ["PIL", "tqdm", "structlog", "torch", "torchvision", "numpy", "sklearn",
- "scipy", "baal.utils.cuda_utils", "transformers", "pytorch_lightning",
- "datasets"]
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-
-
-source_parsers = {
- '.md': CommonMarkParser,
-}
-
-# The suffix(es) of source filenames.
-# You can specify multiple suffix as a list of string:
-# source_suffix = ['.rst', '.md', '.ipynb']
-
-# The master toctree document.
-master_doc = 'index'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#
-# This is also used if you do content translation via gettext catalogs.
-# Usually you set "language" from the command line for these cases.
-language = None
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-# This pattern also affects html_static_path and html_extra_path.
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.ipynb_checkpoints']
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = None
-
-# -- Options for HTML output -------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. See the documentation for
-# a list of builtin themes.
-#
-html_theme = "sphinx_rtd_theme"
-html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
-html_logo = "_static/images/logo-transparent.png"
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
-#
-# html_theme_options = {}
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static', '_static/images']
-html_css_files = [
- 'css/default.css',
-]
-
-# Custom sidebar templates, must be a dictionary that maps document names
-# to template names.
-#
-# The default sidebars (for documents that don't match any pattern) are
-# defined by theme itself. Builtin themes are using these templates by
-# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
-# 'searchbox.html']``.
-#
-# html_sidebars = {}
-
-
-# -- Options for HTMLHelp output ---------------------------------------------
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'baaldoc'
-
-# -- Options for LaTeX output ------------------------------------------------
-
-latex_elements = {
- # The paper size ('letterpaper' or 'a4paper').
- #
- # 'papersize': 'letterpaper',
- # The font size ('10pt', '11pt' or '12pt').
- #
- # 'pointsize': '10pt',
- # Additional stuff for the LaTeX preamble.
- #
- # 'preamble': '',
- # Latex figure (float) alignment
- #
- # 'figure_align': 'htbp',
-}
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title,
-# author, documentclass [howto, manual, or own class]).
-latex_documents = [
- (
- master_doc,
- 'baal.tex',
- 'baal Documentation',
- 'Parmida Atighehchian, Frédéric Branchaud-Charron, Jan Freyberg',
- 'manual',
- )
-]
-
-# -- Options for manual page output ------------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [(master_doc, 'baal', 'baal Documentation', [author], 1)]
-
-# -- Options for Texinfo output ----------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-# dir menu entry, description, category)
-texinfo_documents = [
- (
- master_doc,
- 'baal',
- 'baal Documentation',
- author,
- 'baal',
- 'One line description of project.',
- 'Miscellaneous',
- )
-]
-
-# -- Options for Epub output -------------------------------------------------
-
-# Bibliographic Dublin Core info.
-epub_title = project
-
-# The unique identifier of the text. This can be a ISBN number
-# or the project homepage.
-#
-# epub_identifier = ''
-
-# A unique identification for the text.
-#
-# epub_uid = ''
-
-# A list of files that should not be packed into the epub file.
-epub_exclude_files = ['search.html']
-
-# -- Extension configuration -------------------------------------------------
-
-# -- Options for todo extension ----------------------------------------------
-
-# If true, `todo` and `todoList` produce output, else they produce nothing.
-todo_include_todos = True
-
-
-# At the bottom of conf.py
-def setup(app):
- app.add_config_value('recommonmark_config', {
- 'enable_auto_toc_tree': True,
- 'enable_eval_rst': True,
- 'enable_math': True,
- 'enable_inline_math': True,
- 'auto_toc_tree_section': 'Contents',
- }, True)
- app.add_transform(AutoStructify)
diff --git a/docs/index.md b/docs/index.md
index 460d9e43..2c4c3ce6 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1,14 +1,8 @@
-```eval_rst
-.. baal documentation master file, created by
- sphinx-quickstart on Thu Apr 4 14:15:25 2019.
- You can adapt this file completely to your liking, but it should at least
- contain the root `toctree` directive.
-```
+
+
+
+
-# Welcome to the documentation for baal (**ba**yesian **a**ctive **l**earning)
-
-
-Star
Baal is a Bayesian active learning library.
We provide methods to estimate sampling from the posterior distribution
@@ -19,61 +13,23 @@ To know more on what is Bayesian active learning, see our [User guide](user_guid
We are a member of the PyTorch ecosystem, and we welcome contributions from the community.
If you have any questions, we are reachable on [Slack](https://join.slack.com/t/baal-world/shared_invite/zt-z0izhn4y-Jt6Zu5dZaV2rsAS9sdISfg).
-## Support
-
-For support, we have several ways to help you:
-
-* Our [FAQ](faq.md)
-* Submit an issue on Github [here](https://github.com/ElementAI/baal/issues/new/choose)
-* Join our [Slack](https://join.slack.com/t/baal-world/shared_invite/zt-z0izhn4y-Jt6Zu5dZaV2rsAS9sdISfg)!
+## Installation
-```eval_rst
-.. toctree::
- :caption: Learn more about Baal
- :maxdepth: 1
+Baal is available as a package on PyPI:
- User guide
- Active learning dataset and training loop classes
- Methods for approximating bayesian posteriors
- API Index
- FAQ
+`pip install baal`
-.. toctree ::
- :caption: Tutorials
- :maxdepth: 1
+??? "Additional dependencies for vision and NLP"
- How to use Baal with Label Studio
- How to do research and plot progress
- How to use in production
- How to use deep ensembles
+    `baal[nlp]` installs the dependencies needed for HuggingFace support.
-.. toctree ::
- :caption: Compatibility with other libraries
- :maxdepth: 1
-
- How to use with Pytorch Lightning
- How to use with HuggingFace
- How to use with Scikit-Learn
-
-.. toctree ::
- :caption: Technical Reports
- :maxdepth: 1
-
- Combining calibration and variational inference for active learning
- Double descend in active learning
- Can active learning mitigate bias in datasets
+    `baal[vision]` installs the dependencies for our Lightning-Flash integration.
-.. toctree::
- :caption: Literature and support
- :maxdepth: 2
- Background literature
- Cheat Sheet
-```
-
-## Indices and tables
+## Support
+
+For support, we have several ways to help you:
-```eval_rst
-* :ref:`genindex`
-* :ref:`search`
-```
+* Our [:material-help: FAQ](support/faq.md)
+* Submit an issue on Github [here](https://github.com/baal-org/baal/issues/new/choose)
+* Join our [:material-slack: Slack](https://join.slack.com/t/baal-world/shared_invite/zt-z0izhn4y-Jt6Zu5dZaV2rsAS9sdISfg)!
diff --git a/docs/javascripts/mathjax.js b/docs/javascripts/mathjax.js
new file mode 100644
index 00000000..06dbf38b
--- /dev/null
+++ b/docs/javascripts/mathjax.js
@@ -0,0 +1,16 @@
+window.MathJax = {
+ tex: {
+ inlineMath: [["\\(", "\\)"]],
+ displayMath: [["\\[", "\\]"]],
+ processEscapes: true,
+ processEnvironments: true
+ },
+ options: {
+ ignoreHtmlClass: ".*|",
+ processHtmlClass: "arithmatex"
+ }
+};
+
+document$.subscribe(() => {
+ MathJax.typesetPromise()
+})
diff --git a/docs/literature/index.md b/docs/literature/index.md
deleted file mode 100644
index 325468b8..00000000
--- a/docs/literature/index.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# Active learning literature
-
-This page is here to collect summaries of papers that focus on active learning.
-The idea is to share knowledge on recent developments in active learning.
-
-If you've read a paper recently, write a little summary in markdown, put it in
-the folder `docs/literature` and make a pull request. You can even do all of
-that right in the github web UI!
-
-```eval_rst
-.. toctree::
- :caption: Literature review
- :maxdepth: 1
- :glob:
-
- *
-```
-
----
\ No newline at end of file
diff --git a/docs/literature/more_papers.md b/docs/literature/more_papers.md
deleted file mode 100644
index 0326ab1a..00000000
--- a/docs/literature/more_papers.md
+++ /dev/null
@@ -1,12 +0,0 @@
-## Additional papers that are interesting
-
-In this section, we put additional papers that can be interesting.
-
-```eval_rst
-.. toctree::
- :maxdepth: 1
- :caption: Additional papers
- :glob:
-
- Additional papers/*
-```
\ No newline at end of file
diff --git a/docs/notebooks b/docs/notebooks
new file mode 120000
index 00000000..8f9a5b2e
--- /dev/null
+++ b/docs/notebooks
@@ -0,0 +1 @@
+../notebooks
\ No newline at end of file
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 27ffc2fd..4152a7b0 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,13 +1,5 @@
-sphinx>2
-sphinx_rtd_theme
-asteroid-sphinx-theme
-jupyter_sphinx
-Pygments>=2.6.1
-nbsphinx==0.8.6
-sphinx_automodapi
-sphinx-copybutton
-numpydoc
-recommonmark
-docutils==0.16
-Jinja2==2.11.3
-markupsafe==2.0.1
+mkdocs==1.4.0
+mkdocs-exclude-search==0.6.4
+mkdocs-jupyter==0.21.0
+mkdocstrings[python]==0.18.1
+Pygments==2.13.0
\ No newline at end of file
diff --git a/docs/reports/dirichlet_calibration.md b/docs/research/dirichlet_calibration.md
similarity index 60%
rename from docs/reports/dirichlet_calibration.md
rename to docs/research/dirichlet_calibration.md
index 33a6df2d..ebafe12e 100644
--- a/docs/reports/dirichlet_calibration.md
+++ b/docs/research/dirichlet_calibration.md
@@ -4,15 +4,12 @@ A [paper recently published at NeurIPS 2019](https://dirichletcal.github.io/) pr
To achieve that, they add a new linear layer at the end of the network and train it individually on a held-out set.
-Here is a figure from the authors' NeurIPS 2019 presentation. You can find the full presention on the website above.
-
-```eval_rst
-.. figure:: images/dirichlet_calib.png
- :width: 400px
- :height: 200px
- :alt: alternate text
- :align: center
-```
+Here is a figure from the authors' NeurIPS 2019 presentation. You can find the full presentation on the website above.
+![Figure from the authors' NeurIPS 2019 presentation](images/dirichlet_calib.png)
+
Our hypothesis is as follows: by modelling the uncertainty on a held-out set, we want to produce a better estimate of the overall uncertainty.
@@ -24,15 +21,15 @@ Current SotA methods for active learning rely on VI to estimate the model uncert
## Methodology
-Our methodology follows a standard active learning pipeline, but we add a new training set :math:`D_{L}` which is used to train the calibration layer. After training the model on the training set :math:`D_{train}` to convergence, we train it on this held-out set and train the newly added layer.
+Our methodology follows a standard active learning pipeline, but we add a new training set $D_{L}$ which is used to train the calibration layer. After training the model on the training set $D_{train}$ to convergence, we train the newly added layer on this held-out set.
-We call the augmented model :math:`M_{calib}`. We perform the sample selection using one of the following techniques:
+We call the augmented model $M_{calib}$. We perform the sample selection using one of the following techniques:
-* Entropy: :math:`\sum_c p_i \log(p_i)`
-* BALD using MC-Dropout: :math:`H[y \mid x, D_{L}] - E_{p(w \mid D_L)}(H[y \mid x, w])`
+* Entropy: $-\sum_c p_c \log(p_c)$
+* BALD using MC-Dropout: $H[y \mid x, D_{L}] - E_{p(w \mid D_L)}(H[y \mid x, w])$
* Uniform random selection
-Because we want to analyze the actual gain of using calibration, we compare the effect of using :math:`M` versus :math:`M_{calib}` across all techniques.
+Because we want to analyze the actual gain of using calibration, we compare the effect of using $M$ versus $M_{calib}$ across all techniques.
## Experiments
@@ -42,70 +39,51 @@ We test our hypothesis on CIFAR10 using a VGG-16. We initially label 1000 sample
We first want to ensure that calibration works properly. In Fig. 2, we show that throughout the active learning procedure, the calibrated loss is better than the non-calibrated loss.
-```eval_rst
-.. figure:: images/CBALDvsBALD.png
- :width: 400px
- :height: 200px
- :alt: alternate text
- :align: center
-
- Comparison between the calibrated loss and the uncalibrated loss.
-```
+![Comparison between the calibrated loss and the uncalibrated loss.](images/CBALDvsBALD.png)
+
Furthermore, we compute the ECE for both cases.
-```eval_rst
-.. figure:: images/CBALDvsBALDECE.png
- :width: 400px
- :height: 200px
- :alt: alternate text
- :align: center
-
- Comparison between ECE for both Calibrated BALD and BALD.
-```
+![Comparison between ECE for both Calibrated BALD and BALD.](images/CBALDvsBALDECE.png)
### Impact of calibration on active learning
For each method, we present the calibrated NLL at each active learning step.
-We want to compare the selection process between :math:`M` and :math:`M_{calib}`.
+We want to compare the selection process between $M$ and $M_{calib}$.
Our reasoning is as follows: we want to see whether the calibrated model picks better items than the normal one.
-To do so we make two experiments, one where we use :math:`M` to select the new samples and the other uses :math:`M_{calib}`.
+To do so, we run two experiments: one where we use $M$ to select the new samples and one where we use $M_{calib}$.
In both cases, we will get a calibrated model to compare the calibrated loss.
-```eval_rst
-.. figure:: images/BALDvsCBALD_active.png
- :width: 400px
- :height: 200px
- :alt: alternate text
- :align: center
-
- Comparison between a calibrated selector and an uncalibrated one using BALD.
-
-
-.. figure:: images/EntvsCEnt_active.png
- :width: 400px
- :height: 200px
- :alt: alternate text
- :align: center
-
- Comparison between a calibrated selector and an uncalibrated one using Entropy.
+![Comparison between a calibrated selector and an uncalibrated one using BALD.](images/BALDvsCBALD_active.png)
+
+![Comparison between a calibrated selector and an uncalibrated one using Entropy.](images/EntvsCEnt_active.png)
+
In addition, we show that BALD is still better in all cases.
-.. figure:: images/ALL_active.png
- :width: 400px
- :height: 200px
- :alt: alternate text
- :align: center
-
- Comparison between calibrated selectors.
-```
## Discussion
-While we have not seen improvments by using calibration on an active learning benchmark, we still find this report useful. Active learning is but a part of the Human-ai-interaction (HAII) process. By adding an easy to use calibration method, we can further the collaboration between the human and our model.
+While we have not seen improvements from using calibration on an active learning benchmark, we still find this report useful. Active learning is but one part of the human-AI interaction (HAII) process. By adding an easy-to-use calibration method, we can further the collaboration between the human and our model.
By giving more nuanced predictions, the model is deemed more trustworthy by the human annotator.
diff --git a/docs/reports/double_descend.md b/docs/research/double_descent.md
similarity index 81%
rename from docs/reports/double_descend.md
rename to docs/research/double_descent.md
index 129f3a66..7313a9f7 100644
--- a/docs/reports/double_descend.md
+++ b/docs/research/double_descent.md
@@ -40,47 +40,26 @@ We ran 4 categories of experiments:
Dataset: CIFAR10
Model: VGG16 trained on ImageNet
-```eval_rst
-.. figure:: images/doubledescend_03.png
- :width: 400px
- :height: 200px
- :alt: alternate text
- :align: center
-
- Using early stopping and reset the weights of the linear layers after each active learning step.
-```
-
-
-```eval_rst
-.. figure:: images/doubledescend_04.png
- :width: 400px
- :height: 200px
- :alt: alternate text
- :align: center
-
- Using early stopping and reset all the weights after each active learning step.
-```
-
-
-```eval_rst
-.. figure:: images/doubledescend_02.png
- :width: 400px
- :height: 200px
- :alt: alternate text
- :align: center
-
- Overfitting the training set and reset the weights of the linear layers after each active learning step.
-```
-
-```eval_rst
-.. figure:: images/doubledescend_01.png
- :width: 400px
- :height: 200px
- :alt: alternate text
- :align: center
-
- Overfitting the training set and reset all the weights after each active learning step.
-```
+![Using early stopping and resetting the weights of the linear layers after each active learning step.](images/doubledescend_03.png)
+
+![Using early stopping and resetting all the weights after each active learning step.](images/doubledescend_04.png)
+
+![Overfitting the training set and resetting the weights of the linear layers after each active learning step.](images/doubledescend_02.png)
+
+![Overfitting the training set and resetting all the weights after each active learning step.](images/doubledescend_01.png)
+
In the first two experiments, if we are using early stopping, the partial reset will provoke a double descent. A closer
look in the second diagram shows that although in the case of fully resetting the model weights, we can prevent the
@@ -95,12 +74,11 @@ with a negligible peak. Moreover, letting the model train well before performing
key to encourage smooth training, we show the difference between letting the model train for 10 epochs vs 5 epochs
before adding samples to the labelled set.
-```eval_rst
-NOTE: In the case of not using early stopping, `p` is used to show the number of epochs we train the model before
-estimating uncertainties and increase the labelled set.
-All in all, not using early stopping and fully resetting the model weights i.e. the last graph, could certify a smooth
-training procedure without being worried about other elements such as weight decay.
-```
+!!! note
+    When early stopping is not used, `p` denotes the number of epochs we train the model before
+    estimating uncertainties and increasing the labelled set.
+    All in all, not using early stopping and fully resetting the model weights (i.e. the last graph) can ensure a smooth
+    training procedure without having to worry about other elements such as weight decay.
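
A minimal sketch of the full weight reset discussed in the note; `train`, `predict`, `heuristic`, `n_al_steps`, and `query_size` are placeholders, not the code used for these experiments.

```python
import copy

initial_weights = copy.deepcopy(model.state_dict())  # snapshot at step 0

for step in range(n_al_steps):
    model.load_state_dict(initial_weights)   # full reset before retraining
    train(model, al_dataset)                 # placeholder training loop
    ranks = heuristic(predict(model, al_dataset.pool))
    al_dataset.label(ranks[:query_size])     # grow the labelled set
```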
### Our Hypothesis
diff --git a/docs/reports/images/ALL_active.png b/docs/research/images/ALL_active.png
similarity index 100%
rename from docs/reports/images/ALL_active.png
rename to docs/research/images/ALL_active.png
diff --git a/docs/reports/images/BALDvsCBALD_active.png b/docs/research/images/BALDvsCBALD_active.png
similarity index 100%
rename from docs/reports/images/BALDvsCBALD_active.png
rename to docs/research/images/BALDvsCBALD_active.png
diff --git a/docs/reports/images/CBALDvsBALD.png b/docs/research/images/CBALDvsBALD.png
similarity index 100%
rename from docs/reports/images/CBALDvsBALD.png
rename to docs/research/images/CBALDvsBALD.png
diff --git a/docs/reports/images/CBALDvsBALDECE.png b/docs/research/images/CBALDvsBALDECE.png
similarity index 100%
rename from docs/reports/images/CBALDvsBALDECE.png
rename to docs/research/images/CBALDvsBALDECE.png
diff --git a/docs/reports/images/EntvsCEnt_active.png b/docs/research/images/EntvsCEnt_active.png
similarity index 100%
rename from docs/reports/images/EntvsCEnt_active.png
rename to docs/research/images/EntvsCEnt_active.png
diff --git a/docs/reports/images/dirichlet_calib.png b/docs/research/images/dirichlet_calib.png
similarity index 100%
rename from docs/reports/images/dirichlet_calib.png
rename to docs/research/images/dirichlet_calib.png
diff --git a/docs/reports/images/doubledescend_01.png b/docs/research/images/doubledescend_01.png
similarity index 100%
rename from docs/reports/images/doubledescend_01.png
rename to docs/research/images/doubledescend_01.png
diff --git a/docs/reports/images/doubledescend_02.png b/docs/research/images/doubledescend_02.png
similarity index 100%
rename from docs/reports/images/doubledescend_02.png
rename to docs/research/images/doubledescend_02.png
diff --git a/docs/reports/images/doubledescend_03.png b/docs/research/images/doubledescend_03.png
similarity index 100%
rename from docs/reports/images/doubledescend_03.png
rename to docs/research/images/doubledescend_03.png
diff --git a/docs/reports/images/doubledescend_04.png b/docs/research/images/doubledescend_04.png
similarity index 100%
rename from docs/reports/images/doubledescend_04.png
rename to docs/research/images/doubledescend_04.png
diff --git a/docs/research/index.md b/docs/research/index.md
new file mode 100644
index 00000000..86598ed8
--- /dev/null
+++ b/docs/research/index.md
@@ -0,0 +1,12 @@
+# Bayesian deep active learning research
+
+Research in this field is quite dynamic, with multiple labs around the world working on this problem.
+
+In a nutshell, we want to:
+
+> Optimize labelling by maximizing the information obtained after each label.
+
+Another critical goal of our research is to better understand the sampling bias active learning creates.
+Recent research has shown that active learning creates more balanced, fairer datasets.
+
+We strongly suggest going through our [literature review](./literature/index.md).
diff --git a/docs/literature/Additional papers/dmi.md b/docs/research/literature/Additional papers/dmi.md
similarity index 100%
rename from docs/literature/Additional papers/dmi.md
rename to docs/research/literature/Additional papers/dmi.md
diff --git a/docs/literature/Additional papers/duq.md b/docs/research/literature/Additional papers/duq.md
similarity index 80%
rename from docs/literature/Additional papers/duq.md
rename to docs/research/literature/Additional papers/duq.md
index e7219528..206e118d 100644
--- a/docs/literature/Additional papers/duq.md
+++ b/docs/research/literature/Additional papers/duq.md
@@ -17,22 +17,22 @@ DUQ uses a RBF Network to compute centroids for each class. The model is trained
For a model $f$, a centroid matrix $W$ and a centroid $e$, we compute the similarity using an RBF kernel. The length scale $\sigma$ is a hyperparameter.
-``$`K_c(f_\theta, e_c) = exp(-\frac{\frac{1}{n}\mid \mid W_cf_\theta(x) - e_c\mid\mid^2_2}{2\sigma^2})`$``
+$K_c(f_\theta, e_c) = \exp\left(-\frac{\frac{1}{n}\| W_c f_\theta(x) - e_c \|^2_2}{2\sigma^2}\right)$
With this similarity, we can make a prediction by selecting the centroid with the highest similarity.
The loss function is now simply
-``$`L(x,y) = - \sum_c y_clog(K_c) + (1 - y_c)log(1-K_c)`$``,
+$L(x,y) = -\sum_c \big(y_c \log(K_c) + (1 - y_c)\log(1-K_c)\big)$,
-where ``$`K_c(f_\theta, e_c)=K_c`$``
+where $K_c(f_\theta, e_c)=K_c$
After each batch, we update the centroid matrix using an exponential moving average.
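
An illustrative numpy sketch of the kernel and the centroid update; the EMA form, `sigma`, and `gamma` are our simplifications, not the authors' code.

```python
import numpy as np

def rbf_similarity(Wf, e, sigma=0.1):
    # Wf: projected features W_c f_theta(x), shape [C, n]; e: centroids [C, n].
    sq_dist = ((Wf - e) ** 2).sum(axis=1) / Wf.shape[1]  # (1/n) ||.||_2^2
    return np.exp(-sq_dist / (2 * sigma ** 2))           # K_c per class

def ema_update(e, batch_class_means, gamma=0.999):
    # Exponential moving average of the centroids after each batch.
    return gamma * e + (1 - gamma) * batch_class_means
```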
### Regularization
-To avoid feature collapse, the authors introduce a gradient penalty directly applied to ``$`K_c`$``:
-``$`\lambda* (\mid\mid \nabla_x \sum_c K_c\mid\mid^2_2 - 1)^2`$``
-where 1 is the Lipschitz constant. In their experiments, they use ``$`\lambda=0.05`$``.
+To avoid feature collapse, the authors introduce a gradient penalty directly applied to $K_c$:
+$\lambda \left(\| \nabla_x \sum_c K_c \|^2_2 - 1\right)^2$
+where 1 is the target Lipschitz constant. In their experiments, they use $\lambda=0.05$.
In summary, this simple technique is faster and better than ensembles. It also shows that RBF networks work on large datasets.
diff --git a/docs/literature/Additional papers/gyolov3.md b/docs/research/literature/Additional papers/gyolov3.md
similarity index 100%
rename from docs/literature/Additional papers/gyolov3.md
rename to docs/research/literature/Additional papers/gyolov3.md
diff --git a/docs/literature/Additional papers/lightcoresets.md b/docs/research/literature/Additional papers/lightcoresets.md
similarity index 62%
rename from docs/literature/Additional papers/lightcoresets.md
rename to docs/research/literature/Additional papers/lightcoresets.md
index a1108ef0..7d9b4d32 100644
--- a/docs/literature/Additional papers/lightcoresets.md
+++ b/docs/research/literature/Additional papers/lightcoresets.md
@@ -6,16 +6,16 @@
This paper presents a novel Coreset algorithm called *Light Coreset*.
-Let ``$`X`$`` be the dataset, ``$`d`$`` a distance function and ``$`\mu(X)`$`` the mean of the dataset per feature.
+Let $X$ be the dataset, $d$ a distance function and $\mu(X)$ the mean of the dataset per feature.
-We compute the distribution ``$`q`$``with:
+We compute the distribution $q$ with:
-``$`q(x) = 0.5 * \frac{1}{\vert X \vert} + 0.5 * \frac{d(x, \mu(X))^2}{\sum_{x' \in X} d(x', \mu(X))^2}`$``,
-where ``$`x \in X`$``.
+$q(x) = 0.5 \cdot \frac{1}{\vert X \vert} + 0.5 \cdot \frac{d(x, \mu(X))^2}{\sum_{x' \in X} d(x', \mu(X))^2}$,
+where $x \in X$.
-We can then select ``$`m`$`` samples by sampling from this distribution. For their experiments, they used the L2 distance for *d*.
+We can then select $m$ samples by sampling from this distribution. For their experiments, they used the L2 distance for $d$.
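
A small numpy sketch of sampling from $q$ with the squared L2 distance; drawing without replacement is our simplification for selection.

```python
import numpy as np

def light_coreset_indices(X, m, seed=0):
    # Sample m indices from the light-coreset distribution q.
    rng = np.random.default_rng(seed)
    mu = X.mean(axis=0)                    # per-feature mean of the dataset
    sq_dist = ((X - mu) ** 2).sum(axis=1)  # d(x, mu(X))^2 with d = L2
    q = 0.5 / len(X) + 0.5 * sq_dist / sq_dist.sum()
    return rng.choice(len(X), size=m, replace=False, p=q)
```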
-Let A be the first part of the equation ``$`q`$`` and B the second. The authors offers the following explanation :
+Let $A$ be the first term of the equation for $q$ and $B$ the second. The authors offer the following explanation:
>The first component (A) is the uniform distribution and ensures
that all points are sampled with nonzero probability. The second
diff --git a/docs/literature/Additional papers/sparse_selection.md b/docs/research/literature/Additional papers/sparse_selection.md
similarity index 79%
rename from docs/literature/Additional papers/sparse_selection.md
rename to docs/research/literature/Additional papers/sparse_selection.md
index 0e029c58..91244022 100644
--- a/docs/literature/Additional papers/sparse_selection.md
+++ b/docs/research/literature/Additional papers/sparse_selection.md
@@ -11,16 +11,16 @@ Published at NeurIPS 2019
A known issue of BALD, when used in batch active learning, is that it selects highly correlated samples.
By combining BNNs with a novel coreset algorithm, the authors propose a way to estimate the true posterior data distribution.
-In brief, they want to select a batch ``$`D'`$`` such that the posterior distribution best approximate the complete data posterior.
+In brief, they want to select a batch $D'$ such that the posterior distribution best approximates the complete data posterior.
Because we do not know the complete posterior, the authors approximate it using the predictive distribution. The idea is summarized in Eq. 4.
![](../images/sparse_selection/eq4.png)
-This measure can be optimized using Frank-Wolfe which uses the dot-product ``$`\langle L_m, L_n\rangle`$`` to estimate the affectations.
+This measure can be optimized using Frank-Wolfe, which uses the dot-product $\langle L_m, L_n\rangle$ to estimate the assignments.
-While a closed-form procedure exists to compute this dot-product, it is expensive to run (``$`O(||P||^2)`$``).
-The authors suggest the use of random projections drawn from the parameters distribution ``$`\hat\pi`$``.
-This approximation makes the algorithm ``$`O(||P||J)`$``, where J is the number of samples drawn from ``$`\hat\pi`$``.
+While a closed-form procedure exists to compute this dot-product, it is expensive to run ($O(||P||^2)$).
+The authors suggest the use of random projections drawn from the parameters distribution $\hat\pi$.
+This approximation makes the algorithm $O(||P||J)$, where $J$ is the number of samples drawn from $\hat\pi$.
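+
+A rough sketch of the random-projection trick, assuming a user-supplied log-likelihood function and `J` parameter samples (all names here are ours):
+
+```python
+import numpy as np
+
+def project(log_lik, x, thetas):
+    # Embed a point as a J-dimensional vector: one scaled log-likelihood per theta_j ~ pi_hat.
+    return np.array([log_lik(x, theta) for theta in thetas]) / np.sqrt(len(thetas))
+
+# <L_m, L_n> is then approximated by an ordinary dot product in R^J, i.e. O(J) per pair:
+# np.dot(project(log_lik, x_m, thetas), project(log_lik, x_n, thetas))
+```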
diff --git a/docs/literature/Additional papers/vaal.md b/docs/research/literature/Additional papers/vaal.md
similarity index 100%
rename from docs/literature/Additional papers/vaal.md
rename to docs/research/literature/Additional papers/vaal.md
diff --git a/docs/literature/images/Baalscheme.svg b/docs/research/literature/images/Baalscheme.svg
similarity index 100%
rename from docs/literature/images/Baalscheme.svg
rename to docs/research/literature/images/Baalscheme.svg
diff --git a/docs/literature/images/GYOLOV3/fig1.png b/docs/research/literature/images/GYOLOV3/fig1.png
similarity index 100%
rename from docs/literature/images/GYOLOV3/fig1.png
rename to docs/research/literature/images/GYOLOV3/fig1.png
diff --git a/docs/literature/images/GYOLOV3/fig2.png b/docs/research/literature/images/GYOLOV3/fig2.png
similarity index 100%
rename from docs/literature/images/GYOLOV3/fig2.png
rename to docs/research/literature/images/GYOLOV3/fig2.png
diff --git a/docs/literature/images/GYOLOV3/fig3.png b/docs/research/literature/images/GYOLOV3/fig3.png
similarity index 100%
rename from docs/literature/images/GYOLOV3/fig3.png
rename to docs/research/literature/images/GYOLOV3/fig3.png
diff --git a/docs/literature/images/dmi/fig3.png b/docs/research/literature/images/dmi/fig3.png
similarity index 100%
rename from docs/literature/images/dmi/fig3.png
rename to docs/research/literature/images/dmi/fig3.png
diff --git a/docs/literature/images/experiment_results/iterations_mcdc.png b/docs/research/literature/images/experiment_results/iterations_mcdc.png
similarity index 100%
rename from docs/literature/images/experiment_results/iterations_mcdc.png
rename to docs/research/literature/images/experiment_results/iterations_mcdc.png
diff --git a/docs/literature/images/lightcoreset/q_func.png b/docs/research/literature/images/lightcoreset/q_func.png
similarity index 100%
rename from docs/literature/images/lightcoreset/q_func.png
rename to docs/research/literature/images/lightcoreset/q_func.png
diff --git a/docs/literature/images/logo_original.png b/docs/research/literature/images/logo_original.png
similarity index 100%
rename from docs/literature/images/logo_original.png
rename to docs/research/literature/images/logo_original.png
diff --git a/docs/literature/images/repo_logo_25.jpg b/docs/research/literature/images/repo_logo_25.jpg
similarity index 100%
rename from docs/literature/images/repo_logo_25.jpg
rename to docs/research/literature/images/repo_logo_25.jpg
diff --git a/docs/literature/images/repo_logo_25_no_corner.svg b/docs/research/literature/images/repo_logo_25_no_corner.svg
similarity index 100%
rename from docs/literature/images/repo_logo_25_no_corner.svg
rename to docs/research/literature/images/repo_logo_25_no_corner.svg
diff --git a/docs/literature/images/sparse_selection/eq4.png b/docs/research/literature/images/sparse_selection/eq4.png
similarity index 100%
rename from docs/literature/images/sparse_selection/eq4.png
rename to docs/research/literature/images/sparse_selection/eq4.png
diff --git a/docs/literature/images/sparse_selection/fig4.png b/docs/research/literature/images/sparse_selection/fig4.png
similarity index 100%
rename from docs/literature/images/sparse_selection/fig4.png
rename to docs/research/literature/images/sparse_selection/fig4.png
diff --git a/docs/literature/images/vaal/fig1.png b/docs/research/literature/images/vaal/fig1.png
similarity index 100%
rename from docs/literature/images/vaal/fig1.png
rename to docs/research/literature/images/vaal/fig1.png
diff --git a/docs/literature/images/vaal/fig2.png b/docs/research/literature/images/vaal/fig2.png
similarity index 100%
rename from docs/literature/images/vaal/fig2.png
rename to docs/research/literature/images/vaal/fig2.png
diff --git a/docs/literature/core-papers.md b/docs/research/literature/index.md
similarity index 85%
rename from docs/literature/core-papers.md
rename to docs/research/literature/index.md
index 0606e5fd..02f7dc58 100644
--- a/docs/literature/core-papers.md
+++ b/docs/research/literature/index.md
@@ -1,39 +1,48 @@
-# The theory behind Bayesian active learning
-
-In this document, we keep a list of the papers to get you started in Bayesian deep learning and Bayesian active learning.
-
-We hope to include a summary for each of then in the future, but for now we have this list with some notes.
-
-
-### How to estimate uncertainty in Deep Learning networks
-
-* [Excellent tutorial from AGW on Bayesian Deep Learning](https://icml.cc/virtual/2020/tutorial/5750)
- * This is inspired by his publication [Bayesian Deep Learning and a Probabilistic Perspective of Generalization](https://arxiv.org/abs/2002.08791)
-* [Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning](https://arxiv.org/pdf/1506.02142.pdf) (Gal and Ghahramani, 2016)
- * This describes Monte-Carlo Dropout, a way to estimate uncertainty through stochastic dropout at test time
-* [Bayesian Uncertainty Estimation for Batch Normalized Deep Networks](https://arxiv.org/abs/1802.06455) (Teye et al. 2018)
- * This describes Monte-Carlo BatchNorm, a way to estimate uncertainty through random batch norm parameters at test time
-* [Bayesian Deep Learning and a Probabilistic Perspective of Generalization](https://arxiv.org/abs/2002.08791) (Gordon Wilson and Izmailov, 2020)
- * Presentation of multi-SWAG a mix between VI and Ensembles.
-* [Advances in Variational inference](https://arxiv.org/pdf/1711.05597.pdf) (Zhang et al, 2018)
- * Gives a quick introduction to VI and the most recent advances.
-* [A Simple Baseline for Bayesian Uncertainty in Deep Learning](https://arxiv.org/abs/1902.02476) (Maddox et al. 2019)
- * Presents SWAG, an easy way to create ensembles.
-
-
-
-
-### Bayesian active learning
-* [Deep Bayesian Active Learning with Image Data](https://arxiv.org/pdf/1703.02910.pdf) (Gal and Islam and Ghahramani, 2017)
- * Fundamental paper on how to do Bayesian active learning. A must read.
-* [Sampling bias in active learning](http://cseweb.ucsd.edu/~dasgupta/papers/twoface.pdf) (Dasgupta 2009)
- * Presents sampling bias and how to solve it by combining heuristics and random selection.
-
-* [Bayesian Active Learning for Classification and Preference Learning](https://arxiv.org/pdf/1112.5745.pdf) (Houlsby et al. 2011)
- * Fundamental paper on one of the main heuristic BALD.
-
-
-### Bayesian active learning on NLP
-
-* [Deep Bayesian Active Learning for Natural Language Processing: Results of a Large-Scale Empirical Study](https://arxiv.org/abs/1808.05697) (Siddhant and Lipton, 2018)
- * Experimental paper on how to use Bayesian active learning on NLP tasks.
+# Active learning literature
+
+This page is here to collect summaries of papers that focus on active learning.
+The idea is to share knowledge on recent developments in active learning.
+
+If you've read a paper recently, write a little summary in markdown, put it in
+the folder `docs/research/literature` and make a pull request. You can even do all of
+that right in the Github web UI!
+
+## The theory behind Bayesian active learning
+
+In this document, we keep a list of the papers to get you started in Bayesian deep learning and Bayesian active learning.
+
+We hope to include a summary for each of them in the future, but for now we have this list with some notes.
+
+
+### How to estimate uncertainty in Deep Learning networks
+
+* [Excellent tutorial from AGW on Bayesian Deep Learning](https://icml.cc/virtual/2020/tutorial/5750)
+ * This is inspired by his publication [Bayesian Deep Learning and a Probabilistic Perspective of Generalization](https://arxiv.org/abs/2002.08791)
+* [Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning](https://arxiv.org/pdf/1506.02142.pdf) (Gal and Ghahramani, 2016)
+ * This describes Monte-Carlo Dropout, a way to estimate uncertainty through stochastic dropout at test time
+* [Bayesian Uncertainty Estimation for Batch Normalized Deep Networks](https://arxiv.org/abs/1802.06455) (Teye et al. 2018)
+ * This describes Monte-Carlo BatchNorm, a way to estimate uncertainty through random batch norm parameters at test time
+* [Bayesian Deep Learning and a Probabilistic Perspective of Generalization](https://arxiv.org/abs/2002.08791) (Gordon Wilson and Izmailov, 2020)
+ * Presentation of multi-SWAG, a mix between VI and ensembles.
+* [Advances in Variational inference](https://arxiv.org/pdf/1711.05597.pdf) (Zhang et al, 2018)
+ * Gives a quick introduction to VI and the most recent advances.
+* [A Simple Baseline for Bayesian Uncertainty in Deep Learning](https://arxiv.org/abs/1902.02476) (Maddox et al. 2019)
+ * Presents SWAG, an easy way to create ensembles.
+
+
+
+
+### Bayesian active learning
+* [Deep Bayesian Active Learning with Image Data](https://arxiv.org/pdf/1703.02910.pdf) (Gal and Islam and Ghahramani, 2017)
+ * Fundamental paper on how to do Bayesian active learning. A must read.
+* [Sampling bias in active learning](http://cseweb.ucsd.edu/~dasgupta/papers/twoface.pdf) (Dasgupta 2009)
+ * Presents sampling bias and how to solve it by combining heuristics and random selection.
+
+* [Bayesian Active Learning for Classification and Preference Learning](https://arxiv.org/pdf/1112.5745.pdf) (Houlsby et al. 2011)
+ * Fundamental paper on one of the main heuristics, BALD.
+
+
+### Bayesian active learning on NLP
+
+* [Deep Bayesian Active Learning for Natural Language Processing: Results of a Large-Scale Empirical Study](https://arxiv.org/abs/1808.05697) (Siddhant and Lipton, 2018)
+ * Experimental paper on how to use Bayesian active learning on NLP tasks.
diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css
new file mode 100644
index 00000000..61f80500
--- /dev/null
+++ b/docs/stylesheets/extra.css
@@ -0,0 +1,3 @@
+img.rounded-corners {
+ border-radius: 50%;
+}
\ No newline at end of file
diff --git a/docs/faq.md b/docs/support/faq.md
similarity index 95%
rename from docs/faq.md
rename to docs/support/faq.md
index d96d1400..8fd9b1f3 100644
--- a/docs/faq.md
+++ b/docs/support/faq.md
@@ -1,3 +1,8 @@
+---
+search:
+ boost: 2
+---
+
# Baal FAQ
If you have more questions, please submit an issue, and we will include it here!
@@ -103,11 +108,11 @@ al_dataset.label_randomly(10)
pool = al_dataset.pool
```
-From a rigorous point of view: ``$`D = ds `$`` , ``$`D_L=al\_dataset `$`` and ``$`D_U = D \setminus D_L = pool `$``.
-Then, we train our model on ``$`D_L `$`` and compute the uncertainty on ``$`D_U `$``. The most uncertains samples are
-labelled and added to ``$`D_L `$``, removed from ``$`D_U `$``.
+From a rigorous point of view: $D = ds$, $D_L = al\_dataset$ and $D_U = D \setminus D_L = pool$.
+Then, we train our model on $D_L$ and compute the uncertainty on $D_U$. The most uncertain samples are
+labelled, added to $D_L$, and removed from $D_U$.
-Let a method `query_human` performs the annotations, we can label our dataset using indices relative to``$`D_U `$``.
+Let a method `query_human` perform the annotations; we can label our dataset using indices relative to $D_U$.
This assumes that your dataset class `YourDataset` has a method named `label` which has the following
definition: `def label(self, idx, value)` where we give the label for index `idx`. There the index is not relative to
the pool, so you don't have to worry about it.
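+
+A hedged sketch of one labelling step under these notations; `heuristic` and `query_human` are placeholders from the text above, not Baal APIs:
+
+```python
+ranks = heuristic(predictions)                    # indices relative to the pool (D_U)
+labels = [query_human(pool[idx]) for idx in ranks[:10]]
+al_dataset.label(ranks[:10], labels)              # Baal maps pool indices back to D for you
+```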
@@ -135,7 +140,7 @@ active_dataset.label(ranks, labels)
Bayesian active learning is a relatively small field with a lot of unknowns. This section aims at presenting some of our
findings so that newcomers can quickly learn.
-Don't forget to look at our [literature review](../literature/index.md) for a good introduction to the field.
+Don't forget to look at our [literature review](../research/literature/index.md) for a good introduction to the field.
### Should you use early stopping?
diff --git a/docs/support/index.md b/docs/support/index.md
new file mode 100644
index 00000000..20d656be
--- /dev/null
+++ b/docs/support/index.md
@@ -0,0 +1,9 @@
+# Support
+
+For support, we have several ways to help you:
+
+* Our [:material-help: FAQ](faq.md)
+* Submit an issue on Github [here](https://github.com/baal-org/baal/issues/new/choose)
+* Join our [:material-slack: Slack](https://join.slack.com/t/baal-world/shared_invite/zt-z0izhn4y-Jt6Zu5dZaV2rsAS9sdISfg)!
+ * General questions can be asked under the #questions channel
+
\ No newline at end of file
diff --git a/docs/tutorials/index.md b/docs/tutorials/index.md
new file mode 100644
index 00000000..c1e3095b
--- /dev/null
+++ b/docs/tutorials/index.md
@@ -0,0 +1,17 @@
+# Tutorials
+
+Tutorials are split into two sections, "How-to" and "Compatibility". The first focuses on Baal's capabilities and the
+latter on how we integrate with other common frameworks such as Label Studio, HuggingFace or Lightning Flash.
+
+## :material-file-tree: How to
+
+* [Run an active learning experiment](notebooks/active_learning_process.ipynb)
+* [Active learning in production](notebooks/baal_prod_cls.ipynb)
+* [Deep Ensembles](../notebooks/deep_ensemble.ipynb)
+
+## :material-file-tree: Compatibility
+
+* [:material-link: Lightning Flash](https://devblog.pytorchlightning.ai/active-learning-made-simple-using-flash-and-baal-2216df6f872c)
+* [HuggingFace](../notebooks/compatibility/nlp_classification.ipynb)
+* [Scikit-Learn](../notebooks/compatibility/sklearn_tutorial.ipynb)
+* [Label Studio](./label-studio.md)
\ No newline at end of file
diff --git a/docs/tutorials/label-studio.md b/docs/tutorials/label-studio.md
index 003e6021..c585fbfe 100644
--- a/docs/tutorials/label-studio.md
+++ b/docs/tutorials/label-studio.md
@@ -4,24 +4,32 @@
In this tutorial, we will see how to use Baal inside of Label Studio, a widely known labelling tool.
-By using Bayesian active learning in your labelling setup, you will be able to label only the most informative examples. This will avoid labelling duplicates and easy examples.
+By using Bayesian active learning in your labelling setup, you will be able to label only the most informative examples.
+This will avoid labelling duplicates and easy examples.
-This is also a good way to start the conversation between your labelling team and your machine learning team as they need to communicate early in the process!
+This is also a good way to start the conversation between your labelling team and your machine learning team as they
+need to communicate early in the process!
-We will built upon Label Studio's [Pytorch transfer learning](https://github.com/heartexlabs/label-studio-ml-backend/blob/master/label_studio_ml/examples/pytorch_transfer_learning.py) example, so be sure to download it and try to run it before adding Baal to it. The full example can be found [here](https://gist.github.com/Dref360/288845b2fbb0504e4cfc216a76b547e7).
+We will build upon Label
+Studio's [Pytorch transfer learning](https://github.com/heartexlabs/label-studio-ml-backend/blob/master/label_studio_ml/examples/pytorch_transfer_learning.py)
+example, so be sure to download it and try to run it before adding Baal to it. The full example can be
+found [here](https://gist.github.com/Dref360/288845b2fbb0504e4cfc216a76b547e7).
More info:
+
* [Baal documentation](https://baal.readthedocs.io/en/latest/)
* [Bayesian Deep Learning cheatsheet](https://baal.readthedocs.io/en/latest/user_guide/baal_cheatsheet.html)
Support:
+
* [Github](https://github.com/ElementAI/baal)
* [Gitter](https://gitter.im/eai-baal/community)
-
## Installing Baal
-To install Baal, you will need to add `baal` in the [generated `Dockerfile`](https://github.com/heartexlabs/label-studio-ml-backend/blob/master/label_studio_ml/default_configs/Dockerfile).
+To install Baal, you will need to add `baal` to
+the [generated `Dockerfile`](https://github.com/heartexlabs/label-studio-ml-backend/blob/master/label_studio_ml/default_configs/Dockerfile).
```dockerfile
# Dockerfile
@@ -30,7 +38,7 @@ RUN pip install --no-cache \
uwsgi==2.0.19.1 \
supervisor==4.2.2 \
label-studio==1.0.2 \
- baal==1.3.0 \
+ baal \
click==7.1.2 \
git+https://github.com/heartexlabs/label-studio-ml-backend
```
@@ -39,17 +47,19 @@ and when developing, you should install Baal in your local environment.
`pip install baal==1.3.0`
-
## Modifying `pytorch_transfer_learning.py`
-The overall changes are pretty minor, so we will go step by step, specifying the class and method we are modifying. Again, the full script is available [here](https://gist.github.com/Dref360/288845b2fbb0504e4cfc216a76b547e7).
+The overall changes are pretty minor, so we will go step by step, specifying the class and method we are modifying.
+Again, the full script is available [here](https://gist.github.com/Dref360/288845b2fbb0504e4cfc216a76b547e7).
### Model
-The simplest way of doing Bayesian uncertainty estimation in active learning is MC-Dropout (Gal and Ghahramani, 2015) which requires Dropout layers. To use this, we use VGG-16 instead of the default ResNet-18.
+The simplest way of doing Bayesian uncertainty estimation in active learning is MC-Dropout (Gal and Ghahramani, 2016),
+which requires Dropout layers. To enable this, we use VGG-16 instead of the default ResNet-18.
```python
from baal.bayesian.dropout import patch_module
+
# ImageClassifier.__init__
self.model = models.vgg16(pretrained=True)
last_layer_idx = 6
@@ -59,7 +69,11 @@ self.model.classifier[last_layer_idx] = nn.Linear(num_ftrs, num_classes)
self.model = patch_module(self.model)
```
-Next, we will wrap our model using `baal.modelwrapper.ModelWrapper` from Baal which will simplify the different loops. If you use another framework, feel free to checkout our [Pytorch Lightning integration](https://baal.readthedocs.io/en/latest/notebooks/compatibility/pytorch_lightning.html) and our [HuggingFace integration](https://baal.readthedocs.io/en/latest/notebooks/compatibility/nlp_classification.html).
+Next, we will wrap our model using `baal.modelwrapper.ModelWrapper` from Baal, which will simplify the training, testing and prediction loops.
+If you use another framework, feel free to check out
+our [Pytorch Lightning integration](https://baal.readthedocs.io/en/latest/notebooks/compatibility/pytorch_lightning.html)
+and our [HuggingFace integration](https://baal.readthedocs.io/en/latest/notebooks/compatibility/nlp_classification.html).
```python
# ImageClassifier.__init__
@@ -85,10 +99,11 @@ def train(self, dataset, num_epochs=5):
return self.model
```
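+
+The wrapping step itself is elided by the hunk above; a minimal sketch of what it looks like (the criterion is an assumption):
+
+```python
+from torch.nn import CrossEntropyLoss
+from baal.modelwrapper import ModelWrapper
+
+# ImageClassifier.__init__
+self.wrapper = ModelWrapper(self.model, criterion=CrossEntropyLoss())
+```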
-
### Prediction
-We can draw multiple predictions from the model's parameter distribution using MC-Dropout. In this script we will make 20 predictions per example:
+We can draw multiple predictions from the model's parameter distribution using MC-Dropout. In this script, we will make
+20 predictions per example:
+
```python
# ImageClassifier
def predict(self, image_urls):
@@ -98,7 +113,9 @@ def predict(self, image_urls):
```
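+
+Inside `predict`, the call that draws the 20 stochastic predictions looks roughly like this; the dataset variable is illustrative and the `predict_on_dataset` signature comes from the cheat sheet:
+
+```python
+# Shape: [n_samples, n_classes, 20] -- the last axis holds the MC-Dropout iterations.
+logits = self.wrapper.predict_on_dataset(dataset, batch_size=32, iterations=20, use_cuda=False)
+```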
-In `ImageClassifierAPI` we will leverage this set of predictions and BALD (Houlsby et al, 2013) to estimate the model's uncertainty and to get the "average prediction" which would be more trustworthy:
+In `ImageClassifierAPI`, we will leverage this set of predictions and BALD (Houlsby et al., 2011) to estimate the model's
+uncertainty and to get the "average prediction", which is more trustworthy:
+
```python
# ImageClassifierAPI.predict
@@ -109,7 +126,6 @@ predicted_label_indices = np.argmax(average_prediction, axis=1)
predicted_scores = BALD().get_uncertainties(logits)
```
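+
+The `average_prediction` used above is the Bayesian model average; per the cheat sheet, it is simply the mean over the iteration axis:
+
+```python
+average_prediction = logits.mean(-1)  # average the 20 MC-Dropout predictions per sample
+```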
-
## Launching LabelStudio
Following Label Studio tutorial, you can start your ML Backend as usual.
@@ -117,40 +133,33 @@ In the Settings, do not forget to checkbox all boxes:
![](https://i.imgur.com/4vcj2u8.png)
-
-and to use active learning, order by Predictions score:
+and to use active learning, order by Predictions score:
![](https://i.imgur.com/cGVngqw.png)
-
## Labeling in action!
-To test this setup, we imported in Label Studio a subset of [MIO-TCD](http://podoce.dinf.usherbrooke.ca/), a dataset that is similar to real production data. This dataset suffers from heavy class imbalance, the class *car* represents 90% of all images in the dataset.
+To test this setup, we imported into Label Studio a subset of [MIO-TCD](http://podoce.dinf.usherbrooke.ca/), a dataset
+that is similar to real production data. This dataset suffers from heavy class imbalance: the class *car* represents 90%
+of all images in the dataset.
-After labelling randomly 100 images, I start training my model. On a subset of 10k unlabelled images, we get the following most uncertain predictions:
+After randomly labelling 100 images, we start training the model. On a subset of 10k unlabelled images, we get the
+following most uncertain predictions:
-```eval_rst
-.. |logo1| image:: https://i.imgur.com/7LuI4qf.jpg
- :align: middle
-.. |logo2| image:: https://i.imgur.com/YjViSz6.jpg
- :align: middle
-.. |logo3| image:: https://i.imgur.com/9SyYMfR.jpg
- :align: middle
+| ![](https://i.imgur.com/7LuI4qf.jpg) | ![](https://i.imgur.com/YjViSz6.jpg) | ![](https://i.imgur.com/9SyYMfR.jpg) |
+|--------------------------------------|--------------------------------------|---------------------------------------|
+| Articulated Truck | Bicycle | Background |
-+-------------------+---------+------------+
-| |logo1| | |logo2| | |logo3| |
-+-------------------+---------+------------+
-| Articulated Truck | Bicycle | Background |
-+-------------------+---------+------------+
-```
+The model has seen enough cars and wants to label new classes, as they would be the most informative. If we continue
+labelling, we will see a similar behavior, where the class *car* is undersampled and the others are oversampled.
-
-The model has seen enough cars, and wants to label new classes as they would be the most informatives. If we continue labelling, we will see a similar behavior, where the class *car* is undersampled and the others are oversampled.
-
-In [Atighehchian et al. 2019](https://arxiv.org/abs/2006.09916), we compare BALD to Uniform sampling on this dataset and we get better performance on underrepresented classes.
+In [Atighehchian et al. 2020](https://arxiv.org/abs/2006.09916), we compare BALD to uniform sampling on this dataset and
+obtain better performance on underrepresented classes.
In the image below, we have the F1 for two underrepresented classes:
![](https://i.imgur.com/dWP7QIJ.png)
-
-**In conlusion**, we can now use Bayesian active learning in Label Studio which would help your labelling process be more efficient. Please do not hesitate to reach out on our Gitter or on Label Studio's [Slack](http://slack.labelstud.io.s3-website-us-east-1.amazonaws.com/?source=site-header) if you have feedback or questions.
+**In conclusion**, we can now use Bayesian active learning in Label Studio, which will make your labelling process
+more efficient. Please do not hesitate to reach out on our Gitter or on Label
+Studio's [Slack](http://slack.labelstud.io.s3-website-us-east-1.amazonaws.com/?source=site-header) if you have feedback
+or questions.
diff --git a/docs/user_guide/baal_cheatsheet.md b/docs/user_guide/baal_cheatsheet.md
index 298d7862..5d10f0b8 100644
--- a/docs/user_guide/baal_cheatsheet.md
+++ b/docs/user_guide/baal_cheatsheet.md
@@ -7,9 +7,9 @@ In the table below, we have a mapping between common equations and the Baal API.
Here are the types for all variables needed.
```python
-model : torch.nn.Module
-wrapper : baal.ModelWrapper
-dataset: torch.utils.data_utils.Dataset
+model: torch.nn.Module
+wrapper: baal.ModelWrapper
+dataset: torch.utils.data_utils.Dataset
bald = baal.active.heuristics.BALD()
entropy = baal.active.heuristics.Entropy()
```
@@ -18,17 +18,12 @@ We assume that `baal.bayesian.dropout.patch_module` has been applied to the mode
`model = baal.bayesian.dropout.patch_module(model)`
-```eval_rst
-.. csv-table:: Baal cheat sheet
- :header: "Description", "Equation", "Baal"
- :widths: 20, 20, 40
-
- "Bayesian Model Averaging", ":math:`\hat{T} = p(y \mid x, {\cal D})= \int p(y \mid x, \theta)p(\theta \mid D) d\theta`", "`wrapper.predict_on_dataset(dataset, batch_size=B, iterations=I, use_cuda=True).mean(-1)`"
- "MC-Dropout", ":math:`T = \{p(y\mid x_j, \theta_i)\} \mid x_j \in {\cal D}' ,i \in \{1, \ldots, I\}`", "`wrapper.predict_on_dataset(dataset, batch_size=B, iterations=I, use_cuda=True)`"
- "BALD", ":math:`{\cal I}[y, \theta \mid x, {\cal D}] = {\cal H}[y \mid x, {\cal D}] - {\cal E}_{p(\theta \mid {\cal D})}[{\cal H}[y \mid x, \theta]]`", "`bald.get_uncertainties(T)`"
- "Entropy", ":math:`\sum_c \hat{T}_c \log(\hat{T}_c)`", "`entropy.get_uncertainties(T)`"
-
-```
+| Description | Equation | Baal |
+|--------------------------|----------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------|
+| Bayesian Model Averaging | $\hat{T} = p(y \mid x, {\cal D})= \int p(y \mid x, \theta)p(\theta \mid D) d\theta$ | `wrapper.predict_on_dataset(dataset, batch_size=B, iterations=I, use_cuda=True).mean(-1)` |
+| MC-Dropout | $T = \{p(y\mid x_j, \theta_i)\} \mid x_j \in {\cal D}' ,i \in \{1, \ldots, I\}$ | `wrapper.predict_on_dataset(dataset, batch_size=B, iterations=I, use_cuda=True)` |
+| BALD | ${\cal I}[y, \theta \mid x, {\cal D}] = {\cal H}[y \mid x, {\cal D}] - {\cal E}_{p(\theta \mid {\cal D})}[{\cal H}[y \mid x, \theta]]$ | `bald.get_uncertainties(T)` |
+| Entropy                  | $-\sum_c \hat{T}_c \log(\hat{T}_c)$                                                                                                     | `entropy.get_uncertainties(T)`                                                              |
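+
+A short end-to-end sketch tying the table's rows together (batch size and iteration count are arbitrary):
+
+```python
+T = wrapper.predict_on_dataset(dataset, batch_size=32, iterations=20, use_cuda=True)  # MC-Dropout
+bma = T.mean(-1)                         # Bayesian Model Averaging
+uncertainty = bald.get_uncertainties(T)  # one BALD score per sample
+```
+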
**Contributing**
diff --git a/docs/user_guide/heuristics.md b/docs/user_guide/heuristics.md
new file mode 100644
index 00000000..5c87f89f
--- /dev/null
+++ b/docs/user_guide/heuristics.md
@@ -0,0 +1,34 @@
+# Active learning heuristics
+
+**Heuristics** take a set of predictions and output the order in which the samples should be labelled.
+
+A simple heuristic would be to prioritize items where the model had low confidence.
+We will cover the two main heuristics: **Entropy** and **BALD**.
+
+
+### Entropy
+
+The goal of this heuristic is to maximize information. To do so, we will compute the entropy of each prediction before ordering them.
+
+Let $p_{c}(x)$ be the probability that input $x$ belongs to class $c$. The entropy can be computed as:
+
+$$
+H(x) = -\sum_c^C p_c(x) \log p_c(x)
+$$
+
+This score reflects the informativeness of knowing the true label of $x$.
+Naturally, the next item to label would be $\arg\max_{x \in {\cal D}} H(x)$, where ${\cal D}$ is our dataset.
+
+A drawback of this method is that it doesn't differentiate between *aleatoric* uncertainty and *epistemic* uncertainty.
+To differentiate between the two, we will use BALD.
+
+### BALD
+
+Bayesian active learning by disagreement, or BALD (Houlsby et al. 2011), is the basis of most modern active learning heuristics.
+
+From a Bayesian model $f$, we draw $I$ predictions per sample $x$.
+
+Then, we want to maximize the mutual information between a prediction and the model's parameters. This is done by looking at how the predictions disagree with each other.
+If the prediction "flips" often, it means that the item is close to a decision boundary and thus hard to fit.
+
+$$
+{\cal I}[y, \theta \mid x, {\cal D}] = {\cal H}[y \mid x, {\cal D}] - {\cal E}_{p(\theta \mid {\cal D})}[{\cal H}[y \mid x, \theta]]
+$$
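+
+A minimal NumPy sketch of both heuristics, assuming `probs` has shape `[N, C, I]` ($N$ samples, $C$ classes, $I$ stochastic predictions), as produced by MC-Dropout:
+
+```python
+import numpy as np
+
+def entropy(p):
+    # p: [..., C]; entropy over the class axis.
+    return -(p * np.log(p + 1e-12)).sum(axis=-1)
+
+def bald(probs):
+    # probs: [N, C, I] class probabilities over I stochastic forward passes.
+    h_expected = entropy(probs.mean(-1))                      # H[y | x, D] of the averaged prediction
+    expected_h = entropy(probs.transpose(0, 2, 1)).mean(-1)   # E_theta H[y | x, theta]
+    return h_expected - expected_h                            # mutual information, one score per sample
+```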
diff --git a/docs/user_guide/index.md b/docs/user_guide/index.md
index 91ab00f0..09b4df21 100644
--- a/docs/user_guide/index.md
+++ b/docs/user_guide/index.md
@@ -6,9 +6,9 @@ In addition, we propose a [cheat sheet](./baal_cheatsheet.md) that will help use
### Notations and glossary
-* Training dataset ``$`D_L`$``
-* Pool, the unlabelled portion of the dataset ``$`D_U`$``
-* Heuristic, the function that computes the uncertainty (ex. BALD) ``$`U `$``
+* Training dataset $D_L$
+* Pool, the unlabelled portion of the dataset $D_U$
+* Heuristic, the function that computes the uncertainty (ex. BALD) $U$
* Active learning step, the sequence of training, selecting and labelling one or many examples.
* BALD, an heuristic that works well with deep learning models that are overconfident.
* Query size, the number of items to label between retraining.
@@ -53,13 +53,14 @@ We hope that work in this area continues so that we can better understand the im
**Resources**
-* [Literature review](../literature/index.md)
+* [Literature review](../research/literature/index.md)
* [Active learning dataset and training loop classes](../notebooks/fundamentals/active-learning)
* [Methods for approximating bayesian posteriors](../notebooks/fundamentals/posteriors)
* [Full active learning example](../notebooks/active_learning_process)
**References**
+
* Kirsch, Andreas, Joost Van Amersfoort, and Yarin Gal. "Batchbald: Efficient and diverse batch acquisition for deep bayesian active learning." NeurIPS (2019).
* Jain, Siddhartha, Ge Liu, and David Gifford. "Information Condensing Active Learning." arXiv preprint arXiv:2002.07916 (2020).
* Houlsby, Neil, et al. "Bayesian active learning for classification and preference learning." arXiv preprint arXiv:1112.5745 (2011).
diff --git a/mkdocs.yml b/mkdocs.yml
new file mode 100644
index 00000000..4ab14a4b
--- /dev/null
+++ b/mkdocs.yml
@@ -0,0 +1,113 @@
+site_name: Baal Documentation
+repo_url: https://github.com/baal-org/baal
+edit_uri: edit/master/docs/
+theme:
+ name: material
+ logo: _static/images/logo-transparent.png
+ palette:
+ # Palette toggle for light mode
+ - media: "(prefers-color-scheme: light)"
+ scheme: default
+ primary: black
+ toggle:
+ icon: material/brightness-7
+ name: Switch to dark mode
+
+ # Palette toggle for dark mode
+ - media: "(prefers-color-scheme: dark)"
+ scheme: slate
+ primary: blue grey
+ toggle:
+ icon: material/brightness-4
+ name: Switch to light mode
+ features:
+ - navigation.tabs
+ - navigation.tabs.sticky
+ - navigation.indexes
+ - navigation.instant
+ icon:
+ repo: fontawesome/brands/github
+plugins:
+ - search
+ - exclude-search:
+ exclude_unreferenced: true
+ exclude:
+ - notebooks/active_learning_process.md
+ - /*/active_learning_process*
+ - /*/nbsphinx*
+ - mkdocs-jupyter
+ - mkdocstrings
+
+
+markdown_extensions:
+ - md_in_html
+ - attr_list
+ - pymdownx.arithmatex:
+ generic: true
+ - pymdownx.emoji:
+ emoji_index: !!python/name:materialx.emoji.twemoji
+ emoji_generator: !!python/name:materialx.emoji.to_svg
+ - pymdownx.highlight:
+ anchor_linenums: true
+ - admonition
+ - pymdownx.details
+ - pymdownx.inlinehilite
+ - pymdownx.snippets
+ - pymdownx.superfences
+
+extra_javascript:
+ - javascripts/mathjax.js
+ - https://polyfill.io/v3/polyfill.min.js?features=es6
+ - https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js
+extra_css:
+ - stylesheets/extra.css
+
+nav:
+ - Home: index.md
+ - User Guide:
+ - user_guide/index.md
+ - Cheat Sheet: user_guide/baal_cheatsheet.md
+ - Active data structure: notebooks/fundamentals/active-learning.ipynb
+ - Computing uncertainty:
+ - Stochastic models: notebooks/fundamentals/posteriors.ipynb
+ - Heuristics: user_guide/heuristics.md
+ - API:
+ - api/index.md
+ - api/bayesian.md
+ - api/calibration.md
+ - api/dataset_management.md
+ - api/heuristics.md
+ - api/modelwrapper.md
+ - api/utils.md
+ - Compatibility:
+ - api/compatibility/huggingface.md
+ - api/compatibility/pytorch-lightning.md
+
+ - Tutorials:
+ - tutorials/index.md
+ - Compatibility:
+ - tutorials/label-studio.md
+ - notebooks/compatibility/nlp_classification.ipynb
+ - notebooks/compatibility/sklearn_tutorial.ipynb
+ - Active learning for research: notebooks/active_learning_process.ipynb
+ - Active learning for production: notebooks/baal_prod_cls.ipynb
+ - Deep Ensembles for active learning: notebooks/deep_ensemble.ipynb
+ - Research:
+ - research/index.md
+ - Technical Reports:
+ - Active Fairness: notebooks/fairness/ActiveFairness.ipynb
+ - research/dirichlet_calibration.md
+ - research/double_descent.md
+ - Literature:
+ - research/literature/index.md
+ - Additional papers:
+ - research/literature/Additional papers/dmi.md
+ - research/literature/Additional papers/duq.md
+ - research/literature/Additional papers/gyolov3.md
+ - research/literature/Additional papers/lightcoresets.md
+ - research/literature/Additional papers/sparse_selection.md
+ - research/literature/Additional papers/vaal.md
+
+ - Support:
+ - support/index.md
+ - support/faq.md
diff --git a/notebooks/active_learning_process.ipynb b/notebooks/active_learning_process.ipynb
index 9faf3f2e..00b57ee3 100644
--- a/notebooks/active_learning_process.ipynb
+++ b/notebooks/active_learning_process.ipynb
@@ -2,11 +2,12 @@
"cells": [
{
"cell_type": "markdown",
- "metadata": {},
"source": [
"# How to do research and visualize progress\n",
"\n",
- "In this tutorial, we will show how to use BaaL for research ie. when we know the labels.\n",
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/baal-org/baal/blob/master/notebooks/active_learning_process.ipynb)\n",
+ "\n",
+ "In this tutorial, we will show how to use Baal for research ie. when we know the labels.\n",
"We will introduce notions such as dataset management, MC-Dropout, BALD. If you need more documentation, be sure to check our **Additional resources** section below!\n",
"\n",
"BaaL can be used on a variety of research domains:\n",
@@ -18,14 +19,6 @@
"\n",
"Today we will focus on a simple example with CIFAR10 and we will animate the progress of active learning!\n",
"\n",
- "#### Requirements\n",
- "\n",
- "In addition to BaaL standard requirements, you will need to install:\n",
- "\n",
- "* MulticoreTSNE\n",
- "* Matplotlib\n",
- "\n",
- "\n",
"#### Additional resources\n",
"\n",
"* More info on the inner working of Active Learning Dataset [here](./fundamentals/active-learning.ipynb).\n",
@@ -33,12 +26,17 @@
" [Literature review](https://baal.readthedocs.io/en/latest/literature/core-papers.html).\n",
"\n",
"### Let's do this!"
- ]
+ ],
+ "metadata": {
+ "collapsed": false,
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ }
},
{
"cell_type": "code",
- "execution_count": 1,
- "metadata": {},
+ "execution_count": null,
"outputs": [],
"source": [
"# Let's start with a bunch of imports.\n",
@@ -50,6 +48,7 @@
"import numpy as np\n",
"import torch\n",
"import torch.backends\n",
+ "import torch.utils.data as torchdata\n",
"from torch import optim\n",
"from torch.hub import load_state_dict_from_url\n",
"from torch.nn import CrossEntropyLoss\n",
@@ -63,17 +62,23 @@
"from baal.bayesian.dropout import patch_module\n",
"from baal.modelwrapper import ModelWrapper\n",
"\n",
+ "\n",
"def vgg16(num_classes):\n",
" model = models.vgg16(pretrained=False, num_classes=num_classes)\n",
" weights = load_state_dict_from_url('https://download.pytorch.org/models/vgg16-397923af.pth')\n",
" weights = {k: v for k, v in weights.items() if 'classifier.6' not in k}\n",
" model.load_state_dict(weights, strict=False)\n",
" return model"
- ]
+ ],
+ "metadata": {
+ "collapsed": false,
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ }
},
{
"cell_type": "markdown",
- "metadata": {},
"source": [
"### Dataset management and the pool\n",
"\n",
@@ -106,56 +111,37 @@
"`ActiveLearningDataset(your_dataset, pool_specifics:{'transform': test_transform}`\n",
"\n",
"where `test_transform` is the test version of `transform` without data augmentation.\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {},
- "outputs": [],
- "source": [
- "\"\"\"\n",
- "We will make an adapter so that `pool_specifics` modifies the transform correctly.\n",
- "Because the training set is now a torchdata.Subset, modifying the `transform` attribute is harder.\n",
- "\"\"\"\n",
- "\n",
- "import torch.utils.data as torchdata\n",
- "\n",
- "\n",
- "class TransformAdapter(torchdata.Subset):\n",
- "\n",
- " @property\n",
- " def transform(self):\n",
- " if hasattr(self.dataset, 'transform'):\n",
- " return self.dataset.transform\n",
- " else:\n",
- " raise AttributeError()\n",
- "\n",
- " @transform.setter\n",
- " def transform(self, transform):\n",
- " if hasattr(self.dataset, 'transform'):\n",
- " self.dataset.transform = transform"
- ]
+ ],
+ "metadata": {
+ "collapsed": false,
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ }
},
{
"cell_type": "markdown",
- "metadata": {},
"source": [
"Here we define our Experiment configuration, this can come from your favorite experiment manager like MLFlow.\n",
"BaaL does not expect a particular format as all arguments are supplied."
- ]
+ ],
+ "metadata": {
+ "collapsed": false,
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ }
},
{
"cell_type": "code",
- "execution_count": 3,
- "metadata": {},
+ "execution_count": null,
"outputs": [],
"source": [
"\n",
"\n",
"@dataclass\n",
"class ExperimentConfig:\n",
- " epoch: int = 20000//100\n",
+ " epoch: int = 20000 // 100\n",
" batch_size: int = 32\n",
" initial_pool: int = 512\n",
" query_size: int = 100\n",
@@ -163,27 +149,33 @@
" heuristic: str = 'bald'\n",
" iterations: int = 40\n",
" training_duration: int = 10\n",
- " \n"
- ]
- },
- {
- "cell_type": "markdown",
+ "\n"
+ ],
"metadata": {
+ "collapsed": false,
"pycharm": {
- "name": "#%% md\n"
+ "name": "#%%\n"
}
- },
+ }
+ },
+ {
+ "cell_type": "markdown",
"source": [
"### Problem definition\n",
"\n",
"We will perform active learning on a toy dataset, CIFAR-3 where we only keep dogs, cats and airplanes. This will make\n",
"visualization easier."
- ]
+ ],
+ "metadata": {
+ "collapsed": false,
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ }
},
{
"cell_type": "code",
- "execution_count": 4,
- "metadata": {},
+ "execution_count": null,
"outputs": [],
"source": [
"def get_datasets(initial_pool):\n",
@@ -198,6 +190,22 @@
" Returns:\n",
" ActiveLearningDataset, Dataset, the training and test set.\n",
" \"\"\"\n",
+ "\n",
+ " class TransformAdapter(torchdata.Subset):\n",
+ " # We need a custom Subset class as we need to override \"transforms\" as well.\n",
+ " # This shouldn't be needed for your experiments.\n",
+ " @property\n",
+ " def transform(self):\n",
+ " if hasattr(self.dataset, 'transform'):\n",
+ " return self.dataset.transform\n",
+ " else:\n",
+ " raise AttributeError()\n",
+ "\n",
+ " @transform.setter\n",
+ " def transform(self, transform):\n",
+ " if hasattr(self.dataset, 'transform'):\n",
+ " self.dataset.transform = transform\n",
+ "\n",
" # airplane, cat, dog\n",
" classes_to_keep = [0, 3, 5]\n",
" transform = transforms.Compose(\n",
@@ -215,31 +223,32 @@
" )\n",
" train_ds = datasets.CIFAR10('.', train=True,\n",
" transform=transform, target_transform=None, download=True)\n",
- " \n",
+ "\n",
" train_mask = np.where([y in classes_to_keep for y in train_ds.targets])[0]\n",
" train_ds = TransformAdapter(train_ds, train_mask)\n",
- " \n",
+ "\n",
" # In a real application, you will want a validation set here.\n",
" test_set = datasets.CIFAR10('.', train=False,\n",
" transform=test_transform, target_transform=None, download=True)\n",
" test_mask = np.where([y in classes_to_keep for y in test_set.targets])[0]\n",
" test_set = TransformAdapter(test_set, test_mask)\n",
- " \n",
+ "\n",
" # Here we set `pool_specifics`, where we set the transform attribute for the pool.\n",
" active_set = ActiveLearningDataset(train_ds, pool_specifics={'transform': test_transform})\n",
"\n",
" # We start labeling randomly.\n",
" active_set.label_randomly(initial_pool)\n",
" return active_set, test_set"
- ]
- },
- {
- "cell_type": "markdown",
+ ],
"metadata": {
+ "collapsed": false,
"pycharm": {
- "name": "#%% md\n"
+ "name": "#%%\n"
}
- },
+ }
+ },
+ {
+ "cell_type": "markdown",
"source": [
"## Creating our experiment\n",
"\n",
@@ -256,22 +265,18 @@
" * Training/testing loops\n",
"* ActiveLearningLoop\n",
" * Will make prediction on the pool and label the most uncertain examples."
- ]
+ ],
+ "metadata": {
+ "collapsed": false,
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ }
},
{
"cell_type": "code",
- "execution_count": 5,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Files already downloaded and verified\n",
- "Files already downloaded and verified\n"
- ]
- }
- ],
+ "execution_count": null,
+ "outputs": [],
"source": [
"hyperparams = ExperimentConfig()\n",
"use_cuda = torch.cuda.is_available()\n",
@@ -312,11 +317,16 @@
"\n",
"# We will reset the weights at each active learning step so we make a copy.\n",
"init_weights = deepcopy(model.state_dict())"
- ]
+ ],
+ "metadata": {
+ "collapsed": false,
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ }
},
{
"cell_type": "markdown",
- "metadata": {},
"source": [
"### What is an active learning loop\n",
"\n",
@@ -325,788 +335,27 @@
"1. Training\n",
"2. Estimate uncertainty on the pool\n",
"3. Label the most uncertain examples.\n"
- ]
+ ],
+ "metadata": {
+ "collapsed": false,
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ }
},
{
"cell_type": "code",
- "execution_count": 6,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "db80f856c34647a1a8e129a84339a57f",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- " 0%| | 0/200 [00:00, ?it/s]"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T15:26:02.888118Z [\u001b[32minfo ] Starting training dataset=512 epoch=10\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "/opt/conda/lib/python3.9/site-packages/torch/utils/data/dataloader.py:478: UserWarning: This DataLoader will create 4 worker processes in total. Our suggested max number of worker in current system is 1, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
- " warnings.warn(_create_warning_msg(\n",
- "/opt/conda/lib/python3.9/site-packages/torch/nn/functional.py:718: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at /pytorch/c10/core/TensorImpl.h:1156.)\n",
- " return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T15:26:32.784181Z [\u001b[32minfo ] Training complete train_loss=0.42513248324394226\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T15:26:32.785716Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T15:26:42.387235Z [\u001b[32minfo ] Evaluation complete test_loss=0.5483408570289612\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T15:26:42.391419Z [\u001b[32minfo ] Start Predict dataset=14488\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T15:28:48.686742Z [\u001b[32minfo ] Starting training dataset=612 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T15:29:18.082733Z [\u001b[32minfo ] Training complete train_loss=0.023272458463907242\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T15:29:18.084489Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T15:29:26.288090Z [\u001b[32minfo ] Evaluation complete test_loss=1.0466769933700562\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T15:29:26.292233Z [\u001b[32minfo ] Start Predict dataset=14388\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T15:31:33.634294Z [\u001b[32minfo ] Starting training dataset=712 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T15:32:06.180206Z [\u001b[32minfo ] Training complete train_loss=0.020062478259205818\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T15:32:06.181728Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T15:32:14.669412Z [\u001b[32minfo ] Evaluation complete test_loss=1.234800934791565\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T15:32:14.673486Z [\u001b[32minfo ] Start Predict dataset=14288\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T15:34:19.535935Z [\u001b[32minfo ] Starting training dataset=812 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T15:34:50.487032Z [\u001b[32minfo ] Training complete train_loss=0.0782688781619072\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T15:34:50.488655Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T15:34:58.867665Z [\u001b[32minfo ] Evaluation complete test_loss=0.9648405909538269\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T15:34:58.871468Z [\u001b[32minfo ] Start Predict dataset=14188\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T15:37:04.037450Z [\u001b[32minfo ] Starting training dataset=912 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T15:37:37.071740Z [\u001b[32minfo ] Training complete train_loss=0.006023803725838661\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T15:37:37.073237Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T15:37:45.168673Z [\u001b[32minfo ] Evaluation complete test_loss=0.8979674577713013\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T15:37:45.173268Z [\u001b[32minfo ] Start Predict dataset=14088\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T15:39:49.231978Z [\u001b[32minfo ] Starting training dataset=1012 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T15:40:22.371968Z [\u001b[32minfo ] Training complete train_loss=0.015347965992987156\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T15:40:22.373994Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T15:40:30.474273Z [\u001b[32minfo ] Evaluation complete test_loss=0.8754228353500366\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T15:40:30.478465Z [\u001b[32minfo ] Start Predict dataset=13988\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T15:42:36.883231Z [\u001b[32minfo ] Starting training dataset=1112 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T15:43:12.986473Z [\u001b[32minfo ] Training complete train_loss=0.008938436396420002\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T15:43:12.988493Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T15:43:21.974697Z [\u001b[32minfo ] Evaluation complete test_loss=0.8416990041732788\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T15:43:21.979689Z [\u001b[32minfo ] Start Predict dataset=13888\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T15:45:28.336859Z [\u001b[32minfo ] Starting training dataset=1212 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T15:46:06.676436Z [\u001b[32minfo ] Training complete train_loss=0.006746976636350155\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T15:46:06.678271Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T15:46:15.267152Z [\u001b[32minfo ] Evaluation complete test_loss=0.8873146772384644\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T15:46:15.271275Z [\u001b[32minfo ] Start Predict dataset=13788\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T15:48:20.279303Z [\u001b[32minfo ] Starting training dataset=1312 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T15:48:59.773418Z [\u001b[32minfo ] Training complete train_loss=0.07147088646888733\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T15:48:59.775274Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T15:49:08.279979Z [\u001b[32minfo ] Evaluation complete test_loss=0.6853619813919067\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T15:49:08.284169Z [\u001b[32minfo ] Start Predict dataset=13688\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T15:51:13.170250Z [\u001b[32minfo ] Starting training dataset=1412 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T15:51:53.273587Z [\u001b[32minfo ] Training complete train_loss=0.04359261691570282\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T15:51:53.275475Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T15:52:01.677753Z [\u001b[32minfo ] Evaluation complete test_loss=0.6789661645889282\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T15:52:01.681596Z [\u001b[32minfo ] Start Predict dataset=13588\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T15:54:03.351257Z [\u001b[32minfo ] Starting training dataset=1512 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T15:54:44.773633Z [\u001b[32minfo ] Training complete train_loss=0.018604231998324394\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T15:54:44.776034Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T15:54:53.375403Z [\u001b[32minfo ] Evaluation complete test_loss=0.7287857532501221\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T15:54:53.380431Z [\u001b[32minfo ] Start Predict dataset=13488\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T15:56:55.411753Z [\u001b[32minfo ] Starting training dataset=1612 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T15:57:37.783421Z [\u001b[32minfo ] Training complete train_loss=0.03406292200088501\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T15:57:37.785087Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T15:57:46.284654Z [\u001b[32minfo ] Evaluation complete test_loss=0.638004720211029\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T15:57:46.288731Z [\u001b[32minfo ] Start Predict dataset=13388\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T15:59:43.650554Z [\u001b[32minfo ] Starting training dataset=1712 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:00:25.788374Z [\u001b[32minfo ] Training complete train_loss=0.06721857935190201\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:00:25.789980Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:00:34.270598Z [\u001b[32minfo ] Evaluation complete test_loss=0.6078959703445435\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:00:34.274553Z [\u001b[32minfo ] Start Predict dataset=13288\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:02:29.738611Z [\u001b[32minfo ] Starting training dataset=1812 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:03:13.372754Z [\u001b[32minfo ] Training complete train_loss=0.08680642396211624\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:03:13.374516Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:03:21.776862Z [\u001b[32minfo ] Evaluation complete test_loss=0.647302508354187\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:03:21.780676Z [\u001b[32minfo ] Start Predict dataset=13188\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:05:17.095705Z [\u001b[32minfo ] Starting training dataset=1912 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:06:01.375814Z [\u001b[32minfo ] Training complete train_loss=0.06293369829654694\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:06:01.377432Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:06:09.786188Z [\u001b[32minfo ] Evaluation complete test_loss=0.6817241311073303\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:06:09.789944Z [\u001b[32minfo ] Start Predict dataset=13088\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:08:04.612386Z [\u001b[32minfo ] Starting training dataset=2012 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:08:49.982937Z [\u001b[32minfo ] Training complete train_loss=0.012322206981480122\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:08:49.984675Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:08:58.488292Z [\u001b[32minfo ] Evaluation complete test_loss=0.8289020657539368\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:08:58.566936Z [\u001b[32minfo ] Start Predict dataset=12988\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:10:53.208150Z [\u001b[32minfo ] Starting training dataset=2112 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:11:40.481402Z [\u001b[32minfo ] Training complete train_loss=0.06632529199123383\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:11:40.483683Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:11:48.892025Z [\u001b[32minfo ] Evaluation complete test_loss=0.6253312826156616\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:11:48.896544Z [\u001b[32minfo ] Start Predict dataset=12888\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:13:44.590686Z [\u001b[32minfo ] Starting training dataset=2212 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:14:31.585109Z [\u001b[32minfo ] Training complete train_loss=0.044168129563331604\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:14:31.587383Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:14:40.077616Z [\u001b[32minfo ] Evaluation complete test_loss=0.6782843470573425\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:14:40.081708Z [\u001b[32minfo ] Start Predict dataset=12788\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:16:33.938059Z [\u001b[32minfo ] Starting training dataset=2312 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:17:21.669894Z [\u001b[32minfo ] Training complete train_loss=0.09880666434764862\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:17:21.671646Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:17:30.173757Z [\u001b[32minfo ] Evaluation complete test_loss=0.8312588930130005\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:17:30.177913Z [\u001b[32minfo ] Start Predict dataset=12688\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:19:20.962003Z [\u001b[32minfo ] Starting training dataset=2412 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:20:10.175347Z [\u001b[32minfo ] Training complete train_loss=0.049882616847753525\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:20:10.177008Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:20:18.676877Z [\u001b[32minfo ] Evaluation complete test_loss=0.6991893649101257\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:20:18.681326Z [\u001b[32minfo ] Start Predict dataset=12588\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:22:10.999132Z [\u001b[32minfo ] Starting training dataset=2512 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:23:00.569287Z [\u001b[32minfo ] Training complete train_loss=0.06398215144872665\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:23:00.571141Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:23:08.785616Z [\u001b[32minfo ] Evaluation complete test_loss=0.5477628111839294\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:23:08.789326Z [\u001b[32minfo ] Start Predict dataset=12488\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:24:58.624758Z [\u001b[32minfo ] Starting training dataset=2612 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:25:49.383363Z [\u001b[32minfo ] Training complete train_loss=0.046333637088537216\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:25:49.385026Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:25:57.787980Z [\u001b[32minfo ] Evaluation complete test_loss=0.702488124370575\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:25:57.792133Z [\u001b[32minfo ] Start Predict dataset=12388\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:27:47.319856Z [\u001b[32minfo ] Starting training dataset=2712 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:28:39.489135Z [\u001b[32minfo ] Training complete train_loss=0.08484052121639252\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:28:39.490987Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:28:47.771346Z [\u001b[32minfo ] Evaluation complete test_loss=0.5731009840965271\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:28:47.775165Z [\u001b[32minfo ] Start Predict dataset=12288\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:30:36.802687Z [\u001b[32minfo ] Starting training dataset=2812 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:31:29.576259Z [\u001b[32minfo ] Training complete train_loss=0.09145867079496384\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:31:29.578028Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:31:37.886296Z [\u001b[32minfo ] Evaluation complete test_loss=0.5549673438072205\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:31:37.889800Z [\u001b[32minfo ] Start Predict dataset=12188\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:33:26.398167Z [\u001b[32minfo ] Starting training dataset=2912 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:34:20.490228Z [\u001b[32minfo ] Training complete train_loss=0.02744719199836254\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:34:20.492170Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:34:29.067964Z [\u001b[32minfo ] Evaluation complete test_loss=0.660302996635437\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:34:29.072246Z [\u001b[32minfo ] Start Predict dataset=12088\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:36:15.329656Z [\u001b[32minfo ] Starting training dataset=3012 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:37:09.966144Z [\u001b[32minfo ] Training complete train_loss=0.06737153232097626\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:37:09.968091Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:37:18.489054Z [\u001b[32minfo ] Evaluation complete test_loss=0.621569812297821\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:37:18.569881Z [\u001b[32minfo ] Start Predict dataset=11988\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:39:04.909076Z [\u001b[32minfo ] Starting training dataset=3112 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:40:01.971488Z [\u001b[32minfo ] Training complete train_loss=0.05985158681869507\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:40:01.973623Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:40:10.988141Z [\u001b[32minfo ] Evaluation complete test_loss=0.6705137491226196\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:40:10.992964Z [\u001b[32minfo ] Start Predict dataset=11888\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:42:03.615463Z [\u001b[32minfo ] Starting training dataset=3212 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:43:06.187299Z [\u001b[32minfo ] Training complete train_loss=0.06435515731573105\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:43:06.189039Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:43:14.974582Z [\u001b[32minfo ] Evaluation complete test_loss=0.6966602206230164\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:43:14.978799Z [\u001b[32minfo ] Start Predict dataset=11788\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:45:01.591964Z [\u001b[32minfo ] Starting training dataset=3312 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:45:59.571342Z [\u001b[32minfo ] Training complete train_loss=0.05543559044599533\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:45:59.572948Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:46:07.784239Z [\u001b[32minfo ] Evaluation complete test_loss=0.6278331279754639\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:46:07.788230Z [\u001b[32minfo ] Start Predict dataset=11688\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:47:52.487984Z [\u001b[32minfo ] Starting training dataset=3412 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:48:52.691644Z [\u001b[32minfo ] Training complete train_loss=0.07221610099077225\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:48:52.765384Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:49:01.280861Z [\u001b[32minfo ] Evaluation complete test_loss=0.6179820895195007\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:49:01.285400Z [\u001b[32minfo ] Start Predict dataset=11588\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:50:44.974180Z [\u001b[32minfo ] Starting training dataset=3512 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:51:42.676914Z [\u001b[32minfo ] Training complete train_loss=0.039833199232816696\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:51:42.678530Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:51:51.080616Z [\u001b[32minfo ] Evaluation complete test_loss=0.6217177510261536\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:51:51.084222Z [\u001b[32minfo ] Start Predict dataset=11488\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:53:31.595579Z [\u001b[32minfo ] Starting training dataset=3612 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:54:29.772139Z [\u001b[32minfo ] Training complete train_loss=0.03375746309757233\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:54:29.774393Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:54:37.979455Z [\u001b[32minfo ] Evaluation complete test_loss=0.6929616928100586\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:54:37.982888Z [\u001b[32minfo ] Start Predict dataset=11388\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:56:16.550993Z [\u001b[32minfo ] Starting training dataset=3712 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T16:57:15.780976Z [\u001b[32minfo ] Training complete train_loss=0.04057781398296356\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T16:57:15.782596Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T16:57:23.974826Z [\u001b[32minfo ] Evaluation complete test_loss=0.7048872113227844\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T16:57:23.978437Z [\u001b[32minfo ] Start Predict dataset=11288\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T16:59:04.231574Z [\u001b[32minfo ] Starting training dataset=3812 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:00:07.275913Z [\u001b[32minfo ] Training complete train_loss=0.07070793211460114\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:00:07.277627Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:00:15.779335Z [\u001b[32minfo ] Evaluation complete test_loss=0.5328732132911682\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:00:15.782809Z [\u001b[32minfo ] Start Predict dataset=11188\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:01:54.765307Z [\u001b[32minfo ] Starting training dataset=3912 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:02:58.388761Z [\u001b[32minfo ] Training complete train_loss=0.12230982631444931\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:02:58.391212Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:03:06.876905Z [\u001b[32minfo ] Evaluation complete test_loss=0.5321411490440369\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:03:06.881740Z [\u001b[32minfo ] Start Predict dataset=11088\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:04:44.268732Z [\u001b[32minfo ] Starting training dataset=4012 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:05:51.968480Z [\u001b[32minfo ] Training complete train_loss=0.044358428567647934\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:05:51.970839Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:06:00.487090Z [\u001b[32minfo ] Evaluation complete test_loss=0.8059483170509338\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:06:00.491321Z [\u001b[32minfo ] Start Predict dataset=10988\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:07:37.287176Z [\u001b[32minfo ] Starting training dataset=4112 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:08:43.176013Z [\u001b[32minfo ] Training complete train_loss=0.1225663274526596\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:08:43.177778Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:08:51.476998Z [\u001b[32minfo ] Evaluation complete test_loss=0.4315877854824066\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:08:51.481084Z [\u001b[32minfo ] Start Predict dataset=10888\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:10:29.940602Z [\u001b[32minfo ] Starting training dataset=4212 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:11:37.180729Z [\u001b[32minfo ] Training complete train_loss=0.10635881125926971\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:11:37.182797Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:11:45.678586Z [\u001b[32minfo ] Evaluation complete test_loss=0.5154958963394165\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:11:45.682656Z [\u001b[32minfo ] Start Predict dataset=10788\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:13:21.169707Z [\u001b[32minfo ] Starting training dataset=4312 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:14:29.792534Z [\u001b[32minfo ] Training complete train_loss=0.053799740970134735\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:14:29.866135Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:14:38.169583Z [\u001b[32minfo ] Evaluation complete test_loss=0.6495267748832703\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:14:38.173309Z [\u001b[32minfo ] Start Predict dataset=10688\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:16:11.983602Z [\u001b[32minfo ] Starting training dataset=4412 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:17:20.387089Z [\u001b[32minfo ] Training complete train_loss=0.018582936376333237\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:17:20.388534Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:17:28.788662Z [\u001b[32minfo ] Evaluation complete test_loss=0.7638018131256104\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:17:28.867651Z [\u001b[32minfo ] Start Predict dataset=10588\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:19:02.263938Z [\u001b[32minfo ] Starting training dataset=4512 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:20:11.585626Z [\u001b[32minfo ] Training complete train_loss=0.047023482620716095\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:20:11.587573Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:20:19.881492Z [\u001b[32minfo ] Evaluation complete test_loss=0.49579280614852905\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:20:19.885845Z [\u001b[32minfo ] Start Predict dataset=10488\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:21:54.652841Z [\u001b[32minfo ] Starting training dataset=4612 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:23:06.479348Z [\u001b[32minfo ] Training complete train_loss=0.06473588943481445\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:23:06.480989Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:23:14.966838Z [\u001b[32minfo ] Evaluation complete test_loss=0.6052875518798828\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:23:14.970796Z [\u001b[32minfo ] Start Predict dataset=10388\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:24:47.734368Z [\u001b[32minfo ] Starting training dataset=4712 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:25:59.879686Z [\u001b[32minfo ] Training complete train_loss=0.04754060506820679\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:25:59.881753Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:26:08.370846Z [\u001b[32minfo ] Evaluation complete test_loss=0.6196796298027039\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:26:08.374766Z [\u001b[32minfo ] Start Predict dataset=10288\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:27:38.912284Z [\u001b[32minfo ] Starting training dataset=4812 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:28:56.790148Z [\u001b[32minfo ] Training complete train_loss=0.13738520443439484\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:28:56.791933Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:29:05.372354Z [\u001b[32minfo ] Evaluation complete test_loss=0.417594313621521\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:29:05.376664Z [\u001b[32minfo ] Start Predict dataset=10188\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:30:37.433346Z [\u001b[32minfo ] Starting training dataset=4912 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:31:52.394380Z [\u001b[32minfo ] Training complete train_loss=0.08568105101585388\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:31:52.396245Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:32:00.787110Z [\u001b[32minfo ] Evaluation complete test_loss=0.48676493763923645\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:32:00.791032Z [\u001b[32minfo ] Start Predict dataset=10088\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:33:28.340993Z [\u001b[32minfo ] Starting training dataset=5012 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:34:43.667946Z [\u001b[32minfo ] Training complete train_loss=0.036210086196660995\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:34:43.670010Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:34:52.185600Z [\u001b[32minfo ] Evaluation complete test_loss=0.6417835354804993\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:34:52.189976Z [\u001b[32minfo ] Start Predict dataset=9988\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:36:20.994496Z [\u001b[32minfo ] Starting training dataset=5112 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:37:36.585937Z [\u001b[32minfo ] Training complete train_loss=0.055751074105501175\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:37:36.587822Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:37:44.981738Z [\u001b[32minfo ] Evaluation complete test_loss=0.5641336441040039\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:37:44.986213Z [\u001b[32minfo ] Start Predict dataset=9888\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:39:15.946557Z [\u001b[32minfo ] Starting training dataset=5212 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:40:33.074149Z [\u001b[32minfo ] Training complete train_loss=0.04473729059100151\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:40:33.075860Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:40:41.380948Z [\u001b[32minfo ] Evaluation complete test_loss=0.5987882614135742\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:40:41.384711Z [\u001b[32minfo ] Start Predict dataset=9788\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:42:07.956223Z [\u001b[32minfo ] Starting training dataset=5312 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:43:26.188865Z [\u001b[32minfo ] Training complete train_loss=0.04242468625307083\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:43:26.190898Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:43:34.776365Z [\u001b[32minfo ] Evaluation complete test_loss=0.573499858379364\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:43:34.780809Z [\u001b[32minfo ] Start Predict dataset=9688\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:45:02.444034Z [\u001b[32minfo ] Starting training dataset=5412 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:46:22.986454Z [\u001b[32minfo ] Training complete train_loss=0.05522795766592026\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:46:22.988322Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:46:31.871482Z [\u001b[32minfo ] Evaluation complete test_loss=0.5418797731399536\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:46:31.876347Z [\u001b[32minfo ] Start Predict dataset=9588\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:47:57.958922Z [\u001b[32minfo ] Starting training dataset=5512 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:49:19.288858Z [\u001b[32minfo ] Training complete train_loss=0.05585930123925209\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:49:19.290700Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:49:27.887083Z [\u001b[32minfo ] Evaluation complete test_loss=0.5781568884849548\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:49:27.891558Z [\u001b[32minfo ] Start Predict dataset=9488\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:50:51.759392Z [\u001b[32minfo ] Starting training dataset=5612 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:52:10.167108Z [\u001b[32minfo ] Training complete train_loss=0.08212323486804962\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:52:10.168639Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:52:18.489575Z [\u001b[32minfo ] Evaluation complete test_loss=0.5190374255180359\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:52:18.568302Z [\u001b[32minfo ] Start Predict dataset=9388\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:53:42.390100Z [\u001b[32minfo ] Starting training dataset=5712 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:55:06.677886Z [\u001b[32minfo ] Training complete train_loss=0.08772403001785278\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:55:06.679728Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:55:15.179280Z [\u001b[32minfo ] Evaluation complete test_loss=0.4441128075122833\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:55:15.183333Z [\u001b[32minfo ] Start Predict dataset=9288\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:56:40.256626Z [\u001b[32minfo ] Starting training dataset=5812 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T17:58:00.871617Z [\u001b[32minfo ] Training complete train_loss=0.05669952929019928\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T17:58:00.873122Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T17:58:09.284316Z [\u001b[32minfo ] Evaluation complete test_loss=0.5060445666313171\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T17:58:09.288729Z [\u001b[32minfo ] Start Predict dataset=9188\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T17:59:28.919979Z [\u001b[32minfo ] Starting training dataset=5912 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:00:51.078877Z [\u001b[32minfo ] Training complete train_loss=0.07387827336788177\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:00:51.080834Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:00:59.669384Z [\u001b[32minfo ] Evaluation complete test_loss=0.5086865425109863\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:00:59.674395Z [\u001b[32minfo ] Start Predict dataset=9088\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:02:19.394067Z [\u001b[32minfo ] Starting training dataset=6012 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:03:42.782050Z [\u001b[32minfo ] Training complete train_loss=0.08140388131141663\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:03:42.783808Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:03:51.468837Z [\u001b[32minfo ] Evaluation complete test_loss=0.432904988527298\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:03:51.473174Z [\u001b[32minfo ] Start Predict dataset=8988\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:05:10.688109Z [\u001b[32minfo ] Starting training dataset=6112 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:06:36.281515Z [\u001b[32minfo ] Training complete train_loss=0.04091908782720566\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:06:36.283262Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:06:44.686541Z [\u001b[32minfo ] Evaluation complete test_loss=0.5736671686172485\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:06:44.690767Z [\u001b[32minfo ] Start Predict dataset=8888\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:08:05.198629Z [\u001b[32minfo ] Starting training dataset=6212 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:09:30.569787Z [\u001b[32minfo ] Training complete train_loss=0.06559653580188751\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:09:30.571482Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:09:38.668353Z [\u001b[32minfo ] Evaluation complete test_loss=0.5123801827430725\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:09:38.672007Z [\u001b[32minfo ] Start Predict dataset=8788\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:10:56.504718Z [\u001b[32minfo ] Starting training dataset=6312 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:12:24.391821Z [\u001b[32minfo ] Training complete train_loss=0.07125910371541977\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:12:24.393844Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:12:32.869289Z [\u001b[32minfo ] Evaluation complete test_loss=0.4685639441013336\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:12:32.873654Z [\u001b[32minfo ] Start Predict dataset=8688\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:13:52.161712Z [\u001b[32minfo ] Starting training dataset=6412 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:15:19.480747Z [\u001b[32minfo ] Training complete train_loss=0.08134432137012482\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:15:19.482431Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:15:27.767816Z [\u001b[32minfo ] Evaluation complete test_loss=0.4484506845474243\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:15:27.771722Z [\u001b[32minfo ] Start Predict dataset=8588\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:16:45.635661Z [\u001b[32minfo ] Starting training dataset=6512 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:18:13.391296Z [\u001b[32minfo ] Training complete train_loss=0.07426474988460541\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:18:13.392929Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:18:22.003691Z [\u001b[32minfo ] Evaluation complete test_loss=0.5092469453811646\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:18:22.007843Z [\u001b[32minfo ] Start Predict dataset=8488\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:19:37.445279Z [\u001b[32minfo ] Starting training dataset=6612 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:21:05.781014Z [\u001b[32minfo ] Training complete train_loss=0.05355135723948479\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:21:05.782790Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:21:14.180182Z [\u001b[32minfo ] Evaluation complete test_loss=0.504236102104187\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:21:14.184058Z [\u001b[32minfo ] Start Predict dataset=8388\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:22:27.520906Z [\u001b[32minfo ] Starting training dataset=6712 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:24:01.288497Z [\u001b[32minfo ] Training complete train_loss=0.06972303986549377\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:24:01.290234Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:24:09.581470Z [\u001b[32minfo ] Evaluation complete test_loss=0.505696177482605\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:24:09.585144Z [\u001b[32minfo ] Start Predict dataset=8288\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:25:23.082250Z [\u001b[32minfo ] Starting training dataset=6812 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:26:51.691389Z [\u001b[32minfo ] Training complete train_loss=0.08558610081672668\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:26:51.693177Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:27:00.171906Z [\u001b[32minfo ] Evaluation complete test_loss=0.4490826725959778\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:27:00.175724Z [\u001b[32minfo ] Start Predict dataset=8188\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:28:12.777510Z [\u001b[32minfo ] Starting training dataset=6912 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:29:44.587334Z [\u001b[32minfo ] Training complete train_loss=0.027112364768981934\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:29:44.589016Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:29:52.967716Z [\u001b[32minfo ] Evaluation complete test_loss=0.6299618482589722\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:29:52.971769Z [\u001b[32minfo ] Start Predict dataset=8088\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:31:03.915833Z [\u001b[32minfo ] Starting training dataset=7012 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:32:36.978128Z [\u001b[32minfo ] Training complete train_loss=0.05840374156832695\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:32:36.979875Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:32:45.379434Z [\u001b[32minfo ] Evaluation complete test_loss=0.49078628420829773\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:32:45.384069Z [\u001b[32minfo ] Start Predict dataset=7988\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:33:57.892025Z [\u001b[32minfo ] Starting training dataset=7112 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:35:31.374373Z [\u001b[32minfo ] Training complete train_loss=0.07398192584514618\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:35:31.375994Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:35:39.775096Z [\u001b[32minfo ] Evaluation complete test_loss=0.3967265784740448\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:35:39.779247Z [\u001b[32minfo ] Start Predict dataset=7888\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:36:49.517415Z [\u001b[32minfo ] Starting training dataset=7212 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:38:24.174877Z [\u001b[32minfo ] Training complete train_loss=0.04612201824784279\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:38:24.177022Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:38:32.379501Z [\u001b[32minfo ] Evaluation complete test_loss=0.49809715151786804\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:38:32.382998Z [\u001b[32minfo ] Start Predict dataset=7788\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:39:41.917608Z [\u001b[32minfo ] Starting training dataset=7312 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:41:17.174369Z [\u001b[32minfo ] Training complete train_loss=0.06543444097042084\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:41:17.176491Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:41:25.471666Z [\u001b[32minfo ] Evaluation complete test_loss=0.5225977897644043\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:41:25.475692Z [\u001b[32minfo ] Start Predict dataset=7688\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:42:33.522329Z [\u001b[32minfo ] Starting training dataset=7412 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:44:09.293539Z [\u001b[32minfo ] Training complete train_loss=0.060686737298965454\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:44:09.366949Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:44:17.887026Z [\u001b[32minfo ] Evaluation complete test_loss=0.5125385522842407\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:44:17.890832Z [\u001b[32minfo ] Start Predict dataset=7588\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:45:25.400968Z [\u001b[32minfo ] Starting training dataset=7512 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:47:02.366684Z [\u001b[32minfo ] Training complete train_loss=0.07019717246294022\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:47:02.368580Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:47:10.878960Z [\u001b[32minfo ] Evaluation complete test_loss=0.470739483833313\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:47:10.882850Z [\u001b[32minfo ] Start Predict dataset=7488\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:48:16.235930Z [\u001b[32minfo ] Starting training dataset=7612 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:49:53.493500Z [\u001b[32minfo ] Training complete train_loss=0.04707063362002373\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:49:53.495700Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:50:01.788730Z [\u001b[32minfo ] Evaluation complete test_loss=0.5984643697738647\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:50:01.866775Z [\u001b[32minfo ] Start Predict dataset=7388\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:51:07.901960Z [\u001b[32minfo ] Starting training dataset=7712 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:52:46.877393Z [\u001b[32minfo ] Training complete train_loss=0.06192445009946823\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:52:46.879872Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:52:55.176958Z [\u001b[32minfo ] Evaluation complete test_loss=0.5132700204849243\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:52:55.181403Z [\u001b[32minfo ] Start Predict dataset=7288\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:54:00.257566Z [\u001b[32minfo ] Starting training dataset=7812 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:55:43.588375Z [\u001b[32minfo ] Training complete train_loss=0.03979147970676422\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:55:43.590052Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:55:51.875854Z [\u001b[32minfo ] Evaluation complete test_loss=0.5150150656700134\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:55:51.880159Z [\u001b[32minfo ] Start Predict dataset=7188\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:56:56.536715Z [\u001b[32minfo ] Starting training dataset=7912 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T18:58:41.879868Z [\u001b[32minfo ] Training complete train_loss=0.06223804131150246\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T18:58:41.881963Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T18:58:50.468474Z [\u001b[32minfo ] Evaluation complete test_loss=0.5643148422241211\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T18:58:50.472745Z [\u001b[32minfo ] Start Predict dataset=7088\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T18:59:54.295632Z [\u001b[32minfo ] Starting training dataset=8012 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T19:01:35.476312Z [\u001b[32minfo ] Training complete train_loss=0.06737291067838669\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T19:01:35.478476Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T19:01:43.780950Z [\u001b[32minfo ] Evaluation complete test_loss=0.48747894167900085\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T19:01:43.785449Z [\u001b[32minfo ] Start Predict dataset=6988\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T19:02:46.131753Z [\u001b[32minfo ] Starting training dataset=8112 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T19:04:29.880063Z [\u001b[32minfo ] Training complete train_loss=0.05925571545958519\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T19:04:29.881840Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T19:04:38.267654Z [\u001b[32minfo ] Evaluation complete test_loss=0.564688503742218\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T19:04:38.271766Z [\u001b[32minfo ] Start Predict dataset=6888\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T19:05:39.800913Z [\u001b[32minfo ] Starting training dataset=8212 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T19:07:23.486186Z [\u001b[32minfo ] Training complete train_loss=0.040423326194286346\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T19:07:23.487988Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T19:07:31.880357Z [\u001b[32minfo ] Evaluation complete test_loss=0.5314301252365112\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T19:07:31.884041Z [\u001b[32minfo ] Start Predict dataset=6788\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T19:08:32.589051Z [\u001b[32minfo ] Starting training dataset=8312 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T19:10:17.365494Z [\u001b[32minfo ] Training complete train_loss=0.06140904501080513\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T19:10:17.367863Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T19:10:25.972279Z [\u001b[32minfo ] Evaluation complete test_loss=0.49688518047332764\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T19:10:25.976035Z [\u001b[32minfo ] Start Predict dataset=6688\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T19:11:25.835585Z [\u001b[32minfo ] Starting training dataset=8412 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T19:13:10.774985Z [\u001b[32minfo ] Training complete train_loss=0.050612445920705795\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T19:13:10.776734Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T19:13:19.167663Z [\u001b[32minfo ] Evaluation complete test_loss=0.5152626037597656\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T19:13:19.171962Z [\u001b[32minfo ] Start Predict dataset=6588\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T19:14:18.243027Z [\u001b[32minfo ] Starting training dataset=8512 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T19:16:04.791515Z [\u001b[32minfo ] Training complete train_loss=0.033627718687057495\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T19:16:04.793285Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T19:16:13.288919Z [\u001b[32minfo ] Evaluation complete test_loss=0.4974973201751709\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T19:16:13.292998Z [\u001b[32minfo ] Start Predict dataset=6488\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T19:17:11.209628Z [\u001b[32minfo ] Starting training dataset=8612 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T19:18:58.093512Z [\u001b[32minfo ] Training complete train_loss=0.04596696048974991\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T19:18:58.095249Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T19:19:06.484257Z [\u001b[32minfo ] Evaluation complete test_loss=0.6129364967346191\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T19:19:06.488960Z [\u001b[32minfo ] Start Predict dataset=6388\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T19:20:04.209439Z [\u001b[32minfo ] Starting training dataset=8712 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T19:21:55.388171Z [\u001b[32minfo ] Training complete train_loss=0.049339789897203445\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T19:21:55.390263Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T19:22:04.078244Z [\u001b[32minfo ] Evaluation complete test_loss=0.5226636528968811\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T19:22:04.082582Z [\u001b[32minfo ] Start Predict dataset=6288\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T19:23:01.440119Z [\u001b[32minfo ] Starting training dataset=8812 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T19:24:53.994417Z [\u001b[32minfo ] Training complete train_loss=0.061960719525814056\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T19:24:53.996345Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T19:25:02.372585Z [\u001b[32minfo ] Evaluation complete test_loss=0.5233380198478699\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T19:25:02.377398Z [\u001b[32minfo ] Start Predict dataset=6188\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T19:25:58.792316Z [\u001b[32minfo ] Starting training dataset=8912 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T19:27:51.591211Z [\u001b[32minfo ] Training complete train_loss=0.04009644314646721\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T19:27:51.593404Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T19:28:00.069251Z [\u001b[32minfo ] Evaluation complete test_loss=0.5073189735412598\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T19:28:00.073255Z [\u001b[32minfo ] Start Predict dataset=6088\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T19:28:55.614687Z [\u001b[32minfo ] Starting training dataset=9012 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T19:30:48.691304Z [\u001b[32minfo ] Training complete train_loss=0.02636721171438694\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T19:30:48.693153Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T19:30:57.194178Z [\u001b[32minfo ] Evaluation complete test_loss=0.4932842552661896\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T19:30:57.198696Z [\u001b[32minfo ] Start Predict dataset=5988\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T19:31:52.014806Z [\u001b[32minfo ] Starting training dataset=9112 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T19:33:50.286149Z [\u001b[32minfo ] Training complete train_loss=0.038404107093811035\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T19:33:50.288005Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T19:33:58.867647Z [\u001b[32minfo ] Evaluation complete test_loss=0.4707241952419281\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T19:33:58.871875Z [\u001b[32minfo ] Start Predict dataset=5888\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T19:34:52.597191Z [\u001b[32minfo ] Starting training dataset=9212 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T19:36:52.979111Z [\u001b[32minfo ] Training complete train_loss=0.03221401944756508\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T19:36:52.981332Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T19:37:01.382679Z [\u001b[32minfo ] Evaluation complete test_loss=0.6017580628395081\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T19:37:01.386797Z [\u001b[32minfo ] Start Predict dataset=5788\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T19:37:57.489992Z [\u001b[32minfo ] Starting training dataset=9312 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T19:39:55.967846Z [\u001b[32minfo ] Training complete train_loss=0.04899067059159279\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T19:39:55.969679Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T19:40:04.272984Z [\u001b[32minfo ] Evaluation complete test_loss=0.4522852897644043\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T19:40:04.278449Z [\u001b[32minfo ] Start Predict dataset=5688\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T19:40:57.436623Z [\u001b[32minfo ] Starting training dataset=9412 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T19:42:55.093364Z [\u001b[32minfo ] Training complete train_loss=0.0554390586912632\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T19:42:55.095567Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T19:43:03.678599Z [\u001b[32minfo ] Evaluation complete test_loss=0.5251901149749756\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T19:43:03.682851Z [\u001b[32minfo ] Start Predict dataset=5588\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T19:43:54.653131Z [\u001b[32minfo ] Starting training dataset=9512 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T19:45:51.790112Z [\u001b[32minfo ] Training complete train_loss=0.04198170453310013\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T19:45:51.791730Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T19:46:00.284703Z [\u001b[32minfo ] Evaluation complete test_loss=0.4602336585521698\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T19:46:00.289764Z [\u001b[32minfo ] Start Predict dataset=5488\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T19:46:50.358892Z [\u001b[32minfo ] Starting training dataset=9612 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T19:48:46.692007Z [\u001b[32minfo ] Training complete train_loss=0.044020820409059525\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T19:48:46.693602Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T19:48:55.168600Z [\u001b[32minfo ] Evaluation complete test_loss=0.508590579032898\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T19:48:55.172559Z [\u001b[32minfo ] Start Predict dataset=5388\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T19:49:43.776890Z [\u001b[32minfo ] Starting training dataset=9712 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T19:51:42.176822Z [\u001b[32minfo ] Training complete train_loss=0.044634196907281876\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T19:51:42.179048Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T19:51:50.672765Z [\u001b[32minfo ] Evaluation complete test_loss=0.45551300048828125\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T19:51:50.676552Z [\u001b[32minfo ] Start Predict dataset=5288\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T19:52:38.484667Z [\u001b[32minfo ] Starting training dataset=9812 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T19:54:36.177208Z [\u001b[32minfo ] Training complete train_loss=0.04296591877937317\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T19:54:36.178931Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T19:54:44.583521Z [\u001b[32minfo ] Evaluation complete test_loss=0.47437164187431335\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T19:54:44.587728Z [\u001b[32minfo ] Start Predict dataset=5188\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T19:55:31.707641Z [\u001b[32minfo ] Starting training dataset=9912 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T19:57:30.279767Z [\u001b[32minfo ] Training complete train_loss=0.03641924262046814\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T19:57:30.281499Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T19:57:38.466727Z [\u001b[32minfo ] Evaluation complete test_loss=0.5371329188346863\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T19:57:38.470612Z [\u001b[32minfo ] Start Predict dataset=5088\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T19:58:24.271875Z [\u001b[32minfo ] Starting training dataset=10012 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T20:00:25.895237Z [\u001b[32minfo ] Training complete train_loss=0.03702413663268089\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T20:00:25.897511Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T20:00:34.571676Z [\u001b[32minfo ] Evaluation complete test_loss=0.5401384234428406\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T20:00:34.576397Z [\u001b[32minfo ] Start Predict dataset=4988\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T20:01:22.740200Z [\u001b[32minfo ] Starting training dataset=10112 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T20:03:27.695031Z [\u001b[32minfo ] Training complete train_loss=0.05812685936689377\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T20:03:27.698130Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T20:03:36.176022Z [\u001b[32minfo ] Evaluation complete test_loss=0.4845340847969055\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T20:03:36.179729Z [\u001b[32minfo ] Start Predict dataset=4888\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T20:04:22.320485Z [\u001b[32minfo ] Starting training dataset=10212 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T20:06:27.779220Z [\u001b[32minfo ] Training complete train_loss=0.0416891947388649\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T20:06:27.781323Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T20:06:36.067846Z [\u001b[32minfo ] Evaluation complete test_loss=0.46326562762260437\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T20:06:36.072402Z [\u001b[32minfo ] Start Predict dataset=4788\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T20:07:22.045382Z [\u001b[32minfo ] Starting training dataset=10312 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T20:09:30.292345Z [\u001b[32minfo ] Training complete train_loss=0.041010648012161255\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T20:09:30.294497Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T20:09:38.871867Z [\u001b[32minfo ] Evaluation complete test_loss=0.49109208583831787\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T20:09:38.876577Z [\u001b[32minfo ] Start Predict dataset=4688\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T20:10:24.154535Z [\u001b[32minfo ] Starting training dataset=10412 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T20:12:33.415576Z [\u001b[32minfo ] Training complete train_loss=0.028930526226758957\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T20:12:33.417436Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T20:12:42.284002Z [\u001b[32minfo ] Evaluation complete test_loss=0.5107240676879883\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T20:12:42.288185Z [\u001b[32minfo ] Start Predict dataset=4588\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T20:13:26.235085Z [\u001b[32minfo ] Starting training dataset=10512 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T20:15:35.385270Z [\u001b[32minfo ] Training complete train_loss=0.031576935201883316\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T20:15:35.387155Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T20:15:43.779605Z [\u001b[32minfo ] Evaluation complete test_loss=0.6074793338775635\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T20:15:43.783983Z [\u001b[32minfo ] Start Predict dataset=4488\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T20:16:27.250669Z [\u001b[32minfo ] Starting training dataset=10612 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T20:18:38.077150Z [\u001b[32minfo ] Training complete train_loss=0.025566451251506805\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T20:18:38.079132Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T20:18:46.374243Z [\u001b[32minfo ] Evaluation complete test_loss=0.6829275488853455\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T20:18:46.378420Z [\u001b[32minfo ] Start Predict dataset=4388\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T20:19:29.033297Z [\u001b[32minfo ] Starting training dataset=10712 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T20:21:40.186438Z [\u001b[32minfo ] Training complete train_loss=0.03265538439154625\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T20:21:40.188513Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T20:21:48.585218Z [\u001b[32minfo ] Evaluation complete test_loss=0.48324117064476013\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T20:21:48.589070Z [\u001b[32minfo ] Start Predict dataset=4288\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T20:22:29.946839Z [\u001b[32minfo ] Starting training dataset=10812 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T20:24:41.668771Z [\u001b[32minfo ] Training complete train_loss=0.04195354878902435\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T20:24:41.670907Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T20:24:49.971122Z [\u001b[32minfo ] Evaluation complete test_loss=0.5226069688796997\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T20:24:49.975409Z [\u001b[32minfo ] Start Predict dataset=4188\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T20:25:30.746312Z [\u001b[32minfo ] Starting training dataset=10912 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T20:27:43.090670Z [\u001b[32minfo ] Training complete train_loss=0.03698594123125076\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T20:27:43.092609Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T20:27:51.571422Z [\u001b[32minfo ] Evaluation complete test_loss=0.5863118767738342\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T20:27:51.576126Z [\u001b[32minfo ] Start Predict dataset=4088\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T20:28:31.534919Z [\u001b[32minfo ] Starting training dataset=11012 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T20:30:44.186803Z [\u001b[32minfo ] Training complete train_loss=0.04117933288216591\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T20:30:44.188896Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T20:30:52.585989Z [\u001b[32minfo ] Evaluation complete test_loss=0.4863777160644531\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T20:30:52.589908Z [\u001b[32minfo ] Start Predict dataset=3988\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T20:31:31.716610Z [\u001b[32minfo ] Starting training dataset=11112 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T20:33:46.383277Z [\u001b[32minfo ] Training complete train_loss=0.04627871885895729\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T20:33:46.385149Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T20:33:54.779086Z [\u001b[32minfo ] Evaluation complete test_loss=0.470214307308197\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T20:33:54.783126Z [\u001b[32minfo ] Start Predict dataset=3888\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T20:34:33.190703Z [\u001b[32minfo ] Starting training dataset=11212 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T20:36:48.681686Z [\u001b[32minfo ] Training complete train_loss=0.04613247141242027\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T20:36:48.683987Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T20:36:57.278346Z [\u001b[32minfo ] Evaluation complete test_loss=0.4558008015155792\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T20:36:57.282786Z [\u001b[32minfo ] Start Predict dataset=3788\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T20:37:34.291065Z [\u001b[32minfo ] Starting training dataset=11312 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T20:39:51.289119Z [\u001b[32minfo ] Training complete train_loss=0.02740650437772274\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T20:39:51.291195Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T20:39:59.882257Z [\u001b[32minfo ] Evaluation complete test_loss=0.5408679246902466\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T20:39:59.887662Z [\u001b[32minfo ] Start Predict dataset=3688\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T20:40:35.740146Z [\u001b[32minfo ] Starting training dataset=11412 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T20:42:52.090300Z [\u001b[32minfo ] Training complete train_loss=0.0475490465760231\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T20:42:52.092186Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T20:43:00.380313Z [\u001b[32minfo ] Evaluation complete test_loss=0.48505839705467224\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T20:43:00.384850Z [\u001b[32minfo ] Start Predict dataset=3588\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T20:43:35.409152Z [\u001b[32minfo ] Starting training dataset=11512 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T20:45:53.678636Z [\u001b[32minfo ] Training complete train_loss=0.029358908534049988\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T20:45:53.680683Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T20:46:01.986445Z [\u001b[32minfo ] Evaluation complete test_loss=0.5404146313667297\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T20:46:01.990899Z [\u001b[32minfo ] Start Predict dataset=3488\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T20:46:36.192044Z [\u001b[32minfo ] Starting training dataset=11612 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T20:48:54.491902Z [\u001b[32minfo ] Training complete train_loss=0.03432118520140648\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T20:48:54.494055Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T20:49:03.187720Z [\u001b[32minfo ] Evaluation complete test_loss=0.5160902142524719\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T20:49:03.192422Z [\u001b[32minfo ] Start Predict dataset=3388\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T20:49:36.627034Z [\u001b[32minfo ] Starting training dataset=11712 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T20:51:53.684736Z [\u001b[32minfo ] Training complete train_loss=0.025415150448679924\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T20:51:53.687300Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T20:52:02.180711Z [\u001b[32minfo ] Evaluation complete test_loss=0.4870961308479309\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T20:52:02.185758Z [\u001b[32minfo ] Start Predict dataset=3288\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T20:52:34.039245Z [\u001b[32minfo ] Starting training dataset=11812 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T20:54:52.384529Z [\u001b[32minfo ] Training complete train_loss=0.04198145121335983\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T20:54:52.386512Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T20:55:00.588475Z [\u001b[32minfo ] Evaluation complete test_loss=0.4633568823337555\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T20:55:00.593256Z [\u001b[32minfo ] Start Predict dataset=3188\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T20:55:32.526412Z [\u001b[32minfo ] Starting training dataset=11912 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T20:57:52.280876Z [\u001b[32minfo ] Training complete train_loss=0.027177348732948303\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T20:57:52.282884Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T20:58:00.669065Z [\u001b[32minfo ] Evaluation complete test_loss=0.5218877792358398\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T20:58:00.673801Z [\u001b[32minfo ] Start Predict dataset=3088\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T20:58:31.200998Z [\u001b[32minfo ] Starting training dataset=12012 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T21:00:51.678552Z [\u001b[32minfo ] Training complete train_loss=0.03196889907121658\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T21:00:51.680520Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T21:00:59.969555Z [\u001b[32minfo ] Evaluation complete test_loss=0.4975127875804901\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T21:00:59.974549Z [\u001b[32minfo ] Start Predict dataset=2988\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T21:01:29.609903Z [\u001b[32minfo ] Starting training dataset=12112 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T21:03:52.395673Z [\u001b[32minfo ] Training complete train_loss=0.029373254626989365\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T21:03:52.397885Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T21:04:00.787526Z [\u001b[32minfo ] Evaluation complete test_loss=0.48535236716270447\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T21:04:00.791903Z [\u001b[32minfo ] Start Predict dataset=2888\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T21:04:29.892682Z [\u001b[32minfo ] Starting training dataset=12212 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T21:06:53.894096Z [\u001b[32minfo ] Training complete train_loss=0.029735658317804337\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T21:06:53.895831Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T21:07:02.080102Z [\u001b[32minfo ] Evaluation complete test_loss=0.5647792220115662\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T21:07:02.084360Z [\u001b[32minfo ] Start Predict dataset=2788\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T21:07:30.166622Z [\u001b[32minfo ] Starting training dataset=12312 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T21:09:55.179011Z [\u001b[32minfo ] Training complete train_loss=0.0220402330160141\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T21:09:55.180688Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T21:10:03.477053Z [\u001b[32minfo ] Evaluation complete test_loss=0.5359561443328857\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T21:10:03.481581Z [\u001b[32minfo ] Start Predict dataset=2688\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T21:10:30.528810Z [\u001b[32minfo ] Starting training dataset=12412 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T21:12:54.091861Z [\u001b[32minfo ] Training complete train_loss=0.03964528813958168\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T21:12:54.094102Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T21:13:02.369356Z [\u001b[32minfo ] Evaluation complete test_loss=0.48836764693260193\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T21:13:02.374057Z [\u001b[32minfo ] Start Predict dataset=2588\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T21:13:27.496414Z [\u001b[32minfo ] Starting training dataset=12512 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T21:15:48.582187Z [\u001b[32minfo ] Training complete train_loss=0.04704689979553223\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T21:15:48.584036Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T21:15:56.668202Z [\u001b[32minfo ] Evaluation complete test_loss=0.44271594285964966\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T21:15:56.672079Z [\u001b[32minfo ] Start Predict dataset=2488\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T21:16:21.327724Z [\u001b[32minfo ] Starting training dataset=12612 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T21:18:50.083526Z [\u001b[32minfo ] Training complete train_loss=0.0628628060221672\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T21:18:50.085875Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T21:18:58.770438Z [\u001b[32minfo ] Evaluation complete test_loss=0.503575325012207\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T21:18:58.775111Z [\u001b[32minfo ] Start Predict dataset=2388\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T21:19:22.821996Z [\u001b[32minfo ] Starting training dataset=12712 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T21:21:51.483589Z [\u001b[32minfo ] Training complete train_loss=0.02907649055123329\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T21:21:51.485452Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T21:21:59.875917Z [\u001b[32minfo ] Evaluation complete test_loss=0.5075518488883972\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T21:21:59.879896Z [\u001b[32minfo ] Start Predict dataset=2288\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T21:22:23.219614Z [\u001b[32minfo ] Starting training dataset=12812 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T21:24:51.385507Z [\u001b[32minfo ] Training complete train_loss=0.030792735517024994\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T21:24:51.387626Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T21:24:59.785684Z [\u001b[32minfo ] Evaluation complete test_loss=0.44357359409332275\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T21:24:59.790069Z [\u001b[32minfo ] Start Predict dataset=2188\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T21:25:22.213913Z [\u001b[32minfo ] Starting training dataset=12912 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T21:27:50.994998Z [\u001b[32minfo ] Training complete train_loss=0.03515024855732918\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T21:27:50.996822Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T21:27:59.080001Z [\u001b[32minfo ] Evaluation complete test_loss=0.45534369349479675\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T21:27:59.084982Z [\u001b[32minfo ] Start Predict dataset=2088\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T21:28:19.309633Z [\u001b[32minfo ] Starting training dataset=13012 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T21:30:47.379066Z [\u001b[32minfo ] Training complete train_loss=0.028724441304802895\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T21:30:47.381229Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T21:30:55.668478Z [\u001b[32minfo ] Evaluation complete test_loss=0.5947825908660889\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T21:30:55.672879Z [\u001b[32minfo ] Start Predict dataset=1988\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T21:31:16.513428Z [\u001b[32minfo ] Starting training dataset=13112 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T21:33:46.087868Z [\u001b[32minfo ] Training complete train_loss=0.03632659092545509\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T21:33:46.090190Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T21:33:54.470903Z [\u001b[32minfo ] Evaluation complete test_loss=0.3916391432285309\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T21:33:54.475662Z [\u001b[32minfo ] Start Predict dataset=1888\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T21:34:13.817268Z [\u001b[32minfo ] Starting training dataset=13212 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T21:36:44.087828Z [\u001b[32minfo ] Training complete train_loss=0.0293908528983593\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T21:36:44.089967Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T21:36:52.285403Z [\u001b[32minfo ] Evaluation complete test_loss=0.553371787071228\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T21:36:52.289324Z [\u001b[32minfo ] Start Predict dataset=1788\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T21:37:10.997355Z [\u001b[32minfo ] Starting training dataset=13312 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T21:39:42.870680Z [\u001b[32minfo ] Training complete train_loss=0.024943392723798752\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T21:39:42.872809Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T21:39:51.376196Z [\u001b[32minfo ] Evaluation complete test_loss=0.5904661417007446\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T21:39:51.380310Z [\u001b[32minfo ] Start Predict dataset=1688\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T21:40:09.211327Z [\u001b[32minfo ] Starting training dataset=13412 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T21:42:40.875515Z [\u001b[32minfo ] Training complete train_loss=0.03411614149808884\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T21:42:40.877663Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T21:42:49.181025Z [\u001b[32minfo ] Evaluation complete test_loss=0.5226052403450012\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T21:42:49.185080Z [\u001b[32minfo ] Start Predict dataset=1588\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T21:43:06.385285Z [\u001b[32minfo ] Starting training dataset=13512 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T21:45:39.896472Z [\u001b[32minfo ] Training complete train_loss=0.02790781483054161\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T21:45:39.898469Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T21:45:48.768464Z [\u001b[32minfo ] Evaluation complete test_loss=0.48294728994369507\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T21:45:48.773007Z [\u001b[32minfo ] Start Predict dataset=1488\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T21:46:05.101706Z [\u001b[32minfo ] Starting training dataset=13612 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T21:48:38.587002Z [\u001b[32minfo ] Training complete train_loss=0.021476466208696365\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T21:48:38.589038Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T21:48:46.967470Z [\u001b[32minfo ] Evaluation complete test_loss=0.6516388654708862\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T21:48:46.971723Z [\u001b[32minfo ] Start Predict dataset=1388\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T21:49:02.400625Z [\u001b[32minfo ] Starting training dataset=13712 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T21:51:38.993724Z [\u001b[32minfo ] Training complete train_loss=0.02276446670293808\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T21:51:38.996209Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T21:51:47.371012Z [\u001b[32minfo ] Evaluation complete test_loss=0.5613048076629639\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T21:51:47.375167Z [\u001b[32minfo ] Start Predict dataset=1288\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T21:52:01.682782Z [\u001b[32minfo ] Starting training dataset=13812 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T21:54:41.984078Z [\u001b[32minfo ] Training complete train_loss=0.028370166197419167\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T21:54:41.985861Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T21:54:50.184247Z [\u001b[32minfo ] Evaluation complete test_loss=0.4826107919216156\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T21:54:50.188377Z [\u001b[32minfo ] Start Predict dataset=1188\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T21:55:03.498739Z [\u001b[32minfo ] Starting training dataset=13912 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T21:57:39.587512Z [\u001b[32minfo ] Training complete train_loss=0.022563118487596512\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T21:57:39.589138Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T21:57:48.077240Z [\u001b[32minfo ] Evaluation complete test_loss=0.5660622715950012\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T21:57:48.080857Z [\u001b[32minfo ] Start Predict dataset=1088\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T21:58:00.385493Z [\u001b[32minfo ] Starting training dataset=14012 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T22:00:36.183661Z [\u001b[32minfo ] Training complete train_loss=0.019728390499949455\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T22:00:36.185580Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T22:00:44.479725Z [\u001b[32minfo ] Evaluation complete test_loss=0.5509332418441772\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T22:00:44.484148Z [\u001b[32minfo ] Start Predict dataset=988\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T22:00:56.003275Z [\u001b[32minfo ] Starting training dataset=14112 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T22:03:32.995503Z [\u001b[32minfo ] Training complete train_loss=0.03207049146294594\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T22:03:32.997422Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T22:03:41.378439Z [\u001b[32minfo ] Evaluation complete test_loss=0.5133938193321228\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T22:03:41.382642Z [\u001b[32minfo ] Start Predict dataset=888\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T22:03:52.104510Z [\u001b[32minfo ] Starting training dataset=14212 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T22:06:29.792679Z [\u001b[32minfo ] Training complete train_loss=0.037114620208740234\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T22:06:29.794621Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T22:06:38.077351Z [\u001b[32minfo ] Evaluation complete test_loss=0.5605881214141846\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T22:06:38.082232Z [\u001b[32minfo ] Start Predict dataset=788\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T22:06:48.003399Z [\u001b[32minfo ] Starting training dataset=14312 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T22:09:27.080457Z [\u001b[32minfo ] Training complete train_loss=0.02423758991062641\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T22:09:27.082417Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T22:09:35.575955Z [\u001b[32minfo ] Evaluation complete test_loss=0.4343477487564087\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T22:09:35.580152Z [\u001b[32minfo ] Start Predict dataset=688\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T22:09:44.492943Z [\u001b[32minfo ] Starting training dataset=14412 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T22:12:23.871556Z [\u001b[32minfo ] Training complete train_loss=0.025025462731719017\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T22:12:23.873606Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T22:12:32.375955Z [\u001b[32minfo ] Evaluation complete test_loss=0.49787238240242004\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T22:12:32.380195Z [\u001b[32minfo ] Start Predict dataset=588\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T22:12:40.275824Z [\u001b[32minfo ] Starting training dataset=14512 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T22:15:20.281568Z [\u001b[32minfo ] Training complete train_loss=0.025414831936359406\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T22:15:20.283811Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T22:15:28.687757Z [\u001b[32minfo ] Evaluation complete test_loss=0.5783530473709106\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T22:15:28.691740Z [\u001b[32minfo ] Start Predict dataset=488\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T22:15:35.699876Z [\u001b[32minfo ] Starting training dataset=14612 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T22:18:15.677656Z [\u001b[32minfo ] Training complete train_loss=0.03256714344024658\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T22:18:15.679679Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T22:18:23.788199Z [\u001b[32minfo ] Evaluation complete test_loss=0.4111860692501068\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T22:18:23.792636Z [\u001b[32minfo ] Start Predict dataset=388\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T22:18:29.996618Z [\u001b[32minfo ] Starting training dataset=14712 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T22:21:10.987781Z [\u001b[32minfo ] Training complete train_loss=0.031116826459765434\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T22:21:10.989619Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T22:21:19.588062Z [\u001b[32minfo ] Evaluation complete test_loss=0.6319525837898254\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T22:21:19.592013Z [\u001b[32minfo ] Start Predict dataset=288\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T22:21:24.766597Z [\u001b[32minfo ] Starting training dataset=14812 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T22:24:06.881760Z [\u001b[32minfo ] Training complete train_loss=0.029832642525434494\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T22:24:06.884590Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T22:24:15.278154Z [\u001b[32minfo ] Evaluation complete test_loss=0.47507792711257935\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T22:24:15.282500Z [\u001b[32minfo ] Start Predict dataset=188\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T22:24:19.298270Z [\u001b[32minfo ] Starting training dataset=14912 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T22:27:02.087124Z [\u001b[32minfo ] Training complete train_loss=0.028897961601614952\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T22:27:02.089190Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T22:27:10.469021Z [\u001b[32minfo ] Evaluation complete test_loss=0.4525805115699768\n",
- "[131537-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T22:27:10.473399Z [\u001b[32minfo ] Start Predict dataset=88\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T22:27:13.881440Z [\u001b[32minfo ] Starting training dataset=15000 epoch=10\n",
- "[131537-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T22:29:57.197467Z [\u001b[32minfo ] Training complete train_loss=0.03034781664609909\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T22:29:57.199424Z [\u001b[32minfo ] Starting evaluating dataset=3000\n",
- "[131537-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T22:30:05.574946Z [\u001b[32minfo ] Evaluation complete test_loss=0.45382431149482727\n"
- ]
- }
- ],
+ "execution_count": null,
+ "outputs": [],
"source": [
"labelling_progress = active_set._labelled.copy().astype(np.uint16)\n",
"for epoch in tqdm(range(hyperparams.epoch)):\n",
" # Load the initial weights.\n",
" model.load_state_dict(init_weights)\n",
- " \n",
+ "\n",
" # Train the model on the currently labelled dataset.\n",
" _ = model.train_on_dataset(active_set, optimizer=optimizer, batch_size=hyperparams.batch_size,\n",
- " use_cuda=use_cuda, epoch=hyperparams.training_duration)\n",
+ " use_cuda=use_cuda, epoch=hyperparams.training_duration)\n",
"\n",
" # Get test NLL!\n",
" model.test_on_dataset(test_set, hyperparams.batch_size, use_cuda,\n",
@@ -1118,7 +367,7 @@
" # Keep track of progress\n",
" labelling_progress += active_set._labelled.astype(np.uint16)\n",
" if not should_continue:\n",
- " break\n",
+ " break\n",
"\n",
" test_loss = metrics['test_loss'].value\n",
" logs = {\n",
@@ -1126,56 +375,64 @@
" \"epoch\": epoch,\n",
" \"Next Training set size\": len(active_set)\n",
" }"
- ]
+ ],
+ "metadata": {
+ "collapsed": false,
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ }
},
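For context, `should_continue` in the loop above is set by an active-learning step that this hunk elides. A minimal sketch of where that flag typically comes from, assuming Baal's `ActiveLearningLoop` (the `query_size` value and `hyperparams.iterations` are illustrative assumptions, not the notebook's exact cell):

```python
# Sketch only -- the notebook's actual cell is hidden by the hunk above.
# ActiveLearningLoop.step() predicts on the unlabelled pool, ranks samples
# with the heuristic, labels the most informative ones, and returns False
# once the pool is exhausted.
from baal.active import ActiveLearningLoop
from baal.active.heuristics import BALD

active_loop = ActiveLearningLoop(
    active_set,                     # ActiveLearningDataset from earlier cells
    model.predict_on_dataset,       # Baal ModelWrapper from earlier cells
    heuristic=BALD(),
    query_size=100,                 # assumed; use your own labelling budget
    batch_size=hyperparams.batch_size,
    iterations=hyperparams.iterations,  # assumed MC-Dropout sample count
    use_cuda=use_cuda,
)
should_continue = active_loop.step()
```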
{
"cell_type": "markdown",
+ "source": [
+ "We will now save our progress on disk."
+ ],
"metadata": {
+ "collapsed": false,
"pycharm": {
"name": "#%% md\n"
}
- },
- "source": [
- "We will now save our progress on disk."
- ]
+ }
},
{
"cell_type": "code",
- "execution_count": 7,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "odict_keys(['features.0.weight', 'features.0.bias', 'features.2.weight', 'features.2.bias', 'features.5.weight', 'features.5.bias', 'features.7.weight', 'features.7.bias', 'features.10.weight', 'features.10.bias', 'features.12.weight', 'features.12.bias', 'features.14.weight', 'features.14.bias', 'features.17.weight', 'features.17.bias', 'features.19.weight', 'features.19.bias', 'features.21.weight', 'features.21.bias', 'features.24.weight', 'features.24.bias', 'features.26.weight', 'features.26.bias', 'features.28.weight', 'features.28.bias', 'classifier.0.weight', 'classifier.0.bias', 'classifier.3.weight', 'classifier.3.bias', 'classifier.6.weight', 'classifier.6.bias']) dict_keys(['labelled', 'random_state']) [103 89 135 ... 121 15 77]\n"
- ]
- }
- ],
+ "execution_count": null,
+ "outputs": [],
"source": [
"model_weight = model.state_dict()\n",
"dataset = active_set.state_dict()\n",
- "torch.save({'model':model_weight, 'dataset':dataset, 'labelling_progress':labelling_progress},\n",
+ "torch.save({'model': model_weight, 'dataset': dataset, 'labelling_progress': labelling_progress},\n",
" 'checkpoint.pth')\n",
"print(model.state_dict().keys(), dataset.keys(), labelling_progress)"
- ]
+ ],
+ "metadata": {
+ "collapsed": false,
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ }
},
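Restoring is symmetric to the save above. A minimal sketch of loading the checkpoint back, assuming `model` and `active_set` are constructed exactly as in the earlier cells:

```python
import torch

# Sketch: restore the experiment state written by the cell above.
checkpoint = torch.load('checkpoint.pth')
model.load_state_dict(checkpoint['model'])          # weights, via the ModelWrapper
active_set.load_state_dict(checkpoint['dataset'])   # labelled mask + RNG state
labelling_progress = checkpoint['labelling_progress']
```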
{
"cell_type": "markdown",
- "metadata": {},
"source": [
"## Visualization\n",
"\n",
"Now that our active learning experiment is completed, we can visualize it!\n",
"\n",
"## Get t-SNE features.\n",
- "We will use MultiCoreTSNE to get a t-SNE representation of our dataset. This will allows us to visualize the progress."
- ]
+ "We will use scikit-learn to get a t-SNE representation of our dataset. This will allows us to visualize the progress."
+ ],
+ "metadata": {
+ "collapsed": false,
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ }
},
{
"cell_type": "code",
- "execution_count": 8,
- "metadata": {},
+ "execution_count": null,
"outputs": [],
"source": [
"# modify our model to get features\n",
@@ -1188,35 +445,51 @@
" def __init__(self, model):\n",
" super().__init__()\n",
" self.model = model\n",
+ "\n",
" def forward(self, x):\n",
- " return torch.flatten(self.model.features(x),1)\n",
- " \n",
+ " return torch.flatten(self.model.features(x), 1)\n",
+ "\n",
"\n",
"features = FeatureExtractor(model.model)\n",
"acc = []\n",
- "for x,y in DataLoader(active_set._dataset, batch_size=10):\n",
+ "for x, y in DataLoader(active_set._dataset, batch_size=10):\n",
" acc.append((features(x.cuda()).detach().cpu().numpy(), y.detach().cpu().numpy()))\n",
- " \n",
+ "\n",
"xs, ys = zip(*acc)"
- ]
+ ],
+ "metadata": {
+ "collapsed": false,
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ }
},
{
"cell_type": "code",
- "execution_count": 9,
- "metadata": {},
+ "execution_count": null,
"outputs": [],
"source": [
- "from MulticoreTSNE import MulticoreTSNE as TSNE\n",
+ "from sklearn.manifold import TSNE\n",
"\n",
"# Compute t-SNE on the extracted features.\n",
"tsne = TSNE(n_jobs=4)\n",
"transformed = tsne.fit_transform(np.vstack(xs))"
- ]
+ ],
+ "metadata": {
+ "collapsed": false,
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ }
},
{
"cell_type": "code",
"execution_count": 10,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"data": {
@@ -1249,7 +522,11 @@
{
"cell_type": "code",
"execution_count": 11,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [],
"source": [
"from baal.utils.plot_utils import make_animation_from_data\n",
@@ -273800,6 +273077,7 @@
"import matplotlib.pyplot as plt\n",
"from matplotlib import animation\n",
"\n",
+ "\n",
"def plot_images(img_list):\n",
" def init():\n",
" img.set_data(img_list[0])\n",
@@ -273809,13 +273087,14 @@
" img.set_data(img_list[i])\n",
" return (img,)\n",
"\n",
- " fig = plt.Figure(figsize=(10,10))\n",
+ " fig = plt.Figure(figsize=(10, 10))\n",
" ax = fig.gca()\n",
" img = ax.imshow(img_list[0])\n",
" anim = animation.FuncAnimation(fig, animate, init_func=init,\n",
- " frames=len(img_list), interval=60, blit=True)\n",
+ " frames=len(img_list), interval=60, blit=True)\n",
" return anim\n",
"\n",
+ "\n",
"HTML(plot_images(frames).to_jshtml())"
]
},
@@ -273855,4 +273134,4 @@
},
"nbformat": 4,
"nbformat_minor": 4
-}
+}
\ No newline at end of file
diff --git a/notebooks/baal_prod_cls.ipynb b/notebooks/baal_prod_cls.ipynb
index 99f9a50c..916fa58d 100644
--- a/notebooks/baal_prod_cls.ipynb
+++ b/notebooks/baal_prod_cls.ipynb
@@ -9,9 +9,11 @@
}
},
"source": [
- "# Use BaaL in production (Classification)\n",
+ "# Use Baal in production (Classification)\n",
"\n",
- "In this tutorial, we will show you how to use BaaL during your labeling task.\n",
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/baal-org/baal/blob/master/notebooks/baal_prod_cls.ipynb)\n",
+ "\n",
+ "In this tutorial, we will show you how to use Baal during your labeling task.\n",
"\n",
"**NOTE** In this tutorial, we assume that we do not know the labels!\n",
"\n",
@@ -21,8 +23,8 @@
"pip install baal\n",
"```\n",
"\n",
- "We will first need a dataset! For the purpose of this demo, we will use a classification dataset, but BaaL\n",
- "works on more than computer vision! As long as we can estimate the uncertainty of a prediction, BaaL can be used.\n",
+ "We will first need a dataset! For the purpose of this demo, we will use a classification dataset, but Baal\n",
+ "works on more than computer vision! As long as we can estimate the uncertainty of a prediction, Baal can be used.\n",
"\n",
"We will use the [Natural Images Dataset](https://www.kaggle.com/prasunroy/natural-images).\n",
"Please extract the data in `/tmp/natural_images`.\n"
@@ -122,12 +124,12 @@
"We now have two unlabeled datasets : train and validation. We encapsulate the training dataset in a \n",
"`ActiveLearningDataset` object which will take care of the split between labeled and unlabeled samples.\n",
"We are now ready to use Active Learning.\n",
- "We will use a technique called MC-Dropout, BaaL supports other techniques (see README) and proposes a similar API\n",
+ "We will use a technique called MC-Dropout, Baal supports other techniques (see README) and proposes a similar API\n",
"for each of them.\n",
- "When using MC-Dropout with BaaL, you can use any model as long as there are some Dropout Layers. These layers are essential to compute\n",
+ "When using MC-Dropout with Baal, you can use any model as long as there are some Dropout Layers. These layers are essential to compute\n",
"the uncertainty of the model.\n",
"\n",
- "BaaL propose several models, but it also supports custom models using baal.bayesian.dropout.MCDropoutModule.\n",
+ "Baal propose several models, but it also supports custom models using baal.bayesian.dropout.MCDropoutModule.\n",
"\n",
"In this example, we will use VGG-16, a popular model from `torchvision`."
]
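The notebook's own cell follows in the next hunk; as a minimal sketch of the pattern just described, assuming Baal's documented `patch_module` helper (the `num_classes=8` value is an assumption for Natural Images):

```python
# Sketch: patch_module swaps every torch.nn.Dropout in the model for a
# variant that stays active at inference time, enabling MC-Dropout sampling.
from torchvision.models import vgg16
from baal.bayesian.dropout import patch_module

model = vgg16(pretrained=False, num_classes=8)  # assumption: 8 classes
model = patch_module(model)
```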
@@ -378,55 +380,14 @@
},
{
"cell_type": "code",
- "execution_count": 10,
+ "execution_count": null,
"metadata": {
"pycharm": {
- "name": "#%%\n"
+ "name": "#%%\n",
+ "is_executing": true
}
},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Training on 110 items!\n",
- "[103-MainThread ] [baal.modelwrapper:train_on_dataset:109] 2021-07-28T14:50:02.089160Z [\u001B[32minfo ] Starting training dataset=110 epoch=5\n",
- "[103-MainThread ] [baal.modelwrapper:train_on_dataset:119] 2021-07-28T14:50:19.678241Z [\u001B[32minfo ] Training complete train_loss=1.9793428182601929\n",
- "[103-MainThread ] [baal.modelwrapper:test_on_dataset:147] 2021-07-28T14:50:19.681509Z [\u001B[32minfo ] Starting evaluating dataset=1725\n",
- "[103-MainThread ] [baal.modelwrapper:test_on_dataset:156] 2021-07-28T14:50:33.777658Z [\u001B[32minfo ] Evaluation complete test_loss=2.013453960418701\n",
- "Metrics: {'test_loss': 2.013453960418701, 'train_loss': 1.9793428182601929}\n",
- "[103-MainThread ] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-28T14:50:33.784990Z [\u001B[32minfo ] Start Predict dataset=5064\n",
- "Training on 120 items!\n",
- "[103-MainThread ] [baal.modelwrapper:train_on_dataset:109] 2021-07-28T14:52:14.295969Z [\u001B[32minfo ] Starting training dataset=120 epoch=5\n",
- "[103-MainThread ] [baal.modelwrapper:train_on_dataset:119] 2021-07-28T14:52:32.482238Z [\u001B[32minfo ] Training complete train_loss=1.8900309801101685\n",
- "[103-MainThread ] [baal.modelwrapper:test_on_dataset:147] 2021-07-28T14:52:32.484473Z [\u001B[32minfo ] Starting evaluating dataset=1725\n",
- "[103-MainThread ] [baal.modelwrapper:test_on_dataset:156] 2021-07-28T14:52:46.287436Z [\u001B[32minfo ] Evaluation complete test_loss=1.8315811157226562\n",
- "Metrics: {'test_loss': 1.8315811157226562, 'train_loss': 1.8900309801101685}\n",
- "[103-MainThread ] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-28T14:52:46.367016Z [\u001B[32minfo ] Start Predict dataset=5054\n",
- "Training on 130 items!\n",
- "[103-MainThread ] [baal.modelwrapper:train_on_dataset:109] 2021-07-28T14:54:26.794349Z [\u001B[32minfo ] Starting training dataset=130 epoch=5\n",
- "[103-MainThread ] [baal.modelwrapper:train_on_dataset:119] 2021-07-28T14:54:44.481490Z [\u001B[32minfo ] Training complete train_loss=1.961772084236145\n",
- "[103-MainThread ] [baal.modelwrapper:test_on_dataset:147] 2021-07-28T14:54:44.483477Z [\u001B[32minfo ] Starting evaluating dataset=1725\n",
- "[103-MainThread ] [baal.modelwrapper:test_on_dataset:156] 2021-07-28T14:54:58.268424Z [\u001B[32minfo ] Evaluation complete test_loss=1.859472393989563\n",
- "Metrics: {'test_loss': 1.859472393989563, 'train_loss': 1.961772084236145}\n",
- "[103-MainThread ] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-28T14:54:58.276565Z [\u001B[32minfo ] Start Predict dataset=5044\n",
- "Training on 140 items!\n",
- "[103-MainThread ] [baal.modelwrapper:train_on_dataset:109] 2021-07-28T14:56:38.406344Z [\u001B[32minfo ] Starting training dataset=140 epoch=5\n",
- "[103-MainThread ] [baal.modelwrapper:train_on_dataset:119] 2021-07-28T14:56:57.088064Z [\u001B[32minfo ] Training complete train_loss=1.8688158988952637\n",
- "[103-MainThread ] [baal.modelwrapper:test_on_dataset:147] 2021-07-28T14:56:57.091358Z [\u001B[32minfo ] Starting evaluating dataset=1725\n",
- "[103-MainThread ] [baal.modelwrapper:test_on_dataset:156] 2021-07-28T14:57:10.968456Z [\u001B[32minfo ] Evaluation complete test_loss=1.7242822647094727\n",
- "Metrics: {'test_loss': 1.7242822647094727, 'train_loss': 1.8688158988952637}\n",
- "[103-MainThread ] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-28T14:57:10.977104Z [\u001B[32minfo ] Start Predict dataset=5034\n",
- "Training on 150 items!\n",
- "[103-MainThread ] [baal.modelwrapper:train_on_dataset:109] 2021-07-28T14:58:51.197386Z [\u001B[32minfo ] Starting training dataset=150 epoch=5\n",
- "[103-MainThread ] [baal.modelwrapper:train_on_dataset:119] 2021-07-28T14:59:09.779341Z [\u001B[32minfo ] Training complete train_loss=1.8381125926971436\n",
- "[103-MainThread ] [baal.modelwrapper:test_on_dataset:147] 2021-07-28T14:59:09.782580Z [\u001B[32minfo ] Starting evaluating dataset=1725\n",
- "[103-MainThread ] [baal.modelwrapper:test_on_dataset:156] 2021-07-28T14:59:23.176680Z [\u001B[32minfo ] Evaluation complete test_loss=1.7318601608276367\n",
- "Metrics: {'test_loss': 1.7318601608276367, 'train_loss': 1.8381125926971436}\n",
- "[103-MainThread ] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-28T14:59:23.184444Z [\u001B[32minfo ] Start Predict dataset=5024\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
"# 5. If not done, go back to 2.\n",
"for step in range(5): # 5 Active Learning step!\n",
@@ -490,7 +451,7 @@
},
"source": [
"## Support\n",
- "Submit an issue or reach us to our Gitter!"
+ "Submit an issue or reach us to our Slack!"
]
}
],
diff --git a/notebooks/compatibility/nlp_classification.ipynb b/notebooks/compatibility/nlp_classification.ipynb
index 032b98d2..1da87a85 100644
--- a/notebooks/compatibility/nlp_classification.ipynb
+++ b/notebooks/compatibility/nlp_classification.ipynb
@@ -3,22 +3,33 @@
{
"cell_type": "markdown",
"id": "still-resident",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"# Active Learning for NLP Classification\n",
+ "\n",
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/baal-org/baal/blob/master/notebooks/compatibility/nlp_classification.ipynb)\n",
+ "\n",
"In this tutorial, we guide you through using our new [HuggingFace](https://huggingface.co/transformers/main_classes/trainer.html) trainer wrapper to do active learning with transformers models.\n",
" Any model which could be trained by HuggingFace trainer and has `Dropout` layers could be used in the same manner.\n",
"\n",
"We will use the `SST2` dataset and `BertForSequenceClassification` as the model for the purpose of this tutorial. As usual, we need to first download the dataset.\n",
"\n",
- "Note: This tutorial is intended for advanced users. If you are not familiar with BaaL, please refer to other tutorials."
+ "Note: This tutorial is intended for advanced users. If you are not familiar with Baal, please refer to other tutorials."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "sixth-wound",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"name": "stderr",
@@ -37,7 +48,11 @@
{
"cell_type": "markdown",
"id": "mechanical-tennessee",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"## ActiveLearning Dataset\n",
"In order to create an active learning dataset, we need to wrap the dataset with `baal.ActiveLearningDataset`.\n",
@@ -49,7 +64,11 @@
"cell_type": "code",
"execution_count": 2,
"id": "liquid-replacement",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"name": "stdout",
@@ -77,7 +96,11 @@
{
"cell_type": "markdown",
"id": "ready-participation",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"## Active Learning Model\n",
"The process of making a model bayesian is exactly the same as before. In this case, we will get the `Bert` model and use `baal.bayesian.dropout.patch_module` to make the dropout layer stochastic at inference time. "
@@ -87,7 +110,11 @@
"cell_type": "code",
"execution_count": 3,
"id": "baking-coalition",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"name": "stderr",
@@ -119,7 +146,11 @@
{
"cell_type": "markdown",
"id": "eleven-portugal",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"## Heuristic\n",
"\n",
@@ -133,7 +164,11 @@
"cell_type": "code",
"execution_count": 4,
"id": "cooperative-constant",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [],
"source": [
"from baal.active import get_heuristic\n",
@@ -144,7 +179,11 @@
{
"cell_type": "markdown",
"id": "listed-kelly",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"## HugginFace Trainer Wrapper\n",
"\n",
@@ -158,7 +197,11 @@
"cell_type": "code",
"execution_count": 5,
"id": "moving-olive",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"data": {
@@ -620,4 +663,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
-}
+}
\ No newline at end of file
diff --git a/notebooks/compatibility/sklearn_tutorial.ipynb b/notebooks/compatibility/sklearn_tutorial.ipynb
index f227082c..ce0a0707 100644
--- a/notebooks/compatibility/sklearn_tutorial.ipynb
+++ b/notebooks/compatibility/sklearn_tutorial.ipynb
@@ -9,14 +9,16 @@
}
},
"source": [
- "# How to use BaaL with Scikit-Learn models\n",
+ "# How to use Baal with Scikit-Learn models\n",
"\n",
- "In this tutorial, you will learn how to use BaaL on a scikit-learn model.\n",
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/baal-org/baal/blob/master/notebooks/compatibility/sklearn_tutorial.ipynb)\n",
+ "\n",
+ "In this tutorial, you will learn how to use Baal on a scikit-learn model.\n",
"In this case, we will use `RandomForestClassifier`.\n",
"\n",
"This tutorial is based on the tutorial from [Saimadhu Polamuri](https://dataaspirant.com/2017/06/26/random-forest-classifier-python-scikit-learn/).\n",
"\n",
- "First, if you have not done it yet, let's install BaaL.\n",
+ "First, if you have not done it yet, let's install Baal.\n",
"\n",
"```bash\n",
"pip install baal\n",
@@ -144,7 +146,7 @@
"\n",
"# Predict independently for all estimators.\n",
"x = np.array(list(map(lambda e: e.predict_proba(test_x), clf.estimators_)))\n",
- "# Roll axis because BaaL expect [n_samples, n_classes, ..., n_estimations]\n",
+ "# Roll axis because Baal expect [n_samples, n_classes, ..., n_estimations]\n",
"x = np.rollaxis(x, 0, 3)\n",
"print(\"Uncertainty per sample\")\n",
"print(BALD().compute_score(x))\n",
@@ -250,7 +252,7 @@
" # Predict with all fitted estimators.\n",
" x = np.array(list(map(lambda e: e.predict_proba(test[0]), clf.estimators_)))\n",
" \n",
- " # Roll axis because BaaL expect [n_samples, n_classes, ..., n_estimations]\n",
+ " # Roll axis because Baal expect [n_samples, n_classes, ..., n_estimations]\n",
" x = np.rollaxis(x, 0, 3)\n",
" return x\n",
"\n",
diff --git a/notebooks/deep_ensemble.ipynb b/notebooks/deep_ensemble.ipynb
index 79550a46..57987380 100644
--- a/notebooks/deep_ensemble.ipynb
+++ b/notebooks/deep_ensemble.ipynb
@@ -2,13 +2,19 @@
"cells": [
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
- "# How to use Deep ensembles in BaaL\n",
+ "# How to use Deep ensembles in Baal\n",
+ "\n",
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/baal-org/baal/blob/master/notebooks/deep_ensemble.ipynb)\n",
"\n",
"Ensemble are one of the easiest form of Bayesian deep learning.\n",
" The main drawback from this approach is the important amount of computational resources needed to perform it.\n",
- " In this notebook, we will present BaaL's Ensemble API namely `EnsembleModelWrapper`.\n",
+ " In this notebook, we will present Baal's Ensemble API namely `EnsembleModelWrapper`.\n",
"\n",
"\n",
"This notebook is similar to our notebook on how to do research, we suggest you look at it first if you haven't.\n",
@@ -22,7 +28,11 @@
{
"cell_type": "code",
"execution_count": 1,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [],
"source": [
"import random\n",
@@ -59,7 +69,11 @@
{
"cell_type": "code",
"execution_count": 2,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [],
"source": [
"@dataclass\n",
@@ -106,7 +120,11 @@
{
"cell_type": "code",
"execution_count": 3,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"name": "stdout",
@@ -157,7 +175,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"## Presenting EnsembleModelWrapper\n",
"\n",
@@ -185,1079 +207,14 @@
},
{
"cell_type": "code",
- "execution_count": 4,
- "metadata": {},
- "outputs": [
-      { "… removed: ~1,000 lines of cleared cell output: a Jupyter progress-bar widget, DataLoader worker warnings, and per-step ensemble logs in which models 0-4 are retrained at each active-learning step (labelled set growing from 512 to 2,912 items) while test_loss falls from ≈1.08 to ≈0.65 …" }
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-28T23:17:10.073503Z [\u001B[32minfo ] Training complete train_loss=0.9232361912727356\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-28T23:17:10.078705Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-28T23:17:23.270843Z [\u001B[32minfo ] Evaluation complete test_loss=0.6433601975440979\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-28T23:17:23.277508Z [\u001B[32minfo ] Start Predict dataset=47088\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-28T23:33:28.882364Z [\u001B[32minfo ] Starting training dataset=3012 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-28T23:34:28.174089Z [\u001B[32minfo ] Training complete train_loss=0.9940481781959534\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-28T23:34:28.181205Z [\u001B[32minfo ] Starting training dataset=3012 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-28T23:35:27.384836Z [\u001B[32minfo ] Training complete train_loss=0.9763245582580566\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-28T23:35:27.392383Z [\u001B[32minfo ] Starting training dataset=3012 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-28T23:36:27.078643Z [\u001B[32minfo ] Training complete train_loss=0.9977985620498657\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-28T23:36:27.085664Z [\u001B[32minfo ] Starting training dataset=3012 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-28T23:37:26.387570Z [\u001B[32minfo ] Training complete train_loss=0.9883565902709961\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-28T23:37:26.394578Z [\u001B[32minfo ] Starting training dataset=3012 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-28T23:38:26.078454Z [\u001B[32minfo ] Training complete train_loss=1.0123181343078613\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-28T23:38:26.084095Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-28T23:38:39.683168Z [\u001B[32minfo ] Evaluation complete test_loss=0.6295325756072998\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-28T23:38:39.690456Z [\u001B[32minfo ] Start Predict dataset=46988\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-28T23:54:29.465550Z [\u001B[32minfo ] Starting training dataset=3112 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-28T23:55:29.272588Z [\u001B[32minfo ] Training complete train_loss=0.993098795413971\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-28T23:55:29.279219Z [\u001B[32minfo ] Starting training dataset=3112 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-28T23:56:29.374525Z [\u001B[32minfo ] Training complete train_loss=0.982636570930481\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-28T23:56:29.381500Z [\u001B[32minfo ] Starting training dataset=3112 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-28T23:57:28.969284Z [\u001B[32minfo ] Training complete train_loss=0.9553168416023254\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-28T23:57:28.978303Z [\u001B[32minfo ] Starting training dataset=3112 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-28T23:58:28.386104Z [\u001B[32minfo ] Training complete train_loss=0.9859296679496765\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-28T23:58:28.392510Z [\u001B[32minfo ] Starting training dataset=3112 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-28T23:59:28.085509Z [\u001B[32minfo ] Training complete train_loss=0.911634624004364\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-28T23:59:28.090740Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-28T23:59:41.378569Z [\u001B[32minfo ] Evaluation complete test_loss=0.6155526041984558\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-28T23:59:41.388091Z [\u001B[32minfo ] Start Predict dataset=46888\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T00:15:27.000611Z [\u001B[32minfo ] Starting training dataset=3212 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T00:16:27.485323Z [\u001B[32minfo ] Training complete train_loss=0.9690141081809998\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T00:16:27.492326Z [\u001B[32minfo ] Starting training dataset=3212 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T00:17:27.971410Z [\u001B[32minfo ] Training complete train_loss=0.9362989664077759\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T00:17:27.978854Z [\u001B[32minfo ] Starting training dataset=3212 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T00:18:28.374476Z [\u001B[32minfo ] Training complete train_loss=0.9801816940307617\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T00:18:28.381476Z [\u001B[32minfo ] Starting training dataset=3212 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T00:19:28.688675Z [\u001B[32minfo ] Training complete train_loss=0.982334315776825\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T00:19:28.695882Z [\u001B[32minfo ] Starting training dataset=3212 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T00:20:29.280950Z [\u001B[32minfo ] Training complete train_loss=1.0058739185333252\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T00:20:29.285715Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T00:20:42.576387Z [\u001B[32minfo ] Evaluation complete test_loss=0.624253511428833\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T00:20:42.582922Z [\u001B[32minfo ] Start Predict dataset=46788\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T00:36:24.397328Z [\u001B[32minfo ] Starting training dataset=3312 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T00:37:25.777847Z [\u001B[32minfo ] Training complete train_loss=0.9525146484375\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T00:37:25.785148Z [\u001B[32minfo ] Starting training dataset=3312 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T00:38:27.670850Z [\u001B[32minfo ] Training complete train_loss=0.9617348909378052\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T00:38:27.678614Z [\u001B[32minfo ] Starting training dataset=3312 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T00:39:29.474158Z [\u001B[32minfo ] Training complete train_loss=0.9088602066040039\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T00:39:29.480853Z [\u001B[32minfo ] Starting training dataset=3312 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T00:40:31.282180Z [\u001B[32minfo ] Training complete train_loss=0.941899836063385\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T00:40:31.288671Z [\u001B[32minfo ] Starting training dataset=3312 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T00:41:32.674235Z [\u001B[32minfo ] Training complete train_loss=0.9036476612091064\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T00:41:32.679012Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T00:41:45.880151Z [\u001B[32minfo ] Evaluation complete test_loss=0.6003412008285522\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T00:41:45.886711Z [\u001B[32minfo ] Start Predict dataset=46688\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T00:57:25.233096Z [\u001B[32minfo ] Starting training dataset=3412 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T00:58:27.771124Z [\u001B[32minfo ] Training complete train_loss=0.9652302861213684\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T00:58:27.778539Z [\u001B[32minfo ] Starting training dataset=3412 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T00:59:29.682729Z [\u001B[32minfo ] Training complete train_loss=0.9133856892585754\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T00:59:29.689246Z [\u001B[32minfo ] Starting training dataset=3412 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T01:00:32.276974Z [\u001B[32minfo ] Training complete train_loss=0.9120355248451233\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T01:00:32.285180Z [\u001B[32minfo ] Starting training dataset=3412 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T01:01:34.571251Z [\u001B[32minfo ] Training complete train_loss=0.9987239241600037\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T01:01:34.579233Z [\u001B[32minfo ] Starting training dataset=3412 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T01:02:36.971180Z [\u001B[32minfo ] Training complete train_loss=1.0511140823364258\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T01:02:36.976140Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T01:02:50.389403Z [\u001B[32minfo ] Evaluation complete test_loss=0.600139319896698\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T01:02:50.466974Z [\u001B[32minfo ] Start Predict dataset=46588\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T01:18:50.039255Z [\u001B[32minfo ] Starting training dataset=3512 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T01:19:55.272920Z [\u001B[32minfo ] Training complete train_loss=0.9255532026290894\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T01:19:55.280740Z [\u001B[32minfo ] Starting training dataset=3512 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T01:21:00.783239Z [\u001B[32minfo ] Training complete train_loss=0.9507350325584412\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T01:21:00.791110Z [\u001B[32minfo ] Starting training dataset=3512 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T01:22:06.082850Z [\u001B[32minfo ] Training complete train_loss=0.9777060151100159\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T01:22:06.090186Z [\u001B[32minfo ] Starting training dataset=3512 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T01:23:11.572780Z [\u001B[32minfo ] Training complete train_loss=0.9614008069038391\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T01:23:11.581010Z [\u001B[32minfo ] Starting training dataset=3512 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T01:24:17.388319Z [\u001B[32minfo ] Training complete train_loss=0.9385112524032593\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T01:24:17.464897Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T01:24:31.274227Z [\u001B[32minfo ] Evaluation complete test_loss=0.5854552388191223\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T01:24:31.281479Z [\u001B[32minfo ] Start Predict dataset=46488\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T01:40:39.084449Z [\u001B[32minfo ] Starting training dataset=3612 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T01:41:51.372209Z [\u001B[32minfo ] Training complete train_loss=0.9902898669242859\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T01:41:51.384139Z [\u001B[32minfo ] Starting training dataset=3612 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T01:43:04.776776Z [\u001B[32minfo ] Training complete train_loss=0.9506107568740845\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T01:43:04.784669Z [\u001B[32minfo ] Starting training dataset=3612 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T01:44:16.971692Z [\u001B[32minfo ] Training complete train_loss=0.9101556539535522\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T01:44:16.980149Z [\u001B[32minfo ] Starting training dataset=3612 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T01:45:30.171964Z [\u001B[32minfo ] Training complete train_loss=0.9209990501403809\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T01:45:30.180936Z [\u001B[32minfo ] Starting training dataset=3612 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T01:46:41.988783Z [\u001B[32minfo ] Training complete train_loss=0.9617601633071899\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T01:46:41.995001Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T01:46:56.588932Z [\u001B[32minfo ] Evaluation complete test_loss=0.588925838470459\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T01:46:56.596366Z [\u001B[32minfo ] Start Predict dataset=46388\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T02:03:02.545742Z [\u001B[32minfo ] Starting training dataset=3712 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T02:04:10.991458Z [\u001B[32minfo ] Training complete train_loss=0.902265727519989\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T02:04:11.070627Z [\u001B[32minfo ] Starting training dataset=3712 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T02:05:20.275718Z [\u001B[32minfo ] Training complete train_loss=0.9274501204490662\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T02:05:20.283525Z [\u001B[32minfo ] Starting training dataset=3712 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T02:06:30.384887Z [\u001B[32minfo ] Training complete train_loss=0.948907732963562\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T02:06:30.392616Z [\u001B[32minfo ] Starting training dataset=3712 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T02:07:40.073581Z [\u001B[32minfo ] Training complete train_loss=0.9361139535903931\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T02:07:40.081202Z [\u001B[32minfo ] Starting training dataset=3712 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T02:08:49.870122Z [\u001B[32minfo ] Training complete train_loss=0.9426257014274597\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T02:08:49.876067Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T02:09:03.779674Z [\u001B[32minfo ] Evaluation complete test_loss=0.5798165798187256\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T02:09:03.787281Z [\u001B[32minfo ] Start Predict dataset=46288\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T02:25:00.390871Z [\u001B[32minfo ] Starting training dataset=3812 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T02:26:08.266439Z [\u001B[32minfo ] Training complete train_loss=0.9778515100479126\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T02:26:08.273235Z [\u001B[32minfo ] Starting training dataset=3812 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T02:27:16.871698Z [\u001B[32minfo ] Training complete train_loss=0.9607134461402893\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T02:27:16.879594Z [\u001B[32minfo ] Starting training dataset=3812 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T02:28:25.190132Z [\u001B[32minfo ] Training complete train_loss=1.0153460502624512\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T02:28:25.268855Z [\u001B[32minfo ] Starting training dataset=3812 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T02:29:34.683644Z [\u001B[32minfo ] Training complete train_loss=1.0046292543411255\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T02:29:34.692586Z [\u001B[32minfo ] Starting training dataset=3812 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T02:30:44.576145Z [\u001B[32minfo ] Training complete train_loss=0.9653735756874084\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T02:30:44.582048Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T02:30:58.377427Z [\u001B[32minfo ] Evaluation complete test_loss=0.5952193140983582\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T02:30:58.384668Z [\u001B[32minfo ] Start Predict dataset=46188\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T02:46:37.602564Z [\u001B[32minfo ] Starting training dataset=3912 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T02:47:48.374654Z [\u001B[32minfo ] Training complete train_loss=0.9733763933181763\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T02:47:48.381969Z [\u001B[32minfo ] Starting training dataset=3912 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T02:48:58.977107Z [\u001B[32minfo ] Training complete train_loss=0.9541704058647156\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T02:48:58.984005Z [\u001B[32minfo ] Starting training dataset=3912 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T02:50:07.770611Z [\u001B[32minfo ] Training complete train_loss=0.8996275067329407\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T02:50:07.777522Z [\u001B[32minfo ] Starting training dataset=3912 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T02:51:17.876229Z [\u001B[32minfo ] Training complete train_loss=0.9932214617729187\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T02:51:17.883210Z [\u001B[32minfo ] Starting training dataset=3912 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T02:52:26.082136Z [\u001B[32minfo ] Training complete train_loss=1.0322301387786865\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T02:52:26.087786Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T02:52:39.383655Z [\u001B[32minfo ] Evaluation complete test_loss=0.5735118985176086\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T02:52:39.391237Z [\u001B[32minfo ] Start Predict dataset=46088\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T03:08:19.372014Z [\u001B[32minfo ] Starting training dataset=4012 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T03:09:30.573365Z [\u001B[32minfo ] Training complete train_loss=0.8765227794647217\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T03:09:30.580674Z [\u001B[32minfo ] Starting training dataset=4012 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T03:10:40.077172Z [\u001B[32minfo ] Training complete train_loss=0.9590541124343872\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T03:10:40.084249Z [\u001B[32minfo ] Starting training dataset=4012 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T03:11:51.185509Z [\u001B[32minfo ] Training complete train_loss=0.9163060784339905\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T03:11:51.192620Z [\u001B[32minfo ] Starting training dataset=4012 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T03:13:01.870247Z [\u001B[32minfo ] Training complete train_loss=0.9452018737792969\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T03:13:01.878052Z [\u001B[32minfo ] Starting training dataset=4012 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T03:14:12.270683Z [\u001B[32minfo ] Training complete train_loss=0.9404565095901489\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T03:14:12.276100Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T03:14:25.490276Z [\u001B[32minfo ] Evaluation complete test_loss=0.562659740447998\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T03:14:25.569134Z [\u001B[32minfo ] Start Predict dataset=45988\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T03:30:03.646820Z [\u001B[32minfo ] Starting training dataset=4112 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T03:31:13.466228Z [\u001B[32minfo ] Training complete train_loss=1.0031845569610596\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T03:31:13.472767Z [\u001B[32minfo ] Starting training dataset=4112 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T03:32:23.173042Z [\u001B[32minfo ] Training complete train_loss=0.9533597230911255\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T03:32:23.179884Z [\u001B[32minfo ] Starting training dataset=4112 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T03:33:33.078510Z [\u001B[32minfo ] Training complete train_loss=0.9691417217254639\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T03:33:33.084899Z [\u001B[32minfo ] Starting training dataset=4112 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T03:34:42.771816Z [\u001B[32minfo ] Training complete train_loss=0.9820665717124939\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T03:34:42.778569Z [\u001B[32minfo ] Starting training dataset=4112 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T03:35:52.866703Z [\u001B[32minfo ] Training complete train_loss=0.9796773195266724\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T03:35:52.871526Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T03:36:05.879288Z [\u001B[32minfo ] Evaluation complete test_loss=0.5647993087768555\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T03:36:05.885746Z [\u001B[32minfo ] Start Predict dataset=45888\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T03:51:29.489449Z [\u001B[32minfo ] Starting training dataset=4212 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T03:52:40.770569Z [\u001B[32minfo ] Training complete train_loss=0.936839759349823\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T03:52:40.778765Z [\u001B[32minfo ] Starting training dataset=4212 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T03:55:02.783307Z [\u001B[32minfo ] Training complete train_loss=0.9478463530540466\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T03:55:02.789775Z [\u001B[32minfo ] Starting training dataset=4212 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T03:56:14.090717Z [\u001B[32minfo ] Training complete train_loss=0.9612425565719604\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T03:56:14.168573Z [\u001B[32minfo ] Starting training dataset=4212 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T03:57:24.880913Z [\u001B[32minfo ] Training complete train_loss=0.9398402571678162\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T03:57:24.885770Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T03:57:38.175341Z [\u001B[32minfo ] Evaluation complete test_loss=0.5565947890281677\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T03:57:38.182963Z [\u001B[32minfo ] Start Predict dataset=45788\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T04:12:59.505991Z [\u001B[32minfo ] Starting training dataset=4312 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T04:14:11.385840Z [\u001B[32minfo ] Training complete train_loss=1.0070544481277466\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T04:14:11.393299Z [\u001B[32minfo ] Starting training dataset=4312 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T04:15:23.470047Z [\u001B[32minfo ] Training complete train_loss=0.9844921231269836\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T04:15:23.476889Z [\u001B[32minfo ] Starting training dataset=4312 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T04:16:35.675671Z [\u001B[32minfo ] Training complete train_loss=0.9934103488922119\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T04:16:35.682764Z [\u001B[32minfo ] Starting training dataset=4312 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T04:17:50.472318Z [\u001B[32minfo ] Training complete train_loss=0.9531852602958679\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T04:17:50.479302Z [\u001B[32minfo ] Starting training dataset=4312 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T04:19:05.178550Z [\u001B[32minfo ] Training complete train_loss=0.9936967492103577\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T04:19:05.184399Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T04:19:18.580249Z [\u001B[32minfo ] Evaluation complete test_loss=0.5665599703788757\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T04:19:18.586943Z [\u001B[32minfo ] Start Predict dataset=45688\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T04:34:51.766049Z [\u001B[32minfo ] Starting training dataset=4412 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T04:36:06.384482Z [\u001B[32minfo ] Training complete train_loss=0.9465316534042358\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T04:36:06.391749Z [\u001B[32minfo ] Starting training dataset=4412 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T04:37:21.270006Z [\u001B[32minfo ] Training complete train_loss=0.9687955379486084\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T04:37:21.277308Z [\u001B[32minfo ] Starting training dataset=4412 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T04:38:34.181247Z [\u001B[32minfo ] Training complete train_loss=0.9473304152488708\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T04:38:34.188378Z [\u001B[32minfo ] Starting training dataset=4412 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T04:39:48.682254Z [\u001B[32minfo ] Training complete train_loss=0.9860917329788208\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T04:39:48.688619Z [\u001B[32minfo ] Starting training dataset=4412 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T04:41:01.381298Z [\u001B[32minfo ] Training complete train_loss=0.9456995129585266\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T04:41:01.386213Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T04:41:14.570893Z [\u001B[32minfo ] Evaluation complete test_loss=0.5559056997299194\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T04:41:14.578396Z [\u001B[32minfo ] Start Predict dataset=45588\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T04:56:32.245143Z [\u001B[32minfo ] Starting training dataset=4512 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T04:57:46.188021Z [\u001B[32minfo ] Training complete train_loss=0.9349108934402466\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T04:57:46.195296Z [\u001B[32minfo ] Starting training dataset=4512 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T04:59:00.277858Z [\u001B[32minfo ] Training complete train_loss=0.974810779094696\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T04:59:00.284605Z [\u001B[32minfo ] Starting training dataset=4512 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T05:00:14.582182Z [\u001B[32minfo ] Training complete train_loss=0.9310106635093689\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T05:00:14.589957Z [\u001B[32minfo ] Starting training dataset=4512 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T05:01:29.273130Z [\u001B[32minfo ] Training complete train_loss=0.9381789565086365\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T05:01:29.279718Z [\u001B[32minfo ] Starting training dataset=4512 epoch=10\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T05:18:11.286794Z [\u001B[32minfo ] Starting training dataset=4612 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T05:19:27.672247Z [\u001B[32minfo ] Training complete train_loss=0.9956920742988586\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T05:19:27.679533Z [\u001B[32minfo ] Starting training dataset=4612 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T05:20:44.580321Z [\u001B[32minfo ] Training complete train_loss=0.9173332452774048\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T05:20:44.587566Z [\u001B[32minfo ] Starting training dataset=4612 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T05:22:01.084456Z [\u001B[32minfo ] Training complete train_loss=0.9863068461418152\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T05:22:01.091486Z [\u001B[32minfo ] Starting training dataset=4612 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T05:23:18.674201Z [\u001B[32minfo ] Training complete train_loss=0.9764572381973267\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T05:23:18.681107Z [\u001B[32minfo ] Starting training dataset=4612 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T05:24:35.480255Z [\u001B[32minfo ] Training complete train_loss=1.0168014764785767\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T05:24:35.485128Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T05:24:48.874589Z [\u001B[32minfo ] Evaluation complete test_loss=0.5520719289779663\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T05:24:48.881463Z [\u001B[32minfo ] Start Predict dataset=45388\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T05:40:04.304007Z [\u001B[32minfo ] Starting training dataset=4712 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T05:41:21.974405Z [\u001B[32minfo ] Training complete train_loss=0.9784602522850037\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T05:41:21.981149Z [\u001B[32minfo ] Starting training dataset=4712 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T05:42:39.381711Z [\u001B[32minfo ] Training complete train_loss=0.9848698973655701\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T05:42:39.388686Z [\u001B[32minfo ] Starting training dataset=4712 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T05:43:57.482323Z [\u001B[32minfo ] Training complete train_loss=0.9804074168205261\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T05:43:57.489593Z [\u001B[32minfo ] Starting training dataset=4712 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T05:45:14.773833Z [\u001B[32minfo ] Training complete train_loss=0.9591724276542664\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T05:45:14.781518Z [\u001B[32minfo ] Starting training dataset=4712 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T05:46:33.281245Z [\u001B[32minfo ] Training complete train_loss=1.0082279443740845\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T05:46:33.286733Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T05:46:46.591746Z [\u001B[32minfo ] Evaluation complete test_loss=0.5615522861480713\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T05:46:46.665617Z [\u001B[32minfo ] Start Predict dataset=45288\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T06:01:59.899100Z [\u001B[32minfo ] Starting training dataset=4812 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T06:03:19.880195Z [\u001B[32minfo ] Training complete train_loss=0.9809603691101074\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T06:03:19.888224Z [\u001B[32minfo ] Starting training dataset=4812 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T06:04:39.375615Z [\u001B[32minfo ] Training complete train_loss=0.973745584487915\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T06:04:39.383822Z [\u001B[32minfo ] Starting training dataset=4812 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T06:05:58.485925Z [\u001B[32minfo ] Training complete train_loss=0.9743210673332214\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T06:05:58.493312Z [\u001B[32minfo ] Starting training dataset=4812 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T06:07:17.076932Z [\u001B[32minfo ] Training complete train_loss=0.91501784324646\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T06:07:17.083588Z [\u001B[32minfo ] Starting training dataset=4812 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T06:08:35.989119Z [\u001B[32minfo ] Training complete train_loss=0.9752476215362549\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T06:08:35.995117Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T06:08:49.377772Z [\u001B[32minfo ] Evaluation complete test_loss=0.5705881118774414\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T06:08:49.385668Z [\u001B[32minfo ] Start Predict dataset=45188\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T06:23:59.766065Z [\u001B[32minfo ] Starting training dataset=4912 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T06:25:20.285377Z [\u001B[32minfo ] Training complete train_loss=0.9525318741798401\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T06:25:20.293520Z [\u001B[32minfo ] Starting training dataset=4912 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T06:26:39.891338Z [\u001B[32minfo ] Training complete train_loss=0.9308268427848816\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T06:26:39.970277Z [\u001B[32minfo ] Starting training dataset=4912 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T06:27:59.877774Z [\u001B[32minfo ] Training complete train_loss=0.9380195140838623\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T06:27:59.885615Z [\u001B[32minfo ] Starting training dataset=4912 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T06:29:19.772513Z [\u001B[32minfo ] Training complete train_loss=0.9800739288330078\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T06:29:19.780060Z [\u001B[32minfo ] Starting training dataset=4912 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T06:30:40.389743Z [\u001B[32minfo ] Training complete train_loss=0.9535344243049622\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T06:30:40.395101Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T06:30:53.871705Z [\u001B[32minfo ] Evaluation complete test_loss=0.5377533435821533\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T06:30:53.879359Z [\u001B[32minfo ] Start Predict dataset=45088\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T06:46:03.495772Z [\u001B[32minfo ] Starting training dataset=5012 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T06:47:24.584999Z [\u001B[32minfo ] Training complete train_loss=0.9156500697135925\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T06:47:24.591922Z [\u001B[32minfo ] Starting training dataset=5012 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T06:48:45.286743Z [\u001B[32minfo ] Training complete train_loss=0.9553552269935608\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T06:48:45.294576Z [\u001B[32minfo ] Starting training dataset=5012 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T06:50:05.775591Z [\u001B[32minfo ] Training complete train_loss=0.9296905994415283\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T06:50:05.783333Z [\u001B[32minfo ] Starting training dataset=5012 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T06:51:26.690279Z [\u001B[32minfo ] Training complete train_loss=0.9139729738235474\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T06:51:26.697377Z [\u001B[32minfo ] Starting training dataset=5012 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T06:52:47.585409Z [\u001B[32minfo ] Training complete train_loss=0.9346900582313538\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T06:52:47.591531Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T06:53:00.973940Z [\u001B[32minfo ] Evaluation complete test_loss=0.5232065320014954\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T06:53:00.981762Z [\u001B[32minfo ] Start Predict dataset=44988\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T07:08:07.516317Z [\u001B[32minfo ] Starting training dataset=5112 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T07:09:28.877012Z [\u001B[32minfo ] Training complete train_loss=0.9632118940353394\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T07:09:28.884978Z [\u001B[32minfo ] Starting training dataset=5112 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T07:10:50.481289Z [\u001B[32minfo ] Training complete train_loss=0.8998667597770691\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T07:10:50.489168Z [\u001B[32minfo ] Starting training dataset=5112 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T07:12:12.371390Z [\u001B[32minfo ] Training complete train_loss=0.9703662991523743\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T07:12:12.378297Z [\u001B[32minfo ] Starting training dataset=5112 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T07:13:34.783263Z [\u001B[32minfo ] Training complete train_loss=0.911615252494812\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T07:13:34.790932Z [\u001B[32minfo ] Starting training dataset=5112 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T07:14:57.593475Z [\u001B[32minfo ] Training complete train_loss=0.9733651280403137\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T07:14:57.666507Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T07:15:10.972475Z [\u001B[32minfo ] Evaluation complete test_loss=0.5270566344261169\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T07:15:10.979908Z [\u001B[32minfo ] Start Predict dataset=44888\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T07:30:15.747722Z [\u001B[32minfo ] Starting training dataset=5212 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T07:31:38.673276Z [\u001B[32minfo ] Training complete train_loss=0.8865935802459717\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T07:31:38.680685Z [\u001B[32minfo ] Starting training dataset=5212 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T07:33:01.991828Z [\u001B[32minfo ] Training complete train_loss=0.9334018230438232\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T07:33:02.072821Z [\u001B[32minfo ] Starting training dataset=5212 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T07:34:24.690463Z [\u001B[32minfo ] Training complete train_loss=0.9533438682556152\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T07:34:24.765045Z [\u001B[32minfo ] Starting training dataset=5212 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T07:35:48.084328Z [\u001B[32minfo ] Training complete train_loss=0.9691643118858337\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T07:35:48.091523Z [\u001B[32minfo ] Starting training dataset=5212 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T07:37:10.381950Z [\u001B[32minfo ] Training complete train_loss=0.9655492305755615\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T07:37:10.387022Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T07:37:23.881587Z [\u001B[32minfo ] Evaluation complete test_loss=0.5173717141151428\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T07:37:23.888603Z [\u001B[32minfo ] Start Predict dataset=44788\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T07:52:26.318401Z [\u001B[32minfo ] Starting training dataset=5312 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T07:53:50.090727Z [\u001B[32minfo ] Training complete train_loss=0.9988188147544861\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T07:53:50.166640Z [\u001B[32minfo ] Starting training dataset=5312 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T07:55:13.680111Z [\u001B[32minfo ] Training complete train_loss=0.9845055341720581\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T07:55:13.687638Z [\u001B[32minfo ] Starting training dataset=5312 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T07:56:38.190458Z [\u001B[32minfo ] Training complete train_loss=0.9391801357269287\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T07:56:38.268577Z [\u001B[32minfo ] Starting training dataset=5312 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T07:58:02.480110Z [\u001B[32minfo ] Training complete train_loss=0.9259323477745056\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T07:58:02.486874Z [\u001B[32minfo ] Starting training dataset=5312 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T07:59:26.789141Z [\u001B[32minfo ] Training complete train_loss=0.9599472284317017\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T07:59:26.794346Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T07:59:40.283603Z [\u001B[32minfo ] Evaluation complete test_loss=0.5252811908721924\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T07:59:40.292579Z [\u001B[32minfo ] Start Predict dataset=44688\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T08:14:41.452168Z [\u001B[32minfo ] Starting training dataset=5412 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T08:16:06.479870Z [\u001B[32minfo ] Training complete train_loss=0.9627469182014465\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T08:16:06.487092Z [\u001B[32minfo ] Starting training dataset=5412 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T08:17:32.184454Z [\u001B[32minfo ] Training complete train_loss=0.9845385551452637\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T08:17:32.193680Z [\u001B[32minfo ] Starting training dataset=5412 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T08:18:58.381604Z [\u001B[32minfo ] Training complete train_loss=0.9816064238548279\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T08:18:58.389140Z [\u001B[32minfo ] Starting training dataset=5412 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T08:20:23.482114Z [\u001B[32minfo ] Training complete train_loss=0.9480637311935425\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T08:20:23.489218Z [\u001B[32minfo ] Starting training dataset=5412 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T08:21:48.982720Z [\u001B[32minfo ] Training complete train_loss=0.9441766142845154\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T08:21:48.988076Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T08:22:02.376927Z [\u001B[32minfo ] Evaluation complete test_loss=0.5122457146644592\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T08:22:02.384138Z [\u001B[32minfo ] Start Predict dataset=44588\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T08:36:59.422635Z [\u001B[32minfo ] Starting training dataset=5512 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T08:38:25.077782Z [\u001B[32minfo ] Training complete train_loss=0.9657859206199646\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T08:38:25.084857Z [\u001B[32minfo ] Starting training dataset=5512 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T08:39:51.477342Z [\u001B[32minfo ] Training complete train_loss=0.9360918402671814\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T08:39:51.483954Z [\u001B[32minfo ] Starting training dataset=5512 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T08:41:17.575167Z [\u001B[32minfo ] Training complete train_loss=0.938201904296875\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T08:41:17.582039Z [\u001B[32minfo ] Starting training dataset=5512 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T08:42:44.375687Z [\u001B[32minfo ] Training complete train_loss=0.9420015811920166\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T08:42:44.383021Z [\u001B[32minfo ] Starting training dataset=5512 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T08:44:09.980322Z [\u001B[32minfo ] Training complete train_loss=0.9979342222213745\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T08:44:09.985577Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T08:44:23.583289Z [\u001B[32minfo ] Evaluation complete test_loss=0.5096003413200378\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T08:44:23.592039Z [\u001B[32minfo ] Start Predict dataset=44488\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T08:59:20.005780Z [\u001B[32minfo ] Starting training dataset=5612 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T09:00:47.983898Z [\u001B[32minfo ] Training complete train_loss=0.9347040057182312\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T09:00:47.992177Z [\u001B[32minfo ] Starting training dataset=5612 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T09:02:14.675232Z [\u001B[32minfo ] Training complete train_loss=0.9421486258506775\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T09:02:14.682607Z [\u001B[32minfo ] Starting training dataset=5612 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T09:03:42.471977Z [\u001B[32minfo ] Training complete train_loss=0.9339758157730103\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T09:03:42.478964Z [\u001B[32minfo ] Starting training dataset=5612 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T09:05:09.588801Z [\u001B[32minfo ] Training complete train_loss=0.940785825252533\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T09:05:09.596157Z [\u001B[32minfo ] Starting training dataset=5612 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T09:06:36.388508Z [\u001B[32minfo ] Training complete train_loss=0.9172220230102539\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T09:06:36.394159Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T09:06:49.779906Z [\u001B[32minfo ] Evaluation complete test_loss=0.5034990906715393\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T09:06:49.787079Z [\u001B[32minfo ] Start Predict dataset=44388\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T09:21:42.877712Z [\u001B[32minfo ] Starting training dataset=5712 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T09:23:11.072422Z [\u001B[32minfo ] Training complete train_loss=0.9773575663566589\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T09:23:11.080054Z [\u001B[32minfo ] Starting training dataset=5712 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T09:24:39.979758Z [\u001B[32minfo ] Training complete train_loss=0.9121693968772888\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T09:24:39.986819Z [\u001B[32minfo ] Starting training dataset=5712 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T09:26:08.273716Z [\u001B[32minfo ] Training complete train_loss=0.9608463644981384\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T09:26:08.282000Z [\u001B[32minfo ] Starting training dataset=5712 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T09:27:36.386442Z [\u001B[32minfo ] Training complete train_loss=0.9310780167579651\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T09:27:36.393392Z [\u001B[32minfo ] Starting training dataset=5712 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T09:29:04.675088Z [\u001B[32minfo ] Training complete train_loss=0.9387174844741821\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T09:29:04.681026Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T09:29:18.473080Z [\u001B[32minfo ] Evaluation complete test_loss=0.502855658531189\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T09:29:18.480180Z [\u001B[32minfo ] Start Predict dataset=44288\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T09:44:11.984106Z [\u001B[32minfo ] Starting training dataset=5812 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T09:45:41.684589Z [\u001B[32minfo ] Training complete train_loss=0.9386858940124512\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T09:45:41.691708Z [\u001B[32minfo ] Starting training dataset=5812 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T09:47:11.889461Z [\u001B[32minfo ] Training complete train_loss=0.9746957421302795\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T09:47:11.896628Z [\u001B[32minfo ] Starting training dataset=5812 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T09:48:41.878885Z [\u001B[32minfo ] Training complete train_loss=0.9614348411560059\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T09:48:41.885215Z [\u001B[32minfo ] Starting training dataset=5812 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T09:50:10.674407Z [\u001B[32minfo ] Training complete train_loss=0.9561299681663513\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T09:50:10.680954Z [\u001B[32minfo ] Starting training dataset=5812 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T09:51:40.788730Z [\u001B[32minfo ] Training complete train_loss=0.9587898850440979\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T09:51:40.794217Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T09:51:54.684919Z [\u001B[32minfo ] Evaluation complete test_loss=0.506767749786377\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T09:51:54.692014Z [\u001B[32minfo ] Start Predict dataset=44188\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T10:06:42.728074Z [\u001B[32minfo ] Starting training dataset=5912 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T10:08:13.492879Z [\u001B[32minfo ] Training complete train_loss=0.9357155561447144\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T10:08:13.571886Z [\u001B[32minfo ] Starting training dataset=5912 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T10:09:43.775531Z [\u001B[32minfo ] Training complete train_loss=0.928965151309967\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T10:09:43.783025Z [\u001B[32minfo ] Starting training dataset=5912 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T10:11:15.290185Z [\u001B[32minfo ] Training complete train_loss=0.9106123447418213\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T10:11:15.366495Z [\u001B[32minfo ] Starting training dataset=5912 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T10:12:46.477856Z [\u001B[32minfo ] Training complete train_loss=0.9481375813484192\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T10:12:46.484984Z [\u001B[32minfo ] Starting training dataset=5912 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T10:14:20.480515Z [\u001B[32minfo ] Training complete train_loss=0.9085630774497986\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T10:14:20.485748Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T10:14:34.189367Z [\u001B[32minfo ] Evaluation complete test_loss=0.4975428879261017\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T10:14:34.198983Z [\u001B[32minfo ] Start Predict dataset=44088\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T10:29:23.082284Z [\u001B[32minfo ] Starting training dataset=6012 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T10:30:54.879188Z [\u001B[32minfo ] Training complete train_loss=0.9179600477218628\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T10:30:54.885979Z [\u001B[32minfo ] Starting training dataset=6012 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T10:32:27.188698Z [\u001B[32minfo ] Training complete train_loss=0.954762876033783\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T10:32:27.196154Z [\u001B[32minfo ] Starting training dataset=6012 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T10:33:58.874241Z [\u001B[32minfo ] Training complete train_loss=0.9262583255767822\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T10:33:58.881235Z [\u001B[32minfo ] Starting training dataset=6012 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T10:35:30.088966Z [\u001B[32minfo ] Training complete train_loss=0.9067783951759338\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T10:35:30.096937Z [\u001B[32minfo ] Starting training dataset=6012 epoch=10\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T10:51:59.151384Z [\u001B[32minfo ] Starting training dataset=6112 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T10:53:36.383207Z [\u001B[32minfo ] Training complete train_loss=0.9925647974014282\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T10:53:36.391280Z [\u001B[32minfo ] Starting training dataset=6112 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T10:55:12.673108Z [\u001B[32minfo ] Training complete train_loss=0.9393655061721802\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T10:55:12.681112Z [\u001B[32minfo ] Starting training dataset=6112 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T10:56:49.685065Z [\u001B[32minfo ] Training complete train_loss=0.9082939028739929\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T10:56:49.692182Z [\u001B[32minfo ] Starting training dataset=6112 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T10:58:26.181962Z [\u001B[32minfo ] Training complete train_loss=0.9237620830535889\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T10:58:26.189559Z [\u001B[32minfo ] Starting training dataset=6112 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T11:00:00.983051Z [\u001B[32minfo ] Training complete train_loss=0.9448640942573547\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T11:00:00.988386Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T11:00:14.479037Z [\u001B[32minfo ] Evaluation complete test_loss=0.5105274319648743\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T11:00:14.485896Z [\u001B[32minfo ] Start Predict dataset=43888\n",
- "Training model 0\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T11:14:56.388463Z [\u001B[32minfo ] Starting training dataset=6212 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T11:16:28.581044Z [\u001B[32minfo ] Training complete train_loss=0.9154177308082581\n",
- "Training model 1\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T11:16:28.587949Z [\u001B[32minfo ] Starting training dataset=6212 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T11:18:00.773198Z [\u001B[32minfo ] Training complete train_loss=0.9639526605606079\n",
- "Training model 2\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T11:18:00.780174Z [\u001B[32minfo ] Starting training dataset=6212 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T11:19:33.565889Z [\u001B[32minfo ] Training complete train_loss=1.0114784240722656\n",
- "Training model 3\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T11:19:33.573952Z [\u001B[32minfo ] Starting training dataset=6212 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T11:21:08.172122Z [\u001B[32minfo ] Training complete train_loss=0.932121217250824\n",
- "Training model 4\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:109] 2021-07-29T11:21:08.179191Z [\u001B[32minfo ] Starting training dataset=6212 epoch=10\n",
- "[14095-MainThread] [baal.modelwrapper:train_on_dataset:119] 2021-07-29T11:22:41.977505Z [\u001B[32minfo ] Training complete train_loss=0.9630128741264343\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:147] 2021-07-29T11:22:41.984938Z [\u001B[32minfo ] Starting evaluating dataset=10000\n",
- "[14095-MainThread] [baal.modelwrapper:test_on_dataset:156] 2021-07-29T11:22:55.375432Z [\u001B[32minfo ] Evaluation complete test_loss=0.4921897053718567\n",
- "[14095-MainThread] [baal.modelwrapper:predict_on_dataset_generator:241] 2021-07-29T11:22:55.382629Z [\u001B[32minfo ] Start Predict dataset=43788\n"
- ]
+ "execution_count": null,
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n",
+ "is_executing": true
}
- ],
+ },
+ "outputs": [],
"source": [
"report = []\n",
"for epoch in tqdm(range(hyperparams.epoch)):\n",
@@ -1297,7 +254,11 @@
{
"cell_type": "code",
"execution_count": 5,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"data": {
diff --git a/notebooks/fairness/ActiveFairness.ipynb b/notebooks/fairness/ActiveFairness.ipynb
index 08e909b1..efa721ae 100644
--- a/notebooks/fairness/ActiveFairness.ipynb
+++ b/notebooks/fairness/ActiveFairness.ipynb
@@ -2,11 +2,17 @@
"cells": [
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"## Can active learning preemptively mitigate fairness issues?\n",
"*By Parmida Atighehchian*\n",
"\n",
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/baal-org/baal/blob/master/notebooks/fairness/ActiveFairness.ipynb)\n",
+ "\n",
"The purpose of this notebook is to demonstrate the prilimary results of our recent [contribution](https://arxiv.org/abs/2104.06879) to ICLR workshop of Responsible AI 2021.\n",
"We show that active learning could help in creating fairer datasets without the need to know the bias in the dataset. This is important since in real scenarios, the source of bias is often unknown. Using active learning (i.e. BALD), we show that the prior knowledge of the bias is not necessary and hence it could be easier to integrate this setup in pipelines to make sure that the dataset is generally fairer and the possible biases are reduced. \n",
"\n",
@@ -14,7 +20,7 @@
"\n",
"The Dockerfile is located at `baal/notebooks/fairness/Docker_biased_data`.\n",
"\n",
- "More resources on BaaL:\n",
+ "More resources on Baal:\n",
"\n",
"* [Literature review](https://baal.readthedocs.io/en/latest/literature/index.html)\n",
"* [Active learning dataset and training loop classes](https://baal.readthedocs.io/en/latest/notebooks/fundamentals/active-learning.html)\n",
@@ -27,7 +33,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"### Introducing bias in dataset\n",
"\n",
@@ -45,7 +55,11 @@
{
"cell_type": "code",
"execution_count": 1,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"name": "stderr",
@@ -109,16 +123,24 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
- "### Prepare model and dataset to be used in BaaL setup\n",
- "As usual we wrap the train_set in `ActiveLearningDataset` and using vgg16 as default model, we use the BaaL's `patch_module` to create a dropout layer which performs in inference time."
+ "### Prepare model and dataset to be used in Baal setup\n",
+ "As usual we wrap the train_set in `ActiveLearningDataset` and using vgg16 as default model, we use the Baal's `patch_module` to create a dropout layer which performs in inference time."
]
},
{
"cell_type": "code",
"execution_count": 3,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [],
"source": [
"from torchvision.transforms import transforms\n",
@@ -168,7 +190,11 @@
{
"cell_type": "code",
"execution_count": 4,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [],
"source": [
"from torchvision import models\n",
@@ -192,7 +218,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"We wrap the pytorch criterion to accomodate target being a dictionary."
]
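A hypothetical version of such a wrapper (the dictionary key `"target"` is an assumption for illustration, not necessarily the notebook's actual key):

```python
from torch import nn

class DictCriterion(nn.Module):
    """Unpacks a dict target before delegating to a standard criterion."""

    def __init__(self, criterion=None):
        super().__init__()
        self.criterion = criterion or nn.CrossEntropyLoss()

    def forward(self, output, target):
        # Assumption: the dataset yields {"target": labels, ...}.
        return self.criterion(output, target["target"])
```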
@@ -200,7 +230,11 @@
{
"cell_type": "code",
"execution_count": 5,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [],
"source": [
"from torch import nn\n",
@@ -216,7 +250,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"#### Training\n",
"\n",
@@ -229,7 +267,10 @@
"metadata": {
"tags": [
"no_output"
- ]
+ ],
+ "pycharm": {
+ "name": "#%%\n"
+ }
},
"outputs": [],
"source": [
@@ -317,7 +358,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"### Results and Discussion\n",
"\n",
@@ -328,7 +373,10 @@
"cell_type": "code",
"execution_count": 17,
"metadata": {
- "scrolled": false
+ "scrolled": false,
+ "pycharm": {
+ "name": "#%%\n"
+ }
},
"outputs": [
{
@@ -370,7 +418,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"We demonstrate the `test_loss` and `training_size` using `bald` vs `random` as heuristics. As it is shown, the trainig size increases with the same pace but the above graphs shows the underlying difference in the existing samples for each class which then results in also a better loss decrease using `bald`."
]
@@ -378,7 +430,11 @@
{
"cell_type": "code",
"execution_count": 16,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"data": {
@@ -431,4 +487,4 @@
},
"nbformat": 4,
"nbformat_minor": 4
-}
+}
\ No newline at end of file
diff --git a/notebooks/fundamentals/active-learning.ipynb b/notebooks/fundamentals/active-learning.ipynb
index 4072836d..05b103b2 100644
--- a/notebooks/fundamentals/active-learning.ipynb
+++ b/notebooks/fundamentals/active-learning.ipynb
@@ -2,10 +2,16 @@
"cells": [
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"# Active learning infrastructure objects\n",
"\n",
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/baal-org/baal/blob/master/notebooks/fundamentals/active-learning.ipynb)\n",
+ "\n",
"Active learning, or interactively choosing datapoints to request labels for,\n",
"presents a challenge that requires some data handling infrastructure that's\n",
"slightly different to the normal pytorch dataset classes. In particular, a\n",
@@ -27,16 +33,24 @@
{
"cell_type": "code",
"execution_count": 2,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [],
"source": [
- "path = \"/Users/jan/datasets/mnist/\""
+ "path = \"/tmp\""
]
},
{
"cell_type": "code",
"execution_count": 4,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [],
"source": [
"from torchvision import transforms, datasets\n",
@@ -53,7 +67,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"As you can see, this is a fairly thin wrapper around MNIST. But, we can now\n",
"check several new properties of this dataset:\n"
@@ -62,7 +80,11 @@
{
"cell_type": "code",
"execution_count": 5,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"data": {
@@ -82,7 +104,11 @@
{
"cell_type": "code",
"execution_count": 6,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"data": {
@@ -101,7 +127,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"We can also start labelling data. Either randomly, or based on specific indices:"
]
@@ -109,7 +139,11 @@
{
"cell_type": "code",
"execution_count": 7,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [],
"source": [
"active_mnist.label_randomly(10)\n",
@@ -118,7 +152,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"We've just labelled 10 points randomly, and 3 points based on specific indices.\n",
"Now, if we check how many have been labelled, we see that 13 have been labelled:\n"
@@ -127,7 +165,11 @@
{
"cell_type": "code",
"execution_count": 8,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"data": {
@@ -147,7 +189,11 @@
{
"cell_type": "code",
"execution_count": 9,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"data": {
@@ -166,7 +212,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"We will also see that when we check the length of this dataset - something that\n",
"is done by e.g. pytorch `DataLoader` classes - it only gives the length of the\n",
@@ -176,7 +226,11 @@
{
"cell_type": "code",
"execution_count": 12,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"data": {
@@ -195,7 +249,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"And, if we try to access an item, it will only allow us to index the _labelled_\n",
"datapoints:\n"
@@ -204,7 +262,11 @@
{
"cell_type": "code",
"execution_count": 13,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"data": {
@@ -246,7 +308,11 @@
{
"cell_type": "code",
"execution_count": 14,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"ename": "IndexError",
@@ -268,7 +334,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"Instead, if we want to actually use the _unlabelled_ data, we need to use the\n",
"`pool` attribute of the active learning dataset, which is itself a dataset:\n"
@@ -277,7 +347,11 @@
{
"cell_type": "code",
"execution_count": 15,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"data": {
diff --git a/notebooks/fundamentals/posteriors.ipynb b/notebooks/fundamentals/posteriors.ipynb
index 68467cff..d59fe1a1 100644
--- a/notebooks/fundamentals/posteriors.ipynb
+++ b/notebooks/fundamentals/posteriors.ipynb
@@ -2,10 +2,16 @@
"cells": [
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"# Methods for approximating bayesian posteriors \n",
"\n",
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/baal-org/baal/blob/master/notebooks/fundamentals/posteriors.ipynb)\n",
+ "\n",
"When we started developing active learning methods, we realised that what we wanted to\n",
"achieve required estimating the uncertainty of models. Doing so for neural networks is\n",
"an ongoing active research area.\n",
@@ -20,7 +26,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"## Monte-Carlo Dropout\n",
"\n",
@@ -38,7 +48,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"### Usage\n",
"\n",
@@ -48,10 +62,15 @@
{
"cell_type": "code",
"execution_count": 1,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [],
"source": [
"import torch\n",
+ "\n",
"import baal.bayesian.dropout\n",
"\n",
"standard_model = torch.nn.Sequential(\n",
@@ -77,7 +96,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"The main difference between these is that the standard model will set the dropout probability to zero during eval, while the MC dropout model will not:"
]
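A quick way to observe this (a sketch; `patch_module` is applied to a deep copy so the original keeps the standard behaviour):

```python
import copy
import torch
from baal.bayesian.dropout import patch_module

standard = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Dropout(0.5))
mc = patch_module(copy.deepcopy(standard))

standard.eval()
mc.eval()

x = torch.ones(1, 8)
print(torch.equal(standard(x), standard(x)))  # True: dropout is a no-op in eval
print(torch.equal(mc(x), mc(x)))              # almost surely False: still sampling
```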
@@ -85,7 +108,11 @@
{
"cell_type": "code",
"execution_count": 2,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"name": "stdout",
@@ -108,7 +135,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"In order to get a distribution of model outputs, you simply need to repeatedly run the same data through the MC Dropout model. `baal` makes this easier for you by providing a class called `ModelWrapper`. This class accepts your model and a criterion (loss) function, and provides several utility functions, such as running training steps and more. The one that is important for obtaining a posterior distribution is `Modelwrapper.predict_on_batch`.\n",
"\n",
@@ -118,7 +149,11 @@
{
"cell_type": "code",
"execution_count": 3,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [],
"source": [
"from baal.modelwrapper import ModelWrapper\n",
@@ -134,7 +169,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"The tensor \"prediction_distribution\" has the shape (batch size) x (output size) x iterations:"
]
@@ -142,7 +181,11 @@
{
"cell_type": "code",
"execution_count": 4,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"data": {
@@ -161,7 +204,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"We can visualise this posterior distribution, for example for the first data point in our\n",
"minibatch (although note that because this model is overly simplistic, this is not very\n",
@@ -171,7 +218,11 @@
{
"cell_type": "code",
"execution_count": 5,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"data": {
@@ -188,7 +239,7 @@
],
"source": [
"import matplotlib.pyplot as plt\n",
- "%matplotlib inline\n",
+ "% matplotlib inline\n",
"\n",
"fig, ax = plt.subplots()\n",
"ax.hist(predictions[0, 0, :].numpy(), bins=50);\n",
@@ -197,7 +248,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"## Drop Connect\n",
"\n",
@@ -209,7 +264,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"### Usage\n",
"As usual we have pre-implemented wrappers to ease your job for this. Example below shows how to use this module:"
@@ -218,10 +277,16 @@
{
"cell_type": "code",
"execution_count": 6,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [],
"source": [
"import torch\n",
+ "\n",
+ "\n",
"class DummyModel(torch.nn.Module):\n",
" def __init__(self):\n",
" super(DummyModel, self).__init__()\n",
@@ -242,7 +307,11 @@
{
"cell_type": "code",
"execution_count": 7,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [],
"source": [
"import numpy as np\n",
@@ -260,7 +329,11 @@
{
"cell_type": "code",
"execution_count": 8,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"data": {
@@ -279,7 +352,11 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
"let's visualize the posterior:"
]
@@ -287,7 +364,11 @@
{
"cell_type": "code",
"execution_count": 9,
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [
{
"data": {
@@ -304,7 +385,7 @@
],
"source": [
"import matplotlib.pyplot as plt\n",
- "%matplotlib inline\n",
+ "% matplotlib inline\n",
"\n",
"fig, ax = plt.subplots()\n",
"ax.hist(predictions[0, 0, :].numpy(), bins=50);\n",
@@ -313,11 +394,23 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
"source": [
- "As part of our experiments, we run MCDropout(MCD) and DropConnect(MCDC) against eachother. We let the experiments run for 2000 epochs on `vgg16` using `CIFAR10` and tried different number of iterations and weight drop rate for Dropconnect. As the experiments show, `DropConnect` could give a better result if it is used with number of iterations more than `80` and drop weight rate of around `50%`. The reference [paper](https://arxiv.org/pdf/1906.04569.pdf) indicates having a drop rate of `94%` should give the best result but our experiments show otherwise. The main factor of change for DropConnect is the number of `iterations` used to estimate the posterior. However, as we can see for MCDropout, number of `iterations` 40 and 80 would give almost same results which would overfit by time. In order to prevent overfitting, we could change `learning rate` and use other techniques and get a lift on the performance, however as could be seen for higher `iterations`, DropConnect could easily outperform MCDropout at 10K training set size. \n",
- "Finally, the choice of method and training process is always for the user and depending on the problem in hand. Parameters like how low the validation error should be and if the training is allowed to be run for few days or there is a time limit could all effect in which strategy is best and which hyperparameters to choose.\n",
- "![MCD VS MCDC](https://github.com/ElementAI/baal/blob/master/docs/literature/images/experiment_results/iterations_mcdc.png?raw=true)"
+ "As part of our experiments, we compare MCDropout(MCD) and DropConnect(MCDC). We let the experiments run for 2000 epochs on `vgg16` using `CIFAR10` and tried different number of iterations and weight drop rate for Dropconnect.\n",
+ "Our experiments indicate that `DropConnect` could give a better result if it is used with number of iterations more than `80` and drop weight rate of around `50%`.\n",
+ "\n",
+ "The reference [paper](https://arxiv.org/pdf/1906.04569.pdf) indicates using a drop rate of `94%` give the best result but our experiments show otherwise.\n",
+ "The main factor of change for DropConnect is the number of `iterations` used to estimate the posterior. However, as we can see for MCDropout, number of `iterations` 40 and 80 would give almost the same results.\n",
+ " In order to prevent overfitting, we could change `learning rate` and use other techniques and get a lift on the performance, however as could be seen for higher `iterations`, DropConnect could easily outperform MCDropout at 10K training set size.\n",
+ "\n",
+ "Finally, the choice of method and training process is always up to the user and their current dataset.\n",
+ "Parameters like how low the validation error should be and if the training is allowed to be run for few days or there is a time limit could all effect in which strategy is best and which hyperparameters to choose.\n",
+ "\n",
+ ""
]
}
],
diff --git a/poetry.lock b/poetry.lock
index 4b388f81..0aace7ad 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,6 +1,6 @@
[[package]]
name = "absl-py"
-version = "1.1.0"
+version = "1.2.0"
description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py."
category = "main"
optional = true
@@ -8,7 +8,7 @@ python-versions = ">=3.6"
[[package]]
name = "aiohttp"
-version = "3.8.1"
+version = "3.8.3"
description = "Async http client/server framework (asyncio)"
category = "main"
optional = true
@@ -17,12 +17,10 @@ python-versions = ">=3.6"
[package.dependencies]
aiosignal = ">=1.1.2"
async-timeout = ">=4.0.0a3,<5.0"
-asynctest = {version = "0.13.0", markers = "python_version < \"3.8\""}
attrs = ">=17.3.0"
charset-normalizer = ">=2.0,<3.0"
frozenlist = ">=1.1.1"
multidict = ">=4.5,<7.0"
-typing-extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""}
yarl = ">=1.0,<2.0"
[package.extras]
@@ -40,63 +38,15 @@ python-versions = ">=3.6"
frozenlist = ">=1.1.0"
[[package]]
-name = "alabaster"
-version = "0.7.12"
-description = "A configurable sidebar-enabled Sphinx theme"
-category = "dev"
-optional = false
-python-versions = "*"
-
-[[package]]
-name = "appnope"
-version = "0.1.3"
-description = "Disable App Nap on macOS >= 10.9"
-category = "dev"
-optional = false
-python-versions = "*"
-
-[[package]]
-name = "argon2-cffi"
-version = "21.3.0"
-description = "The secure Argon2 password hashing algorithm."
-category = "dev"
-optional = false
-python-versions = ">=3.6"
-
-[package.dependencies]
-argon2-cffi-bindings = "*"
-typing-extensions = {version = "*", markers = "python_version < \"3.8\""}
-
-[package.extras]
-dev = ["pre-commit", "cogapp", "tomli", "coverage[toml] (>=5.0.2)", "hypothesis", "pytest", "sphinx", "sphinx-notfound-page", "furo"]
-docs = ["sphinx", "sphinx-notfound-page", "furo"]
-tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pytest"]
-
-[[package]]
-name = "argon2-cffi-bindings"
-version = "21.2.0"
-description = "Low-level CFFI bindings for Argon2"
-category = "dev"
-optional = false
-python-versions = ">=3.6"
-
-[package.dependencies]
-cffi = ">=1.0.1"
-
-[package.extras]
-dev = ["pytest", "cogapp", "pre-commit", "wheel"]
-tests = ["pytest"]
-
-[[package]]
-name = "asteroid-sphinx-theme"
-version = "0.0.3"
-description = "Asteroid: Sphinx Theme"
+name = "astunparse"
+version = "1.6.3"
+description = "An AST unparser for Python"
category = "dev"
optional = false
python-versions = "*"
[package.dependencies]
-sphinx = "*"
+six = ">=1.6.1,<2.0"
[[package]]
name = "async-timeout"
@@ -106,20 +56,9 @@ category = "main"
optional = true
python-versions = ">=3.6"
-[package.dependencies]
-typing-extensions = {version = ">=3.6.5", markers = "python_version < \"3.8\""}
-
-[[package]]
-name = "asynctest"
-version = "0.13.0"
-description = "Enhance the standard unittest package with features for testing asyncio libraries"
-category = "main"
-optional = true
-python-versions = ">=3.5"
-
[[package]]
name = "atomicwrites"
-version = "1.4.0"
+version = "1.4.1"
description = "Atomic file writes."
category = "dev"
optional = false
@@ -127,36 +66,17 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
[[package]]
name = "attrs"
-version = "21.4.0"
+version = "22.1.0"
description = "Classes Without Boilerplate"
category = "main"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+python-versions = ">=3.5"
[package.extras]
-dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"]
+dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"]
docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"]
-tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "cloudpickle"]
-tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "cloudpickle"]
-
-[[package]]
-name = "babel"
-version = "2.10.3"
-description = "Internationalization utilities"
-category = "dev"
-optional = false
-python-versions = ">=3.6"
-
-[package.dependencies]
-pytz = ">=2015.7"
-
-[[package]]
-name = "backcall"
-version = "0.2.0"
-description = "Specifications for callback functions passed in to an API"
-category = "dev"
-optional = false
-python-versions = "*"
+tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "zope.interface", "cloudpickle"]
+tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "cloudpickle"]
[[package]]
name = "bandit"
@@ -194,25 +114,28 @@ lxml = ["lxml"]
[[package]]
name = "black"
-version = "22.6.0"
+version = "21.12b0"
description = "The uncompromising code formatter."
category = "dev"
optional = false
python-versions = ">=3.6.2"
[package.dependencies]
-click = ">=8.0.0"
+click = ">=7.1.2"
mypy-extensions = ">=0.4.3"
-pathspec = ">=0.9.0"
+pathspec = ">=0.9.0,<1"
platformdirs = ">=2"
-tomli = {version = ">=1.1.0", markers = "python_full_version < \"3.11.0a7\""}
-typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""}
-typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""}
+tomli = ">=0.2.6,<2.0.0"
+typing-extensions = [
+ {version = ">=3.10.0.0", markers = "python_version < \"3.10\""},
+ {version = "!=3.10.0.1", markers = "python_version >= \"3.10\""},
+]
[package.extras]
colorama = ["colorama (>=0.4.3)"]
d = ["aiohttp (>=3.7.4)"]
jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
+python2 = ["typed-ast (>=1.4.3)"]
uvloop = ["uvloop (>=0.15.2)"]
[[package]]
@@ -241,7 +164,7 @@ python-versions = "~=3.7"
[[package]]
name = "certifi"
-version = "2022.6.15"
+version = "2022.9.24"
description = "Python package for providing Mozilla's CA Bundle."
category = "main"
optional = false
@@ -260,7 +183,7 @@ pycparser = "*"
[[package]]
name = "charset-normalizer"
-version = "2.1.0"
+version = "2.1.1"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
category = "main"
optional = false
@@ -279,7 +202,6 @@ python-versions = ">=3.7"
[package.dependencies]
colorama = {version = "*", markers = "platform_system == \"Windows\""}
-importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
[[package]]
name = "colorama"
@@ -290,19 +212,26 @@ optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
[[package]]
-name = "commonmark"
-version = "0.9.1"
-description = "Python parser for the CommonMark Markdown spec"
-category = "dev"
+name = "contourpy"
+version = "1.0.5"
+description = "Python library for calculating contours of 2D quadrilateral grids"
+category = "main"
optional = false
-python-versions = "*"
+python-versions = ">=3.7"
+
+[package.dependencies]
+numpy = ">=1.16"
[package.extras]
-test = ["hypothesis (==3.55.3)", "flake8 (==3.7.8)"]
+test-no-codebase = ["pillow", "matplotlib", "pytest"]
+test-minimal = ["pytest"]
+test = ["isort", "flake8", "pillow", "matplotlib", "pytest"]
+docs = ["sphinx-rtd-theme", "sphinx", "docutils (<0.18)"]
+bokeh = ["selenium", "bokeh"]
[[package]]
name = "coverage"
-version = "6.4.1"
+version = "6.5.0"
description = "Code coverage measurement for Python"
category = "dev"
optional = false
@@ -332,7 +261,6 @@ aiohttp = "*"
dill = "*"
fsspec = {version = ">=2021.05.0", extras = ["http"]}
huggingface-hub = ">=0.1.0,<1.0.0"
-importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
multiprocess = "*"
numpy = ">=1.17"
packaging = "*"
@@ -347,32 +275,16 @@ xxhash = "*"
apache-beam = ["apache-beam (>=2.26.0)"]
audio = ["librosa"]
benchmarks = ["numpy (==1.18.5)", "tensorflow (==2.3.0)", "torch (==1.6.0)", "transformers (==3.0.2)"]
-dev = ["absl-py", "pytest", "pytest-datadir", "pytest-xdist", "apache-beam (>=2.26.0)", "elasticsearch (<8.0.0)", "aiobotocore", "boto3", "botocore", "faiss-cpu (>=1.6.4)", "fsspec", "moto[s3,server] (==2.0.4)", "rarfile (>=4.0)", "s3fs (==2021.08.1)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "torch", "torchaudio", "soundfile", "transformers", "bs4", "conllu", "h5py", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "torchmetrics (==0.6.0)", "mauve-text", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "Pillow (>=6.2.1)", "librosa", "wget (>=3.2)", "pytorch-nlp (==0.5.0)", "pytorch-lightning", "fastBPE (==0.1.0)", "fairseq", "black (>=22.0,<23.0)", "flake8 (>=3.8.3)", "isort (>=5.0.0)", "pyyaml (>=5.3.1)", "importlib-resources"]
+dev = ["absl-py", "pytest", "pytest-datadir", "pytest-xdist", "apache-beam (>=2.26.0)", "elasticsearch (<8.0.0)", "aiobotocore", "boto3", "botocore", "faiss-cpu (>=1.6.4)", "fsspec", "moto[server,s3] (==2.0.4)", "rarfile (>=4.0)", "s3fs (==2021.08.1)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "torch", "torchaudio", "soundfile", "transformers", "bs4", "conllu", "h5py", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "torchmetrics (==0.6.0)", "mauve-text", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "Pillow (>=6.2.1)", "librosa", "wget (>=3.2)", "pytorch-nlp (==0.5.0)", "pytorch-lightning", "fastBPE (==0.1.0)", "fairseq", "black (>=22.0,<23.0)", "flake8 (>=3.8.3)", "isort (>=5.0.0)", "pyyaml (>=5.3.1)", "importlib-resources"]
docs = ["docutils (==0.16.0)", "recommonmark", "sphinx (==3.1.2)", "sphinx-markdown-tables", "sphinx-rtd-theme (==0.4.3)", "sphinxext-opengraph (==0.4.1)", "sphinx-copybutton", "fsspec (<2021.9.0)", "s3fs", "sphinx-panels", "sphinx-inline-tabs", "myst-parser", "Markdown (!=3.3.5)"]
quality = ["black (>=22.0,<23.0)", "flake8 (>=3.8.3)", "isort (>=5.0.0)", "pyyaml (>=5.3.1)"]
s3 = ["fsspec", "boto3", "botocore", "s3fs"]
tensorflow = ["tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)"]
tensorflow_gpu = ["tensorflow-gpu (>=2.2.0,!=2.6.0,!=2.6.1)"]
-tests = ["absl-py", "pytest", "pytest-datadir", "pytest-xdist", "apache-beam (>=2.26.0)", "elasticsearch (<8.0.0)", "aiobotocore", "boto3", "botocore", "faiss-cpu (>=1.6.4)", "fsspec", "moto[s3,server] (==2.0.4)", "rarfile (>=4.0)", "s3fs (==2021.08.1)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "torch", "torchaudio", "soundfile", "transformers", "bs4", "conllu", "h5py", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "torchmetrics (==0.6.0)", "mauve-text", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "Pillow (>=6.2.1)", "librosa", "wget (>=3.2)", "pytorch-nlp (==0.5.0)", "pytorch-lightning", "fastBPE (==0.1.0)", "fairseq", "importlib-resources"]
+tests = ["absl-py", "pytest", "pytest-datadir", "pytest-xdist", "apache-beam (>=2.26.0)", "elasticsearch (<8.0.0)", "aiobotocore", "boto3", "botocore", "faiss-cpu (>=1.6.4)", "fsspec", "moto[server,s3] (==2.0.4)", "rarfile (>=4.0)", "s3fs (==2021.08.1)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "torch", "torchaudio", "soundfile", "transformers", "bs4", "conllu", "h5py", "langdetect", "lxml", "mwparserfromhell", "nltk", "openpyxl", "py7zr", "tldextract", "zstandard", "bert-score (>=0.3.6)", "rouge-score", "sacrebleu", "scipy", "seqeval", "scikit-learn", "jiwer", "sentencepiece", "torchmetrics (==0.6.0)", "mauve-text", "toml (>=0.10.1)", "requests-file (>=1.5.1)", "tldextract (>=3.1.0)", "texttable (>=1.6.3)", "Werkzeug (>=1.0.1)", "six (>=1.15.0,<1.16.0)", "Pillow (>=6.2.1)", "librosa", "wget (>=3.2)", "pytorch-nlp (==0.5.0)", "pytorch-lightning", "fastBPE (==0.1.0)", "fairseq", "importlib-resources"]
torch = ["torch"]
vision = ["Pillow (>=6.2.1)"]
-[[package]]
-name = "debugpy"
-version = "1.6.0"
-description = "An implementation of the Debug Adapter Protocol for Python"
-category = "dev"
-optional = false
-python-versions = ">=3.7"
-
-[[package]]
-name = "decorator"
-version = "5.1.1"
-description = "Decorators for Humans"
-category = "dev"
-optional = false
-python-versions = ">=3.5"
-
[[package]]
name = "defusedxml"
version = "0.7.1"
@@ -418,7 +330,7 @@ python-versions = ">=3.6"
[[package]]
name = "fastjsonschema"
-version = "2.15.3"
+version = "2.16.2"
description = "Fastest Python implementation of JSON schema"
category = "dev"
optional = false
@@ -429,15 +341,15 @@ devel = ["colorama", "jsonschema", "json-spec", "pylint", "pytest", "pytest-benc
[[package]]
name = "filelock"
-version = "3.7.1"
+version = "3.8.0"
description = "A platform independent file lock."
category = "main"
optional = true
python-versions = ">=3.7"
[package.extras]
-docs = ["furo (>=2021.8.17b43)", "sphinx (>=4.1)", "sphinx-autodoc-typehints (>=1.12)"]
-testing = ["covdefaults (>=1.2.0)", "coverage (>=4)", "pytest (>=4)", "pytest-cov", "pytest-timeout (>=1.4.2)"]
+docs = ["furo (>=2022.6.21)", "sphinx (>=5.1.1)", "sphinx-autodoc-typehints (>=1.19.1)"]
+testing = ["covdefaults (>=2.2)", "coverage (>=6.4.2)", "pytest (>=7.1.2)", "pytest-cov (>=3)", "pytest-timeout (>=2.1)"]
[[package]]
name = "flake8"
@@ -448,14 +360,13 @@ optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
[package.dependencies]
-importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
mccabe = ">=0.6.0,<0.7.0"
pycodestyle = ">=2.7.0,<2.8.0"
pyflakes = ">=2.3.0,<2.4.0"
[[package]]
name = "fonttools"
-version = "4.33.3"
+version = "4.37.4"
description = "Tools to manipulate font files"
category = "main"
optional = false
@@ -477,7 +388,7 @@ woff = ["zopfli (>=0.1.4)", "brotlicffi (>=0.8.0)", "brotli (>=1.0.1)"]
[[package]]
name = "frozenlist"
-version = "1.3.0"
+version = "1.3.1"
description = "A list-like structure which implements collections.abc.MutableSequence"
category = "main"
optional = true
@@ -518,6 +429,20 @@ smb = ["smbprotocol"]
ssh = ["paramiko"]
tqdm = ["tqdm"]
+[[package]]
+name = "ghp-import"
+version = "2.1.0"
+description = "Copy your docs directly to the gh-pages branch."
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+python-dateutil = ">=2.8.1"
+
+[package.extras]
+dev = ["wheel", "flake8", "markdown", "twine"]
+
[[package]]
name = "gitdb"
version = "4.0.9"
@@ -531,7 +456,7 @@ smmap = ">=3.0.1,<6"
[[package]]
name = "gitpython"
-version = "3.1.27"
+version = "3.1.28"
description = "GitPython is a python library used to interact with Git repositories"
category = "dev"
optional = false
@@ -539,11 +464,10 @@ python-versions = ">=3.7"
[package.dependencies]
gitdb = ">=4.0.1,<5"
-typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""}
[[package]]
name = "google-auth"
-version = "2.9.0"
+version = "2.12.0"
description = "Google Authentication Library"
category = "main"
optional = true
@@ -576,19 +500,30 @@ requests-oauthlib = ">=0.7.0"
[package.extras]
tool = ["click (>=6.0.0)"]
+[[package]]
+name = "griffe"
+version = "0.22.2"
+description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.extras]
+async = ["aiofiles (>=0.7,<1.0)"]
+
[[package]]
name = "grpcio"
-version = "1.47.0"
+version = "1.49.1"
description = "HTTP/2-based RPC framework"
category = "main"
optional = true
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
six = ">=1.5.2"
[package.extras]
-protobuf = ["grpcio-tools (>=1.47.0)"]
+protobuf = ["grpcio-tools (>=1.49.1)"]
[[package]]
name = "h5py"
@@ -603,7 +538,7 @@ numpy = ">=1.14.5"
[[package]]
name = "huggingface-hub"
-version = "0.8.1"
+version = "0.10.0"
description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
category = "main"
optional = true
@@ -611,7 +546,6 @@ python-versions = ">=3.7.0"
[package.dependencies]
filelock = "*"
-importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
packaging = ">=20.9"
pyyaml = ">=5.1"
requests = "*"
@@ -619,13 +553,14 @@ tqdm = "*"
typing-extensions = ">=3.7.4.3"
[package.extras]
-all = ["pytest", "pytest-cov", "datasets", "soundfile", "black (>=22.0,<23.0)", "isort (>=5.5.4)", "flake8 (>=3.8.3)"]
-dev = ["pytest", "pytest-cov", "datasets", "soundfile", "black (>=22.0,<23.0)", "isort (>=5.5.4)", "flake8 (>=3.8.3)"]
-fastai = ["toml", "fastai (>=2.4)", "fastcore (>=1.3.27)"]
-quality = ["black (>=22.0,<23.0)", "isort (>=5.5.4)", "flake8 (>=3.8.3)"]
-tensorflow = ["tensorflow", "pydot", "graphviz"]
-testing = ["pytest", "pytest-cov", "datasets", "soundfile"]
torch = ["torch"]
+testing = ["soundfile", "pytest-cov", "pytest", "jinja2", "jedi", "isort (>=5.5.4)", "InquirerPy (==0.3.4)"]
+tensorflow = ["graphviz", "pydot", "tensorflow"]
+quality = ["mypy", "isort (>=5.5.4)", "flake8-bugbear", "flake8 (>=3.8.3)", "black (==22.3)"]
+fastai = ["fastcore (>=1.3.27)", "fastai (>=2.4)", "toml"]
+dev = ["mypy", "flake8-bugbear", "flake8 (>=3.8.3)", "black (==22.3)", "soundfile", "pytest-cov", "pytest", "jinja2", "jedi", "isort (>=5.5.4)", "InquirerPy (==0.3.4)"]
+cli = ["InquirerPy (==0.3.4)"]
+all = ["mypy", "flake8-bugbear", "flake8 (>=3.8.3)", "black (==22.3)", "soundfile", "pytest-cov", "pytest", "jinja2", "jedi", "isort (>=5.5.4)", "InquirerPy (==0.3.4)"]
[[package]]
name = "hypothesis"
@@ -651,40 +586,31 @@ pytz = ["pytz (>=2014.1)"]
[[package]]
name = "idna"
-version = "3.3"
+version = "3.4"
description = "Internationalized Domain Names in Applications (IDNA)"
category = "main"
optional = false
python-versions = ">=3.5"
-[[package]]
-name = "imagesize"
-version = "1.4.1"
-description = "Getting image size from png/jpeg/jpeg2000/gif file"
-category = "dev"
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-
[[package]]
name = "importlib-metadata"
-version = "4.12.0"
+version = "5.0.0"
description = "Read metadata from Python packages"
category = "main"
optional = false
python-versions = ">=3.7"
[package.dependencies]
-typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""}
zipp = ">=0.5"
[package.extras]
-docs = ["sphinx", "jaraco.packaging (>=9)", "rst.linker (>=1.9)"]
+docs = ["sphinx (>=3.5)", "jaraco.packaging (>=9)", "rst.linker (>=1.9)", "furo", "jaraco.tidelift (>=1.4)"]
perf = ["ipython"]
-testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.3)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)", "importlib-resources (>=1.3)"]
+testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "flake8 (<5)", "pytest-cov", "pytest-enabler (>=1.3)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)", "importlib-resources (>=1.3)"]
[[package]]
name = "importlib-resources"
-version = "5.8.0"
+version = "5.10.0"
description = "Read resources from Python packages"
category = "dev"
optional = false
@@ -694,8 +620,8 @@ python-versions = ">=3.7"
zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""}
[package.extras]
-docs = ["sphinx", "jaraco.packaging (>=9)", "rst.linker (>=1.9)"]
-testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)"]
+docs = ["sphinx (>=3.5)", "jaraco.packaging (>=9)", "rst.linker (>=1.9)", "furo", "jaraco.tidelift (>=1.4)"]
+testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "flake8 (<5)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)"]
[[package]]
name = "iniconfig"
@@ -705,109 +631,11 @@ category = "dev"
optional = false
python-versions = "*"
-[[package]]
-name = "ipykernel"
-version = "6.15.0"
-description = "IPython Kernel for Jupyter"
-category = "dev"
-optional = false
-python-versions = ">=3.7"
-
-[package.dependencies]
-appnope = {version = "*", markers = "platform_system == \"Darwin\""}
-debugpy = ">=1.0"
-ipython = ">=7.23.1"
-jupyter-client = ">=6.1.12"
-matplotlib-inline = ">=0.1"
-nest-asyncio = "*"
-packaging = "*"
-psutil = "*"
-pyzmq = ">=17"
-tornado = ">=6.1"
-traitlets = ">=5.1.0"
-
-[package.extras]
-test = ["flaky", "ipyparallel", "pre-commit", "pytest-cov", "pytest-timeout", "pytest (>=6.0)"]
-
-[[package]]
-name = "ipython"
-version = "7.34.0"
-description = "IPython: Productive Interactive Computing"
-category = "dev"
-optional = false
-python-versions = ">=3.7"
-
-[package.dependencies]
-appnope = {version = "*", markers = "sys_platform == \"darwin\""}
-backcall = "*"
-colorama = {version = "*", markers = "sys_platform == \"win32\""}
-decorator = "*"
-jedi = ">=0.16"
-matplotlib-inline = "*"
-pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""}
-pickleshare = "*"
-prompt-toolkit = ">=2.0.0,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.1.0"
-pygments = "*"
-traitlets = ">=4.2"
-
-[package.extras]
-all = ["Sphinx (>=1.3)", "ipykernel", "ipyparallel", "ipywidgets", "nbconvert", "nbformat", "nose (>=0.10.1)", "notebook", "numpy (>=1.17)", "pygments", "qtconsole", "requests", "testpath"]
-doc = ["Sphinx (>=1.3)"]
-kernel = ["ipykernel"]
-nbconvert = ["nbconvert"]
-nbformat = ["nbformat"]
-notebook = ["notebook", "ipywidgets"]
-parallel = ["ipyparallel"]
-qtconsole = ["qtconsole"]
-test = ["nose (>=0.10.1)", "requests", "testpath", "pygments", "nbformat", "ipykernel", "numpy (>=1.17)"]
-
-[[package]]
-name = "ipython-genutils"
-version = "0.2.0"
-description = "Vestigial utilities from IPython"
-category = "dev"
-optional = false
-python-versions = "*"
-
-[[package]]
-name = "ipywidgets"
-version = "7.7.1"
-description = "IPython HTML widgets for Jupyter"
-category = "dev"
-optional = false
-python-versions = "*"
-
-[package.dependencies]
-ipykernel = ">=4.5.1"
-ipython = {version = ">=4.0.0", markers = "python_version >= \"3.3\""}
-ipython-genutils = ">=0.2.0,<0.3.0"
-jupyterlab-widgets = {version = ">=1.0.0", markers = "python_version >= \"3.6\""}
-traitlets = ">=4.3.1"
-widgetsnbextension = ">=3.6.0,<3.7.0"
-
-[package.extras]
-test = ["pytest (>=3.6.0)", "pytest-cov", "mock"]
-
-[[package]]
-name = "jedi"
-version = "0.18.1"
-description = "An autocompletion tool for Python that can be used for text editors."
-category = "dev"
-optional = false
-python-versions = ">=3.6"
-
-[package.dependencies]
-parso = ">=0.8.0,<0.9.0"
-
-[package.extras]
-qa = ["flake8 (==3.8.3)", "mypy (==0.782)"]
-testing = ["Django (<3.1)", "colorama", "docopt", "pytest (<7.0.0)"]
-
[[package]]
name = "jinja2"
version = "3.1.2"
description = "A very fast and expressive template engine."
-category = "dev"
+category = "main"
optional = false
python-versions = ">=3.7"
@@ -819,15 +647,15 @@ i18n = ["Babel (>=2.7)"]
[[package]]
name = "joblib"
-version = "1.1.0"
+version = "1.2.0"
description = "Lightweight pipelining with Python functions"
category = "main"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[[package]]
name = "jsonargparse"
-version = "4.15.0"
+version = "4.15.1"
description = "Parsing of command line options, yaml/jsonnet config files and/or environment variables based on argparse."
category = "main"
optional = true
@@ -859,7 +687,7 @@ urls = ["validators (>=0.14.2)", "requests (>=2.18.4)"]
[[package]]
name = "jsonschema"
-version = "4.6.1"
+version = "4.16.0"
description = "An implementation of JSON Schema validation for Python"
category = "dev"
optional = false
@@ -867,10 +695,9 @@ python-versions = ">=3.7"
[package.dependencies]
attrs = ">=17.4.0"
-importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
+pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""}
pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2"
-typing-extensions = {version = "*", markers = "python_version < \"3.8\""}
[package.extras]
format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
@@ -878,7 +705,7 @@ format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-
[[package]]
name = "jupyter-client"
-version = "7.3.4"
+version = "7.3.5"
description = "Jupyter protocol implementation and client libraries"
category = "dev"
optional = false
@@ -890,7 +717,7 @@ jupyter-core = ">=4.9.2"
nest-asyncio = ">=1.5.4"
python-dateutil = ">=2.8.2"
pyzmq = ">=23.0"
-tornado = ">=6.0"
+tornado = ">=6.2"
traitlets = "*"
[package.extras]
@@ -899,7 +726,7 @@ test = ["codecov", "coverage", "ipykernel (>=6.5)", "ipython", "mypy", "pre-comm
[[package]]
name = "jupyter-core"
-version = "4.10.0"
+version = "4.11.1"
description = "Jupyter core package. A base package on which Jupyter projects rely."
category = "dev"
optional = false
@@ -912,21 +739,6 @@ traitlets = "*"
[package.extras]
test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"]
-[[package]]
-name = "jupyter-sphinx"
-version = "0.3.2"
-description = "Jupyter Sphinx Extensions"
-category = "dev"
-optional = false
-python-versions = ">= 3.6"
-
-[package.dependencies]
-IPython = "*"
-ipywidgets = ">=7.0.0"
-nbconvert = ">=5.5"
-nbformat = "*"
-Sphinx = ">=2"
-
[[package]]
name = "jupyterlab-pygments"
version = "0.2.2"
@@ -936,24 +748,32 @@ optional = false
python-versions = ">=3.7"
[[package]]
-name = "jupyterlab-widgets"
-version = "1.1.1"
-description = "A JupyterLab extension."
+name = "jupytext"
+version = "1.14.1"
+description = "Jupyter notebooks as Markdown documents, Julia, Python or R scripts"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = "~=3.6"
+
+[package.dependencies]
+markdown-it-py = ">=1.0.0,<3.0.0"
+mdit-py-plugins = "*"
+nbformat = "*"
+pyyaml = "*"
+toml = "*"
+
+[package.extras]
+rst2md = ["sphinx-gallery (>=0.7.0,<0.8.0)"]
+toml = ["toml"]
[[package]]
name = "kiwisolver"
-version = "1.4.3"
+version = "1.4.4"
description = "A fast implementation of the Cassowary constraint solver"
category = "main"
optional = false
python-versions = ">=3.7"
-[package.dependencies]
-typing-extensions = {version = "*", markers = "python_version < \"3.8\""}
-
[[package]]
name = "lightning-flash"
version = "0.7.5"
@@ -993,12 +813,26 @@ video = ["kornia (>=0.5.1)", "pytorchvideo (==0.1.2)", "torchvision", "Pillow (>
video_extras = ["pytorchvideo (==0.1.2)", "kornia (>=0.5.1)", "Pillow (>=7.2)", "torchvision", "fiftyone"]
vision = ["segmentation-models-pytorch", "pytorchvideo (==0.1.2)", "kornia (>=0.5.1)", "timm (>=0.4.5)", "Pillow (>=7.2)", "lightning-bolts (>=0.3.3)", "torchvision", "pystiche (>=1.0.0,<2.0.0)"]
+[[package]]
+name = "lxml"
+version = "4.9.1"
+description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*"
+
+[package.extras]
+cssselect = ["cssselect (>=0.7)"]
+html5 = ["html5lib"]
+htmlsoup = ["beautifulsoup4"]
+source = ["Cython (>=0.29.7)"]
+
[[package]]
name = "markdown"
version = "3.3.7"
description = "Python implementation of Markdown."
category = "main"
-optional = true
+optional = false
python-versions = ">=3.6"
[package.dependencies]
@@ -1007,51 +841,94 @@ importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""}
[package.extras]
testing = ["coverage", "pyyaml"]
+[[package]]
+name = "markdown-it-py"
+version = "2.1.0"
+description = "Python port of markdown-it. Markdown parsing, done right!"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+mdurl = ">=0.1,<1.0"
+
+[package.extras]
+testing = ["pytest-regressions", "pytest-cov", "pytest", "coverage"]
+rtd = ["sphinx-book-theme", "sphinx-design", "sphinx-copybutton", "sphinx", "pyyaml", "myst-parser", "attrs"]
+profiling = ["gprof2dot"]
+plugins = ["mdit-py-plugins"]
+linkify = ["linkify-it-py (>=1.0,<2.0)"]
+compare = ["panflute (>=2.1.3,<2.2.0)", "mistune (>=2.0.2,<2.1.0)", "mistletoe (>=0.8.1,<0.9.0)", "markdown (>=3.3.6,<3.4.0)", "commonmark (>=0.9.1,<0.10.0)"]
+code_style = ["pre-commit (==2.6)"]
+benchmarking = ["pytest-benchmark (>=3.2,<4.0)", "pytest", "psutil"]
+
[[package]]
name = "markupsafe"
version = "2.1.1"
description = "Safely add untrusted strings to HTML/XML markup."
-category = "dev"
+category = "main"
optional = false
python-versions = ">=3.7"
[[package]]
name = "matplotlib"
-version = "3.5.2"
+version = "3.6.1"
description = "Python plotting package"
category = "main"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
[package.dependencies]
+contourpy = ">=1.0.1"
cycler = ">=0.10"
fonttools = ">=4.22.0"
kiwisolver = ">=1.0.1"
-numpy = ">=1.17"
+numpy = ">=1.19"
packaging = ">=20.0"
pillow = ">=6.2.0"
pyparsing = ">=2.2.1"
python-dateutil = ">=2.7"
-setuptools_scm = ">=4"
+setuptools_scm = ">=7"
[[package]]
-name = "matplotlib-inline"
-version = "0.1.3"
-description = "Inline Matplotlib backend for Jupyter"
+name = "mccabe"
+version = "0.6.1"
+description = "McCabe checker, plugin for flake8"
category = "dev"
optional = false
-python-versions = ">=3.5"
+python-versions = "*"
+
+[[package]]
+name = "mdit-py-plugins"
+version = "0.3.1"
+description = "Collection of plugins for markdown-it-py"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
[package.dependencies]
-traitlets = "*"
+markdown-it-py = ">=1.0.0,<3.0.0"
+
+[package.extras]
+testing = ["pytest-regressions", "pytest-cov", "pytest", "coverage"]
+rtd = ["sphinx-book-theme (>=0.1.0,<0.2.0)", "myst-parser (>=0.16.1,<0.17.0)", "attrs"]
+code_style = ["pre-commit"]
[[package]]
-name = "mccabe"
-version = "0.6.1"
-description = "McCabe checker, plugin for flake8"
+name = "mdurl"
+version = "0.1.2"
+description = "Markdown URL utilities"
category = "dev"
optional = false
-python-versions = "*"
+python-versions = ">=3.7"
+
+[[package]]
+name = "mergedeep"
+version = "1.3.4"
+description = "A deep merge function for 🐍."
+category = "main"
+optional = false
+python-versions = ">=3.6"
[[package]]
name = "mistune"
@@ -1061,6 +938,139 @@ category = "dev"
optional = false
python-versions = "*"
+[[package]]
+name = "mkdocs"
+version = "1.4.0"
+description = "Project documentation with Markdown."
+category = "main"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+click = ">=7.0"
+ghp-import = ">=1.0"
+importlib-metadata = {version = ">=4.3", markers = "python_version < \"3.10\""}
+Jinja2 = ">=2.11.1"
+Markdown = ">=3.2.1,<3.4"
+mergedeep = ">=1.3.4"
+packaging = ">=20.5"
+PyYAML = ">=5.1"
+pyyaml-env-tag = ">=0.1"
+watchdog = ">=2.0"
+
+[package.extras]
+i18n = ["babel (>=2.9.0)"]
+
+[[package]]
+name = "mkdocs-autorefs"
+version = "0.4.1"
+description = "Automatically link across pages in MkDocs."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+Markdown = ">=3.3"
+mkdocs = ">=1.1"
+
+[[package]]
+name = "mkdocs-exclude-search"
+version = "0.6.4"
+description = "A mkdocs plugin that lets you exclude selected files or sections from the search index."
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+mkdocs = ">=1.0.4"
+
+[[package]]
+name = "mkdocs-jupyter"
+version = "0.21.0"
+description = "Use Jupyter in mkdocs websites"
+category = "dev"
+optional = false
+python-versions = ">=3.7.1,<4"
+
+[package.dependencies]
+jupytext = ">=1.13.8,<2.0.0"
+mkdocs = ">=1.2.3,<2.0.0"
+mkdocs-material = ">=8.0.0,<9.0.0"
+nbconvert = ">=6.2.0,<7.0.0"
+Pygments = ">=2.12.0,<3.0.0"
+
+[[package]]
+name = "mkdocs-material"
+version = "8.5.6"
+description = "Documentation that simply works"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+jinja2 = ">=3.0.2"
+markdown = ">=3.2"
+mkdocs = ">=1.4.0"
+mkdocs-material-extensions = ">=1.0.3"
+pygments = ">=2.12"
+pymdown-extensions = ">=9.4"
+requests = ">=2.26"
+
+[[package]]
+name = "mkdocs-material-extensions"
+version = "1.0.3"
+description = "Extension pack for Python Markdown."
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
+name = "mkdocstrings"
+version = "0.18.1"
+description = "Automatic documentation from sources, for MkDocs."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+Jinja2 = ">=2.11.1"
+Markdown = ">=3.3"
+MarkupSafe = ">=1.1"
+mkdocs = ">=1.2"
+mkdocs-autorefs = ">=0.3.1"
+mkdocstrings-python = {version = ">=0.5.2", optional = true, markers = "extra == \"python\""}
+mkdocstrings-python-legacy = ">=0.2"
+pymdown-extensions = ">=6.3"
+
+[package.extras]
+crystal = ["mkdocstrings-crystal (>=0.3.4)"]
+python = ["mkdocstrings-python (>=0.5.2)"]
+python-legacy = ["mkdocstrings-python-legacy (>=0.2.1)"]
+
+[[package]]
+name = "mkdocstrings-python"
+version = "0.6.6"
+description = "A Python handler for mkdocstrings."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+griffe = ">=0.11.1"
+mkdocstrings = ">=0.18"
+
+[[package]]
+name = "mkdocstrings-python-legacy"
+version = "0.2.2"
+description = "A legacy Python handler for mkdocstrings."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+mkdocstrings = ">=0.18"
+pytkdocs = ">=0.14"
+
[[package]]
name = "multidict"
version = "6.0.2"
@@ -1091,7 +1101,6 @@ python-versions = ">=3.5"
[package.dependencies]
mypy-extensions = ">=0.4.3,<0.5.0"
toml = "*"
-typed-ast = {version = ">=1.4.0,<1.5.0", markers = "python_version < \"3.8\""}
typing-extensions = ">=3.7.4"
[package.extras]
@@ -1108,7 +1117,7 @@ python-versions = "*"
[[package]]
name = "nbclient"
-version = "0.6.6"
+version = "0.7.0"
description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor."
category = "dev"
optional = false
@@ -1121,12 +1130,12 @@ nest-asyncio = "*"
traitlets = ">=5.2.2"
[package.extras]
-sphinx = ["autodoc-traits", "mock", "moto", "myst-parser", "Sphinx (>=1.7)", "sphinx-book-theme"]
-test = ["black", "check-manifest", "flake8", "ipykernel", "ipython (<8.0.0)", "ipywidgets (<8.0.0)", "mypy", "pip (>=18.1)", "pre-commit", "pytest (>=4.1)", "pytest-asyncio", "pytest-cov (>=2.6.1)", "setuptools (>=60.0)", "testpath", "twine (>=1.11.0)", "xmltodict"]
+test = ["xmltodict", "twine (>=1.11.0)", "testpath", "setuptools (>=60.0)", "pytest-cov (>=2.6.1)", "pytest-asyncio", "pytest (>=4.1)", "pre-commit", "pip (>=18.1)", "nbconvert", "mypy", "ipywidgets", "ipython", "ipykernel", "flake8", "check-manifest", "black"]
+sphinx = ["sphinx-book-theme", "Sphinx (>=1.7)", "myst-parser", "moto", "mock", "autodoc-traits"]
[[package]]
name = "nbconvert"
-version = "6.5.0"
+version = "6.5.4"
description = "Converting Jupyter Notebooks"
category = "dev"
optional = false
@@ -1140,6 +1149,7 @@ entrypoints = ">=0.2.2"
jinja2 = ">=3.0"
jupyter-core = ">=4.7"
jupyterlab-pygments = "*"
+lxml = "*"
MarkupSafe = ">=2.0"
mistune = ">=0.8.1,<2"
nbclient = ">=0.5.0"
@@ -1159,7 +1169,7 @@ webpdf = ["pyppeteer (>=1,<1.1)"]
[[package]]
name = "nbformat"
-version = "5.4.0"
+version = "5.6.1"
description = "The Jupyter Notebook format"
category = "dev"
optional = false
@@ -1172,88 +1182,27 @@ jupyter-core = "*"
traitlets = ">=5.1"
[package.extras]
-test = ["check-manifest", "testpath", "pytest", "pre-commit"]
-
-[[package]]
-name = "nbsphinx"
-version = "0.8.9"
-description = "Jupyter Notebook Tools for Sphinx"
-category = "dev"
-optional = false
-python-versions = ">=3.6"
-
-[package.dependencies]
-docutils = "*"
-jinja2 = "*"
-nbconvert = "!=5.4"
-nbformat = "*"
-sphinx = ">=1.8"
-traitlets = ">=5"
+test = ["check-manifest", "pep440", "pre-commit", "pytest", "testpath"]
[[package]]
name = "nest-asyncio"
-version = "1.5.5"
+version = "1.5.6"
description = "Patch asyncio to allow nested event loops"
category = "dev"
optional = false
python-versions = ">=3.5"
-[[package]]
-name = "notebook"
-version = "6.4.12"
-description = "A web-based notebook environment for interactive computing"
-category = "dev"
-optional = false
-python-versions = ">=3.7"
-
-[package.dependencies]
-argon2-cffi = "*"
-ipykernel = "*"
-ipython-genutils = "*"
-jinja2 = "*"
-jupyter-client = ">=5.3.4"
-jupyter-core = ">=4.6.1"
-nbconvert = ">=5"
-nbformat = "*"
-nest-asyncio = ">=1.5"
-prometheus-client = "*"
-pyzmq = ">=17"
-Send2Trash = ">=1.8.0"
-terminado = ">=0.8.3"
-tornado = ">=6.1"
-traitlets = ">=4.2.1"
-
-[package.extras]
-docs = ["sphinx", "nbsphinx", "sphinxcontrib-github-alt", "sphinx-rtd-theme", "myst-parser"]
-json-logging = ["json-logging"]
-test = ["pytest", "coverage", "requests", "testpath", "nbval", "selenium", "pytest-cov", "requests-unixsocket"]
-
[[package]]
name = "numpy"
-version = "1.21.6"
+version = "1.23.3"
description = "NumPy is the fundamental package for array computing with Python."
category = "main"
optional = false
-python-versions = ">=3.7,<3.11"
-
-[[package]]
-name = "numpydoc"
-version = "1.4.0"
-description = "Sphinx extension to support docstrings in Numpy format"
-category = "dev"
-optional = false
-python-versions = ">=3.7"
-
-[package.dependencies]
-Jinja2 = ">=2.10"
-sphinx = ">=3.0"
-
-[package.extras]
-testing = ["matplotlib", "pytest-cov", "pytest"]
+python-versions = ">=3.8"
[[package]]
name = "oauthlib"
-version = "3.2.0"
+version = "3.2.1"
description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic"
category = "main"
optional = true
@@ -1277,19 +1226,22 @@ pyparsing = ">=2.0.2,<3.0.5 || >3.0.5"
[[package]]
name = "pandas"
-version = "1.1.5"
+version = "1.5.0"
description = "Powerful data structures for data analysis, time series, and statistics"
category = "main"
optional = true
-python-versions = ">=3.6.1"
+python-versions = ">=3.8"
[package.dependencies]
-numpy = ">=1.15.4"
-python-dateutil = ">=2.7.3"
-pytz = ">=2017.2"
+numpy = [
+ {version = ">=1.21.0", markers = "python_version >= \"3.10\""},
+ {version = ">=1.20.3", markers = "python_version < \"3.10\""},
+]
+python-dateutil = ">=2.8.1"
+pytz = ">=2020.1"
[package.extras]
-test = ["pytest (>=4.0.2)", "pytest-xdist", "hypothesis (>=3.58)"]
+test = ["pytest-xdist (>=1.31)", "pytest (>=6.0)", "hypothesis (>=5.5.3)"]
[[package]]
name = "pandocfilters"
@@ -1299,53 +1251,22 @@ category = "dev"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-[[package]]
-name = "parso"
-version = "0.8.3"
-description = "A Python Parser"
-category = "dev"
-optional = false
-python-versions = ">=3.6"
-
-[package.extras]
-qa = ["flake8 (==3.8.3)", "mypy (==0.782)"]
-testing = ["docopt", "pytest (<6.0.0)"]
-
[[package]]
name = "pathspec"
-version = "0.9.0"
+version = "0.10.1"
description = "Utility library for gitignore style pattern matching of file paths."
category = "dev"
optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
+python-versions = ">=3.7"
[[package]]
name = "pbr"
-version = "5.9.0"
+version = "5.10.0"
description = "Python Build Reasonableness"
category = "dev"
optional = false
python-versions = ">=2.6"
-[[package]]
-name = "pexpect"
-version = "4.8.0"
-description = "Pexpect allows easy control of interactive console applications."
-category = "dev"
-optional = false
-python-versions = "*"
-
-[package.dependencies]
-ptyprocess = ">=0.5"
-
-[[package]]
-name = "pickleshare"
-version = "0.7.5"
-description = "Tiny 'shelve'-like database with concurrency support"
-category = "dev"
-optional = false
-python-versions = "*"
-
[[package]]
name = "pillow"
version = "9.2.0"
@@ -1358,6 +1279,14 @@ python-versions = ">=3.7"
docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"]
tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
+[[package]]
+name = "pkgutil-resolve-name"
+version = "1.3.10"
+description = "Resolve a name to an object."
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
[[package]]
name = "platformdirs"
version = "2.5.2"
@@ -1378,62 +1307,18 @@ category = "dev"
optional = false
python-versions = ">=3.6"
-[package.dependencies]
-importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""}
-
-[package.extras]
-dev = ["pre-commit", "tox"]
-testing = ["pytest", "pytest-benchmark"]
-
-[[package]]
-name = "prometheus-client"
-version = "0.14.1"
-description = "Python client for the Prometheus monitoring system."
-category = "dev"
-optional = false
-python-versions = ">=3.6"
-
[package.extras]
-twisted = ["twisted"]
-
-[[package]]
-name = "prompt-toolkit"
-version = "3.0.30"
-description = "Library for building powerful interactive command lines in Python"
-category = "dev"
-optional = false
-python-versions = ">=3.6.2"
-
-[package.dependencies]
-wcwidth = "*"
+testing = ["pytest-benchmark", "pytest"]
+dev = ["tox", "pre-commit"]
[[package]]
name = "protobuf"
-version = "3.19.4"
+version = "3.19.6"
description = "Protocol Buffers"
category = "main"
optional = true
python-versions = ">=3.5"
-[[package]]
-name = "psutil"
-version = "5.9.1"
-description = "Cross-platform lib for process and system monitoring in Python."
-category = "dev"
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-
-[package.extras]
-test = ["ipaddress", "mock", "enum34", "pywin32", "wmi"]
-
-[[package]]
-name = "ptyprocess"
-version = "0.7.0"
-description = "Run a subprocess in a pseudo terminal"
-category = "dev"
-optional = false
-python-versions = "*"
-
[[package]]
name = "py"
version = "1.11.0"
@@ -1444,7 +1329,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
[[package]]
name = "pyarrow"
-version = "8.0.0"
+version = "9.0.0"
description = "Python library for Apache Arrow"
category = "main"
optional = true
@@ -1506,12 +1391,26 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
[[package]]
name = "pygments"
-version = "2.12.0"
+version = "2.13.0"
description = "Pygments is a syntax highlighting package written in Python."
category = "dev"
optional = false
python-versions = ">=3.6"
+[package.extras]
+plugins = ["importlib-metadata"]
+
+[[package]]
+name = "pymdown-extensions"
+version = "9.6"
+description = "Extension pack for Python Markdown."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+markdown = ">=3.2"
+
[[package]]
name = "pyparsing"
version = "3.0.9"
@@ -1543,7 +1442,6 @@ python-versions = ">=3.6"
atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""}
attrs = ">=19.2.0"
colorama = {version = "*", markers = "sys_platform == \"win32\""}
-importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""}
iniconfig = "*"
packaging = "*"
pluggy = ">=0.12,<2.0"
@@ -1571,7 +1469,7 @@ testing = ["fields", "hunter", "process-tests", "six", "pytest-xdist", "virtuale
[[package]]
name = "pytest-mock"
-version = "3.8.1"
+version = "3.10.0"
description = "Thin-wrapper around the mock package for easier use with pytest"
category = "dev"
optional = false
@@ -1594,9 +1492,23 @@ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
[package.dependencies]
six = ">=1.5"
+[[package]]
+name = "pytkdocs"
+version = "0.16.1"
+description = "Load Python objects documentation."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+astunparse = {version = ">=1.6", markers = "python_version < \"3.9\""}
+
+[package.extras]
+numpy-style = ["docstring_parser (>=0.7)"]
+
[[package]]
name = "pytorch-lightning"
-version = "1.6.4"
+version = "1.7.7"
description = "PyTorch Lightning is the lightweight PyTorch wrapper for ML researchers. Scale your models. Write less boilerplate."
category = "main"
optional = true
@@ -1606,34 +1518,33 @@ python-versions = ">=3.7"
fsspec = {version = ">=2021.05.0,<2021.06.0 || >2021.06.0", extras = ["http"]}
numpy = ">=1.17.2"
packaging = ">=17.0"
-protobuf = "<=3.20.1"
pyDeprecate = ">=0.3.1"
PyYAML = ">=5.4"
-tensorboard = ">=2.2.0"
-torch = ">=1.8"
-torchmetrics = ">=0.4.1"
+tensorboard = ">=2.9.1"
+torch = ">=1.9"
+torchmetrics = ">=0.7.0"
tqdm = ">=4.57.0"
typing-extensions = ">=4.0.0"
[package.extras]
-all = ["matplotlib (>3.1)", "torchtext (>=0.9)", "omegaconf (>=2.0.5)", "hydra-core (>=1.0.5)", "jsonargparse[signatures] (>=4.7.1)", "gcsfs (>=2021.5.0)", "rich (>=10.2.2,<10.15.0 || >=10.16.0)", "neptune-client (>=0.10.0)", "comet-ml (>=3.1.12)", "mlflow (>=1.0.0)", "test-tube (>=0.7.5)", "wandb (>=0.8.21)", "coverage (>=6.4)", "codecov (>=2.1)", "pytest (>=6.0)", "pytest-rerunfailures (>=10.2)", "mypy (>=0.920)", "flake8 (>=3.9.2)", "pre-commit (>=1.0)", "pytest-forked", "cloudpickle (>=1.3)", "scikit-learn (>0.22.1)", "onnxruntime", "pandas", "torchvision (>=0.9)", "gym[classic_control] (>=0.17.0)", "ipython", "fairscale (>=0.4.5)", "deepspeed", "horovod (>=0.21.2,!=0.24.0)", "hivemind (>=1.0.1)"]
-deepspeed = ["deepspeed"]
-dev = ["matplotlib (>3.1)", "torchtext (>=0.9)", "omegaconf (>=2.0.5)", "hydra-core (>=1.0.5)", "jsonargparse[signatures] (>=4.7.1)", "gcsfs (>=2021.5.0)", "rich (>=10.2.2,<10.15.0 || >=10.16.0)", "neptune-client (>=0.10.0)", "comet-ml (>=3.1.12)", "mlflow (>=1.0.0)", "test-tube (>=0.7.5)", "wandb (>=0.8.21)", "coverage (>=6.4)", "codecov (>=2.1)", "pytest (>=6.0)", "pytest-rerunfailures (>=10.2)", "mypy (>=0.920)", "flake8 (>=3.9.2)", "pre-commit (>=1.0)", "pytest-forked", "cloudpickle (>=1.3)", "scikit-learn (>0.22.1)", "onnxruntime", "pandas"]
-examples = ["torchvision (>=0.9)", "gym[classic_control] (>=0.17.0)", "ipython"]
-extra = ["matplotlib (>3.1)", "torchtext (>=0.9)", "omegaconf (>=2.0.5)", "hydra-core (>=1.0.5)", "jsonargparse[signatures] (>=4.7.1)", "gcsfs (>=2021.5.0)", "rich (>=10.2.2,<10.15.0 || >=10.16.0)"]
+all = ["matplotlib (>3.1)", "torchtext (>=0.10)", "omegaconf (>=2.0.5)", "hydra-core (>=1.0.5)", "jsonargparse[signatures] (>=4.12.0)", "gcsfs (>=2021.5.0)", "rich (>=10.14.0,!=10.15.0.a)", "protobuf (<=3.20.1)", "neptune-client (>=0.10.0)", "comet-ml (>=3.1.12)", "mlflow (>=1.0.0)", "wandb (>=0.10.22)", "coverage (>=6.4)", "codecov (>=2.1)", "pytest (>=7.0)", "pytest-cov", "pytest-forked", "pytest-rerunfailures (>=10.2)", "pre-commit (>=1.0)", "mypy (==0.971)", "cloudpickle (>=1.3)", "scikit-learn (>0.22.1)", "onnxruntime", "psutil", "pandas (>1.0)", "fastapi", "uvicorn", "torchvision (>=0.10)", "gym[classic_control] (>=0.17.0)", "ipython", "fairscale (>=0.4.5)", "deepspeed (>=0.6.0)", "horovod (>=0.21.2,!=0.24.0)", "hivemind (>=1.0.1)"]
+deepspeed = ["deepspeed (>=0.6.0)"]
+dev = ["matplotlib (>3.1)", "torchtext (>=0.10)", "omegaconf (>=2.0.5)", "hydra-core (>=1.0.5)", "jsonargparse[signatures] (>=4.12.0)", "gcsfs (>=2021.5.0)", "rich (>=10.14.0,!=10.15.0.a)", "protobuf (<=3.20.1)", "neptune-client (>=0.10.0)", "comet-ml (>=3.1.12)", "mlflow (>=1.0.0)", "wandb (>=0.10.22)", "coverage (>=6.4)", "codecov (>=2.1)", "pytest (>=7.0)", "pytest-cov", "pytest-forked", "pytest-rerunfailures (>=10.2)", "pre-commit (>=1.0)", "mypy (==0.971)", "cloudpickle (>=1.3)", "scikit-learn (>0.22.1)", "onnxruntime", "psutil", "pandas (>1.0)", "fastapi", "uvicorn"]
+examples = ["torchvision (>=0.10)", "gym[classic_control] (>=0.17.0)", "ipython"]
+extra = ["matplotlib (>3.1)", "torchtext (>=0.10)", "omegaconf (>=2.0.5)", "hydra-core (>=1.0.5)", "jsonargparse[signatures] (>=4.12.0)", "gcsfs (>=2021.5.0)", "rich (>=10.14.0,!=10.15.0.a)", "protobuf (<=3.20.1)"]
fairscale = ["fairscale (>=0.4.5)"]
hivemind = ["hivemind (>=1.0.1)"]
horovod = ["horovod (>=0.21.2,!=0.24.0)"]
-loggers = ["neptune-client (>=0.10.0)", "comet-ml (>=3.1.12)", "mlflow (>=1.0.0)", "test-tube (>=0.7.5)", "wandb (>=0.8.21)"]
-strategies = ["fairscale (>=0.4.5)", "deepspeed", "horovod (>=0.21.2,!=0.24.0)", "hivemind (>=1.0.1)"]
-test = ["coverage (>=6.4)", "codecov (>=2.1)", "pytest (>=6.0)", "pytest-rerunfailures (>=10.2)", "mypy (>=0.920)", "flake8 (>=3.9.2)", "pre-commit (>=1.0)", "pytest-forked", "cloudpickle (>=1.3)", "scikit-learn (>0.22.1)", "onnxruntime", "pandas"]
+loggers = ["neptune-client (>=0.10.0)", "comet-ml (>=3.1.12)", "mlflow (>=1.0.0)", "wandb (>=0.10.22)"]
+strategies = ["fairscale (>=0.4.5)", "deepspeed (>=0.6.0)", "horovod (>=0.21.2,!=0.24.0)", "hivemind (>=1.0.1)"]
+test = ["coverage (>=6.4)", "codecov (>=2.1)", "pytest (>=7.0)", "pytest-cov", "pytest-forked", "pytest-rerunfailures (>=10.2)", "pre-commit (>=1.0)", "mypy (==0.971)", "cloudpickle (>=1.3)", "scikit-learn (>0.22.1)", "onnxruntime", "psutil", "pandas (>1.0)", "fastapi", "uvicorn"]
[[package]]
name = "pytz"
-version = "2022.1"
+version = "2022.4"
description = "World timezone definitions, modern and historical"
category = "main"
-optional = false
+optional = true
python-versions = "*"
[[package]]
@@ -1644,14 +1555,6 @@ category = "dev"
optional = false
python-versions = "*"
-[[package]]
-name = "pywinpty"
-version = "2.0.5"
-description = "Pseudo terminal support for Windows from Python."
-category = "dev"
-optional = false
-python-versions = ">=3.7"
-
[[package]]
name = "pyyaml"
version = "6.0"
@@ -1661,33 +1564,31 @@ optional = false
python-versions = ">=3.6"
[[package]]
-name = "pyzmq"
-version = "23.2.0"
-description = "Python bindings for 0MQ"
-category = "dev"
+name = "pyyaml-env-tag"
+version = "0.1"
+description = "A custom YAML tag for referencing environment variables in YAML files. "
+category = "main"
optional = false
python-versions = ">=3.6"
[package.dependencies]
-cffi = {version = "*", markers = "implementation_name == \"pypy\""}
-py = {version = "*", markers = "implementation_name == \"pypy\""}
+pyyaml = "*"
[[package]]
-name = "recommonmark"
-version = "0.7.1"
-description = "A docutils-compatibility bridge to CommonMark, enabling you to write CommonMark inside of Docutils & Sphinx projects."
+name = "pyzmq"
+version = "24.0.1"
+description = "Python bindings for 0MQ"
category = "dev"
optional = false
-python-versions = "*"
+python-versions = ">=3.6"
[package.dependencies]
-commonmark = ">=0.8.1"
-docutils = ">=0.11"
-sphinx = ">=1.3.1"
+cffi = {version = "*", markers = "implementation_name == \"pypy\""}
+py = {version = "*", markers = "implementation_name == \"pypy\""}
[[package]]
name = "regex"
-version = "2022.6.2"
+version = "2022.9.13"
description = "Alternative regular expression module, to replace re."
category = "main"
optional = true
@@ -1743,7 +1644,7 @@ tests = ["pytest (>=4.6)", "coverage (>=6.0.0)", "pytest-cov", "pytest-localserv
[[package]]
name = "rsa"
-version = "4.8"
+version = "4.9"
description = "Pure-Python RSA implementation"
category = "main"
optional = true
@@ -1754,58 +1655,49 @@ pyasn1 = ">=0.1.3"
[[package]]
name = "scikit-learn"
-version = "1.0.2"
+version = "1.1.2"
description = "A set of python modules for machine learning and data mining"
category = "main"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
[package.dependencies]
-joblib = ">=0.11"
-numpy = ">=1.14.6"
-scipy = ">=1.1.0"
+joblib = ">=1.0.0"
+numpy = ">=1.17.3"
+scipy = ">=1.3.2"
threadpoolctl = ">=2.0.0"
[package.extras]
-benchmark = ["matplotlib (>=2.2.3)", "pandas (>=0.25.0)", "memory-profiler (>=0.57.0)"]
-docs = ["matplotlib (>=2.2.3)", "scikit-image (>=0.14.5)", "pandas (>=0.25.0)", "seaborn (>=0.9.0)", "memory-profiler (>=0.57.0)", "sphinx (>=4.0.1)", "sphinx-gallery (>=0.7.0)", "numpydoc (>=1.0.0)", "Pillow (>=7.1.2)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"]
-examples = ["matplotlib (>=2.2.3)", "scikit-image (>=0.14.5)", "pandas (>=0.25.0)", "seaborn (>=0.9.0)"]
-tests = ["matplotlib (>=2.2.3)", "scikit-image (>=0.14.5)", "pandas (>=0.25.0)", "pytest (>=5.0.1)", "pytest-cov (>=2.9.0)", "flake8 (>=3.8.2)", "black (>=21.6b0)", "mypy (>=0.770)", "pyamg (>=4.0.0)"]
+tests = ["numpydoc (>=1.2.0)", "pyamg (>=4.0.0)", "mypy (>=0.961)", "black (>=22.3.0)", "flake8 (>=3.8.2)", "pytest-cov (>=2.9.0)", "pytest (>=5.0.1)", "pandas (>=1.0.5)", "scikit-image (>=0.16.2)", "matplotlib (>=3.1.2)"]
+examples = ["seaborn (>=0.9.0)", "pandas (>=1.0.5)", "scikit-image (>=0.16.2)", "matplotlib (>=3.1.2)"]
+docs = ["sphinxext-opengraph (>=0.4.2)", "sphinx-prompt (>=1.3.0)", "Pillow (>=7.1.2)", "numpydoc (>=1.2.0)", "sphinx-gallery (>=0.7.0)", "sphinx (>=4.0.1)", "memory-profiler (>=0.57.0)", "seaborn (>=0.9.0)", "pandas (>=1.0.5)", "scikit-image (>=0.16.2)", "matplotlib (>=3.1.2)"]
+benchmark = ["memory-profiler (>=0.57.0)", "pandas (>=1.0.5)", "matplotlib (>=3.1.2)"]
[[package]]
name = "scipy"
-version = "1.7.3"
-description = "SciPy: Scientific Library for Python"
+version = "1.9.2"
+description = "Fundamental algorithms for scientific computing in Python"
category = "main"
optional = false
-python-versions = ">=3.7,<3.11"
+python-versions = ">=3.8"
[package.dependencies]
-numpy = ">=1.16.5,<1.23.0"
-
-[[package]]
-name = "send2trash"
-version = "1.8.0"
-description = "Send file to trash natively under Mac OS X, Windows and Linux."
-category = "dev"
-optional = false
-python-versions = "*"
+numpy = ">=1.18.5,<1.26.0"
[package.extras]
-nativelib = ["pyobjc-framework-cocoa", "pywin32"]
-objc = ["pyobjc-framework-cocoa"]
-win32 = ["pywin32"]
+test = ["pytest", "pytest-cov", "pytest-xdist", "asv", "mpmath", "gmpy2", "threadpoolctl", "scikit-umfpack"]
+doc = ["sphinx (!=4.1.0)", "pydata-sphinx-theme (==0.9.0)", "sphinx-panels (>=0.5.2)", "matplotlib (>2)", "numpydoc", "sphinx-tabs"]
+dev = ["mypy", "typing-extensions", "pycodestyle", "flake8"]
[[package]]
name = "setuptools-scm"
-version = "7.0.4"
+version = "7.0.5"
description = "the blessed package to manage your versions by scm tags"
category = "main"
optional = false
python-versions = ">=3.7"
[package.dependencies]
-importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
packaging = ">=20.0"
tomli = ">=1.0.0"
typing-extensions = "*"
@@ -1830,14 +1722,6 @@ category = "dev"
optional = false
python-versions = ">=3.6"
-[[package]]
-name = "snowballstemmer"
-version = "2.2.0"
-description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms."
-category = "dev"
-optional = false
-python-versions = "*"
-
[[package]]
name = "soupsieve"
version = "2.3.2.post1"
@@ -1846,163 +1730,15 @@ category = "dev"
optional = false
python-versions = ">=3.6"
-[[package]]
-name = "sphinx"
-version = "5.0.2"
-description = "Python documentation generator"
-category = "dev"
-optional = false
-python-versions = ">=3.6"
-
-[package.dependencies]
-alabaster = ">=0.7,<0.8"
-babel = ">=1.3"
-colorama = {version = ">=0.3.5", markers = "sys_platform == \"win32\""}
-docutils = ">=0.14,<0.19"
-imagesize = "*"
-importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""}
-Jinja2 = ">=2.3"
-packaging = "*"
-Pygments = ">=2.0"
-requests = ">=2.5.0"
-snowballstemmer = ">=1.1"
-sphinxcontrib-applehelp = "*"
-sphinxcontrib-devhelp = "*"
-sphinxcontrib-htmlhelp = ">=2.0.0"
-sphinxcontrib-jsmath = "*"
-sphinxcontrib-qthelp = "*"
-sphinxcontrib-serializinghtml = ">=1.1.5"
-
-[package.extras]
-docs = ["sphinxcontrib-websupport"]
-lint = ["flake8 (>=3.5.0)", "isort", "mypy (>=0.950)", "docutils-stubs", "types-typed-ast", "types-requests"]
-test = ["pytest (>=4.6)", "html5lib", "cython", "typed-ast"]
-
-[[package]]
-name = "sphinx-automodapi"
-version = "0.13"
-description = "Sphinx extension for auto-generating API documentation for entire modules"
-category = "dev"
-optional = false
-python-versions = ">=3.6"
-
-[package.dependencies]
-sphinx = ">=1.7"
-
-[package.extras]
-test = ["pytest", "pytest-cov", "cython", "codecov", "coverage (<5.0)"]
-
-[[package]]
-name = "sphinx-copybutton"
-version = "0.4.0"
-description = "Add a copy button to each of your code cells."
-category = "dev"
-optional = false
-python-versions = ">=3.6"
-
-[package.dependencies]
-sphinx = ">=1.8"
-
-[package.extras]
-rtd = ["sphinx-book-theme", "ipython", "sphinx"]
-code_style = ["pre-commit (==2.12.1)"]
-
-[[package]]
-name = "sphinx-rtd-theme"
-version = "0.5.2"
-description = "Read the Docs theme for Sphinx"
-category = "dev"
-optional = false
-python-versions = "*"
-
-[package.dependencies]
-docutils = "<0.17"
-sphinx = "*"
-
-[package.extras]
-dev = ["bump2version", "sphinxcontrib-httpdomain", "transifex-client"]
-
-[[package]]
-name = "sphinxcontrib-applehelp"
-version = "1.0.2"
-description = "sphinxcontrib-applehelp is a sphinx extension which outputs Apple help books"
-category = "dev"
-optional = false
-python-versions = ">=3.5"
-
-[package.extras]
-test = ["pytest"]
-lint = ["docutils-stubs", "mypy", "flake8"]
-
-[[package]]
-name = "sphinxcontrib-devhelp"
-version = "1.0.2"
-description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document."
-category = "dev"
-optional = false
-python-versions = ">=3.5"
-
-[package.extras]
-test = ["pytest"]
-lint = ["docutils-stubs", "mypy", "flake8"]
-
-[[package]]
-name = "sphinxcontrib-htmlhelp"
-version = "2.0.0"
-description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files"
-category = "dev"
-optional = false
-python-versions = ">=3.6"
-
-[package.extras]
-test = ["html5lib", "pytest"]
-lint = ["docutils-stubs", "mypy", "flake8"]
-
-[[package]]
-name = "sphinxcontrib-jsmath"
-version = "1.0.1"
-description = "A sphinx extension which renders display math in HTML via JavaScript"
-category = "dev"
-optional = false
-python-versions = ">=3.5"
-
-[package.extras]
-test = ["mypy", "flake8", "pytest"]
-
-[[package]]
-name = "sphinxcontrib-qthelp"
-version = "1.0.3"
-description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document."
-category = "dev"
-optional = false
-python-versions = ">=3.5"
-
-[package.extras]
-test = ["pytest"]
-lint = ["docutils-stubs", "mypy", "flake8"]
-
-[[package]]
-name = "sphinxcontrib-serializinghtml"
-version = "1.1.5"
-description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)."
-category = "dev"
-optional = false
-python-versions = ">=3.5"
-
-[package.extras]
-test = ["pytest"]
-lint = ["docutils-stubs", "mypy", "flake8"]
-
[[package]]
name = "stevedore"
-version = "3.5.0"
+version = "4.0.0"
description = "Manage dynamic plugins for Python applications"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.8"
[package.dependencies]
-importlib-metadata = {version = ">=1.7.0", markers = "python_version < \"3.8\""}
pbr = ">=2.0.0,<2.1.0 || >2.1.0"
[[package]]
@@ -2013,9 +1749,6 @@ category = "main"
optional = false
python-versions = ">=3.6"
-[package.dependencies]
-typing-extensions = {version = "*", markers = "python_version < \"3.8\""}
-
[package.extras]
dev = ["pre-commit", "rich", "cogapp", "tomli", "coverage", "freezegun (>=0.2.8)", "pretend", "pytest-asyncio", "pytest (>=6.0)", "simplejson", "furo", "sphinx", "sphinx-notfound-page", "sphinxcontrib-mermaid", "twisted"]
docs = ["furo", "sphinx", "sphinx-notfound-page", "sphinxcontrib-mermaid", "twisted"]
@@ -2023,7 +1756,7 @@ tests = ["coverage", "freezegun (>=0.2.8)", "pretend", "pytest-asyncio", "pytest
[[package]]
name = "tensorboard"
-version = "2.9.1"
+version = "2.10.1"
description = "TensorBoard lets you watch Tensors Flow"
category = "main"
optional = true
@@ -2058,22 +1791,6 @@ category = "main"
optional = true
python-versions = "*"
-[[package]]
-name = "terminado"
-version = "0.15.0"
-description = "Tornado websocket backend for the Xterm.js Javascript terminal emulator library."
-category = "dev"
-optional = false
-python-versions = ">=3.7"
-
-[package.dependencies]
-ptyprocess = {version = "*", markers = "os_name != \"nt\""}
-pywinpty = {version = ">=1.1.0", markers = "os_name == \"nt\""}
-tornado = ">=6.1.0"
-
-[package.extras]
-test = ["pre-commit", "pytest-timeout", "pytest (>=6.0)"]
-
[[package]]
name = "threadpoolctl"
version = "3.1.0"
@@ -2094,8 +1811,8 @@ python-versions = ">=3.6"
webencodings = ">=0.4"
[package.extras]
-doc = ["sphinx", "sphinx-rtd-theme"]
-test = ["pytest", "pytest-cov", "pytest-flake8", "pytest-isort", "coverage"]
+test = ["coverage", "pytest-isort", "pytest-flake8", "pytest-cov", "pytest"]
+doc = ["sphinx-rtd-theme", "sphinx"]
[[package]]
name = "tokenizers"
@@ -2127,7 +1844,7 @@ python-versions = ">=3.6"
[[package]]
name = "torch"
-version = "1.12.0"
+version = "1.12.1"
description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration"
category = "main"
optional = false
@@ -2161,7 +1878,6 @@ python-versions = ">=3.7"
numpy = ">=1.17.2"
packaging = "*"
torch = ">=1.3.1"
-typing-extensions = {version = "*", markers = "python_version < \"3.8\""}
[package.extras]
test = ["pycocotools", "codecov (>=2.1)", "check-manifest", "torch-complex", "scikit-learn (>1.0,<1.1.1)", "cloudpickle (>=1.3)", "mir-eval (>=0.6)", "pytest-cov (>2.10)", "pytest-timeout", "scikit-image (>0.17.1)", "pytest (>=6.0.0,<7.0.0)", "pypesq", "transformers (>=4.0)", "pre-commit (>=1.0)", "fire", "bert-score (==0.3.10)", "pytorch-msssim", "psutil", "coverage (>5.2)", "huggingface-hub (<0.7)", "pytest-doctestplus (>=0.9.0)", "requests", "rouge-score (==0.0.4)", "jiwer (>=2.3.0)", "twine (>=3.2)", "sacrebleu (>=2.0.0)", "fast-bss-eval (>=0.1.0)", "phmdoctest (>=1.1.1)", "mypy (>=0.790)"]
@@ -2175,7 +1891,7 @@ text = ["nltk (>=3.6)", "regex (>=2021.9.24)", "tqdm (>=4.41.0)"]
[[package]]
name = "torchvision"
-version = "0.13.0"
+version = "0.13.1"
description = "image and video datasets and models for torch deep learning"
category = "main"
optional = false
@@ -2185,7 +1901,7 @@ python-versions = ">=3.7"
numpy = "*"
pillow = ">=5.3.0,<8.3.0 || >=8.4.0"
requests = "*"
-torch = "1.12.0"
+torch = "1.12.1"
typing-extensions = "*"
[package.extras]
@@ -2193,15 +1909,15 @@ scipy = ["scipy"]
[[package]]
name = "tornado"
-version = "6.1"
+version = "6.2"
description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
category = "dev"
optional = false
-python-versions = ">= 3.5"
+python-versions = ">= 3.7"
[[package]]
name = "tqdm"
-version = "4.64.0"
+version = "4.64.1"
description = "Fast, Extensible Progress Meter"
category = "main"
optional = false
@@ -2218,7 +1934,7 @@ telegram = ["requests"]
[[package]]
name = "traitlets"
-version = "5.3.0"
+version = "5.4.0"
description = ""
category = "dev"
optional = false
@@ -2229,7 +1945,7 @@ test = ["pre-commit", "pytest"]
[[package]]
name = "transformers"
-version = "4.20.1"
+version = "4.22.2"
description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow"
category = "main"
optional = true
@@ -2237,8 +1953,7 @@ python-versions = ">=3.7.0"
[package.dependencies]
filelock = "*"
-huggingface-hub = ">=0.1.0,<1.0"
-importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
+huggingface-hub = ">=0.9.0,<1.0"
numpy = ">=1.17"
packaging = ">=20.0"
pyyaml = ">=5.1"
@@ -2248,18 +1963,19 @@ tokenizers = ">=0.11.1,<0.11.3 || >0.11.3,<0.13"
tqdm = ">=4.27"
[package.extras]
-all = ["tensorflow (>=2.3)", "onnxconverter-common", "tf2onnx", "torch (>=1.0)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "flax (>=0.3.5)", "optax (>=0.0.8)", "sentencepiece (>=0.1.91,!=0.1.92)", "protobuf (<=3.20.1)", "tokenizers (>=0.11.1,!=0.11.3,<0.13)", "torchaudio", "librosa", "pyctcdecode (>=0.3.0)", "phonemizer", "pillow", "optuna", "ray", "sigopt", "timm", "codecarbon (==1.2.0)"]
+accelerate = ["accelerate (>=0.10.0)"]
+all = ["tensorflow (>=2.3)", "onnxconverter-common", "tf2onnx", "tensorflow-text", "torch (>=1.0)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "flax (>=0.4.1)", "optax (>=0.0.8)", "sentencepiece (>=0.1.91,!=0.1.92)", "protobuf (<=3.20.1)", "tokenizers (>=0.11.1,!=0.11.3,<0.13)", "torchaudio", "librosa", "pyctcdecode (>=0.3.0)", "phonemizer", "pillow", "optuna", "ray", "sigopt", "timm", "codecarbon (==1.2.0)", "accelerate (>=0.10.0)"]
audio = ["librosa", "pyctcdecode (>=0.3.0)", "phonemizer"]
codecarbon = ["codecarbon (==1.2.0)"]
-deepspeed = ["deepspeed (>=0.6.5)"]
-deepspeed-testing = ["deepspeed (>=0.6.5)", "pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil", "datasets", "dill (<0.3.5)", "pytest-timeout", "black (>=22.3,<23.0)", "sacrebleu (>=1.4.12,<2.0.0)", "rouge-score", "nltk", "GitPython (<3.1.19)", "hf-doc-builder (>=0.3.0)", "protobuf (<=3.20.1)", "sacremoses", "rjieba", "faiss-cpu", "cookiecutter (==1.7.3)", "optuna"]
-dev = ["tensorflow (>=2.3)", "onnxconverter-common", "tf2onnx", "torch (>=1.0)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "flax (>=0.3.5)", "optax (>=0.0.8)", "sentencepiece (>=0.1.91,!=0.1.92)", "protobuf (<=3.20.1)", "tokenizers (>=0.11.1,!=0.11.3,<0.13)", "torchaudio", "librosa", "pyctcdecode (>=0.3.0)", "phonemizer", "pillow", "optuna", "ray", "sigopt", "timm", "codecarbon (==1.2.0)", "pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil", "datasets", "dill (<0.3.5)", "pytest-timeout", "black (>=22.3,<23.0)", "sacrebleu (>=1.4.12,<2.0.0)", "rouge-score", "nltk", "GitPython (<3.1.19)", "hf-doc-builder (>=0.3.0)", "sacremoses", "rjieba", "faiss-cpu", "cookiecutter (==1.7.3)", "isort (>=5.5.4)", "flake8 (>=3.8.3)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "unidic-lite (>=1.0.7)", "unidic (>=1.0.2)", "hf-doc-builder", "scikit-learn"]
-dev-tensorflow = ["pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil", "datasets", "dill (<0.3.5)", "pytest-timeout", "black (>=22.3,<23.0)", "sacrebleu (>=1.4.12,<2.0.0)", "rouge-score", "nltk", "GitPython (<3.1.19)", "hf-doc-builder (>=0.3.0)", "protobuf (<=3.20.1)", "sacremoses", "rjieba", "faiss-cpu", "cookiecutter (==1.7.3)", "tensorflow (>=2.3)", "onnxconverter-common", "tf2onnx", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.11.1,!=0.11.3,<0.13)", "pillow", "isort (>=5.5.4)", "flake8 (>=3.8.3)", "hf-doc-builder", "scikit-learn", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "librosa", "pyctcdecode (>=0.3.0)", "phonemizer"]
-dev-torch = ["pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil", "datasets", "dill (<0.3.5)", "pytest-timeout", "black (>=22.3,<23.0)", "sacrebleu (>=1.4.12,<2.0.0)", "rouge-score", "nltk", "GitPython (<3.1.19)", "hf-doc-builder (>=0.3.0)", "protobuf (<=3.20.1)", "sacremoses", "rjieba", "faiss-cpu", "cookiecutter (==1.7.3)", "torch (>=1.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.11.1,!=0.11.3,<0.13)", "torchaudio", "librosa", "pyctcdecode (>=0.3.0)", "phonemizer", "pillow", "optuna", "ray", "sigopt", "timm", "codecarbon (==1.2.0)", "isort (>=5.5.4)", "flake8 (>=3.8.3)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "unidic-lite (>=1.0.7)", "unidic (>=1.0.2)", "hf-doc-builder", "scikit-learn", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"]
-docs = ["tensorflow (>=2.3)", "onnxconverter-common", "tf2onnx", "torch (>=1.0)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "flax (>=0.3.5)", "optax (>=0.0.8)", "sentencepiece (>=0.1.91,!=0.1.92)", "protobuf (<=3.20.1)", "tokenizers (>=0.11.1,!=0.11.3,<0.13)", "torchaudio", "librosa", "pyctcdecode (>=0.3.0)", "phonemizer", "pillow", "optuna", "ray", "sigopt", "timm", "codecarbon (==1.2.0)", "hf-doc-builder"]
+deepspeed = ["deepspeed (>=0.6.5)", "accelerate (>=0.10.0)"]
+deepspeed-testing = ["deepspeed (>=0.6.5)", "accelerate (>=0.10.0)", "pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil", "datasets", "dill (<0.3.5)", "evaluate (>=0.2.0)", "pytest-timeout", "black (==22.3)", "sacrebleu (>=1.4.12,<2.0.0)", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "nltk", "GitPython (<3.1.19)", "hf-doc-builder (>=0.3.0)", "protobuf (<=3.20.1)", "sacremoses", "rjieba", "faiss-cpu", "cookiecutter (==1.7.3)", "optuna"]
+dev = ["tensorflow (>=2.3)", "onnxconverter-common", "tf2onnx", "tensorflow-text", "torch (>=1.0)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "flax (>=0.4.1)", "optax (>=0.0.8)", "sentencepiece (>=0.1.91,!=0.1.92)", "protobuf (<=3.20.1)", "tokenizers (>=0.11.1,!=0.11.3,<0.13)", "torchaudio", "librosa", "pyctcdecode (>=0.3.0)", "phonemizer", "pillow", "optuna", "ray", "sigopt", "timm", "codecarbon (==1.2.0)", "accelerate (>=0.10.0)", "pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil", "datasets", "dill (<0.3.5)", "evaluate (>=0.2.0)", "pytest-timeout", "black (==22.3)", "sacrebleu (>=1.4.12,<2.0.0)", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "nltk", "GitPython (<3.1.19)", "hf-doc-builder (>=0.3.0)", "sacremoses", "rjieba", "faiss-cpu", "cookiecutter (==1.7.3)", "isort (>=5.5.4)", "flake8 (>=3.8.3)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "unidic-lite (>=1.0.7)", "unidic (>=1.0.2)", "hf-doc-builder", "scikit-learn"]
+dev-tensorflow = ["pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil", "datasets", "dill (<0.3.5)", "evaluate (>=0.2.0)", "pytest-timeout", "black (==22.3)", "sacrebleu (>=1.4.12,<2.0.0)", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "nltk", "GitPython (<3.1.19)", "hf-doc-builder (>=0.3.0)", "protobuf (<=3.20.1)", "sacremoses", "rjieba", "faiss-cpu", "cookiecutter (==1.7.3)", "tensorflow (>=2.3)", "onnxconverter-common", "tf2onnx", "tensorflow-text", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.11.1,!=0.11.3,<0.13)", "pillow", "isort (>=5.5.4)", "flake8 (>=3.8.3)", "hf-doc-builder", "scikit-learn", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "librosa", "pyctcdecode (>=0.3.0)", "phonemizer"]
+dev-torch = ["pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil", "datasets", "dill (<0.3.5)", "evaluate (>=0.2.0)", "pytest-timeout", "black (==22.3)", "sacrebleu (>=1.4.12,<2.0.0)", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "nltk", "GitPython (<3.1.19)", "hf-doc-builder (>=0.3.0)", "protobuf (<=3.20.1)", "sacremoses", "rjieba", "faiss-cpu", "cookiecutter (==1.7.3)", "torch (>=1.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.11.1,!=0.11.3,<0.13)", "torchaudio", "librosa", "pyctcdecode (>=0.3.0)", "phonemizer", "pillow", "optuna", "ray", "sigopt", "timm", "codecarbon (==1.2.0)", "isort (>=5.5.4)", "flake8 (>=3.8.3)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "unidic-lite (>=1.0.7)", "unidic (>=1.0.2)", "hf-doc-builder", "scikit-learn", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"]
+docs = ["tensorflow (>=2.3)", "onnxconverter-common", "tf2onnx", "tensorflow-text", "torch (>=1.0)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "flax (>=0.4.1)", "optax (>=0.0.8)", "sentencepiece (>=0.1.91,!=0.1.92)", "protobuf (<=3.20.1)", "tokenizers (>=0.11.1,!=0.11.3,<0.13)", "torchaudio", "librosa", "pyctcdecode (>=0.3.0)", "phonemizer", "pillow", "optuna", "ray", "sigopt", "timm", "codecarbon (==1.2.0)", "accelerate (>=0.10.0)", "hf-doc-builder"]
docs_specific = ["hf-doc-builder"]
fairscale = ["fairscale (>0.3)"]
-flax = ["jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "flax (>=0.3.5)", "optax (>=0.0.8)"]
+flax = ["jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "flax (>=0.4.1)", "optax (>=0.0.8)"]
flax-speech = ["librosa", "pyctcdecode (>=0.3.0)", "phonemizer"]
ftfy = ["ftfy"]
integrations = ["optuna", "ray", "sigopt"]
@@ -2268,7 +1984,7 @@ modelcreation = ["cookiecutter (==1.7.3)"]
onnx = ["onnxconverter-common", "tf2onnx", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"]
onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"]
optuna = ["optuna"]
-quality = ["black (>=22.3,<23.0)", "isort (>=5.5.4)", "flake8 (>=3.8.3)", "GitPython (<3.1.19)", "hf-doc-builder (>=0.3.0)"]
+quality = ["black (==22.3)", "isort (>=5.5.4)", "flake8 (>=3.8.3)", "GitPython (<3.1.19)", "hf-doc-builder (>=0.3.0)"]
ray = ["ray"]
retrieval = ["faiss-cpu", "datasets"]
sagemaker = ["sagemaker (>=2.31.0)"]
@@ -2277,28 +1993,20 @@ serving = ["pydantic", "uvicorn", "fastapi", "starlette"]
sigopt = ["sigopt"]
sklearn = ["scikit-learn"]
speech = ["torchaudio", "librosa", "pyctcdecode (>=0.3.0)", "phonemizer"]
-testing = ["pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil", "datasets", "dill (<0.3.5)", "pytest-timeout", "black (>=22.3,<23.0)", "sacrebleu (>=1.4.12,<2.0.0)", "rouge-score", "nltk", "GitPython (<3.1.19)", "hf-doc-builder (>=0.3.0)", "protobuf (<=3.20.1)", "sacremoses", "rjieba", "faiss-cpu", "cookiecutter (==1.7.3)"]
-tf = ["tensorflow (>=2.3)", "onnxconverter-common", "tf2onnx"]
-tf-cpu = ["tensorflow-cpu (>=2.3)", "onnxconverter-common", "tf2onnx"]
+testing = ["pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil", "datasets", "dill (<0.3.5)", "evaluate (>=0.2.0)", "pytest-timeout", "black (==22.3)", "sacrebleu (>=1.4.12,<2.0.0)", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "nltk", "GitPython (<3.1.19)", "hf-doc-builder (>=0.3.0)", "protobuf (<=3.20.1)", "sacremoses", "rjieba", "faiss-cpu", "cookiecutter (==1.7.3)"]
+tf = ["tensorflow (>=2.3)", "onnxconverter-common", "tf2onnx", "tensorflow-text"]
+tf-cpu = ["tensorflow-cpu (>=2.3)", "onnxconverter-common", "tf2onnx", "tensorflow-text"]
tf-speech = ["librosa", "pyctcdecode (>=0.3.0)", "phonemizer"]
timm = ["timm"]
tokenizers = ["tokenizers (>=0.11.1,!=0.11.3,<0.13)"]
torch = ["torch (>=1.0)"]
torch-speech = ["torchaudio", "librosa", "pyctcdecode (>=0.3.0)", "phonemizer"]
-torchhub = ["filelock", "huggingface-hub (>=0.1.0,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf (<=3.20.1)", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "torch (>=1.0)", "tokenizers (>=0.11.1,!=0.11.3,<0.13)", "tqdm (>=4.27)"]
+torchhub = ["filelock", "huggingface-hub (>=0.9.0,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf (<=3.20.1)", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "torch (>=1.0)", "tokenizers (>=0.11.1,!=0.11.3,<0.13)", "tqdm (>=4.27)"]
vision = ["pillow"]
-[[package]]
-name = "typed-ast"
-version = "1.4.3"
-description = "a fork of Python 2 and 3 ast modules with type comment support"
-category = "dev"
-optional = false
-python-versions = "*"
-
[[package]]
name = "typing-extensions"
-version = "4.3.0"
+version = "4.4.0"
description = "Backported and Experimental Type Hints for Python 3.7+"
category = "main"
optional = false
@@ -2306,24 +2014,27 @@ python-versions = ">=3.7"
[[package]]
name = "urllib3"
-version = "1.26.9"
+version = "1.26.12"
description = "HTTP library with thread-safe connection pooling, file post, and more."
category = "main"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4"
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4"
[package.extras]
brotli = ["brotlicffi (>=0.8.0)", "brotli (>=1.0.9)", "brotlipy (>=0.6.0)"]
-secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"]
+secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "urllib3-secure-extra", "ipaddress"]
socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
[[package]]
-name = "wcwidth"
-version = "0.2.5"
-description = "Measures the displayed width of unicode strings in a terminal"
-category = "dev"
+name = "watchdog"
+version = "2.1.9"
+description = "Filesystem events monitoring"
+category = "main"
optional = false
-python-versions = "*"
+python-versions = ">=3.6"
+
+[package.extras]
+watchmedo = ["PyYAML (>=3.10)"]
[[package]]
name = "webencodings"
@@ -2335,26 +2046,18 @@ python-versions = "*"
[[package]]
name = "werkzeug"
-version = "2.1.2"
+version = "2.2.2"
description = "The comprehensive WSGI web application library."
category = "main"
optional = true
python-versions = ">=3.7"
+[package.dependencies]
+MarkupSafe = ">=2.1.1"
+
[package.extras]
watchdog = ["watchdog"]
-[[package]]
-name = "widgetsnbextension"
-version = "3.6.1"
-description = "IPython HTML widgets for Jupyter"
-category = "dev"
-optional = false
-python-versions = "*"
-
-[package.dependencies]
-notebook = ">=4.4.1"
-
[[package]]
name = "xxhash"
version = "3.0.0"
@@ -2365,28 +2068,27 @@ python-versions = ">=3.6"
[[package]]
name = "yarl"
-version = "1.7.2"
+version = "1.8.1"
description = "Yet another URL library"
category = "main"
optional = true
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
idna = ">=2.0"
multidict = ">=4.0"
-typing-extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""}
[[package]]
name = "zipp"
-version = "3.8.0"
+version = "3.9.0"
description = "Backport of pathlib-compatible object wrapper for zip files"
category = "main"
optional = false
python-versions = ">=3.7"
[package.extras]
-docs = ["sphinx", "jaraco.packaging (>=9)", "rst.linker (>=1.9)"]
-testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)"]
+docs = ["sphinx (>=3.5)", "jaraco.packaging (>=9)", "rst.linker (>=1.9)", "furo", "jaraco.tidelift (>=1.4)"]
+testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "flake8 (<5)", "pytest-cov", "pytest-enabler (>=1.3)", "jaraco.itertools", "func-timeout", "jaraco.functools", "more-itertools", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)"]
[extras]
nlp = ["transformers", "datasets"]
@@ -2394,24 +2096,17 @@ vision = ["torchvision", "lightning-flash"]
[metadata]
lock-version = "1.1"
-python-versions = ">=3.7,<4"
-content-hash = "bf26a664b86992814dcf444f011370cd03e656a43fd3c735f473256458e953e6"
+python-versions = ">=3.8,<4"
+content-hash = "146804eb73a03d155e981e6c07d6aadbfdd1453d84031e06986691c24be0fcad"
[metadata.files]
absl-py = []
aiohttp = []
aiosignal = []
-alabaster = []
-appnope = []
-argon2-cffi = []
-argon2-cffi-bindings = []
-asteroid-sphinx-theme = []
+astunparse = []
async-timeout = []
-asynctest = []
atomicwrites = []
attrs = []
-babel = []
-backcall = []
bandit = []
beautifulsoup4 = []
black = []
@@ -2422,12 +2117,10 @@ cffi = []
charset-normalizer = []
click = []
colorama = []
-commonmark = []
+contourpy = []
coverage = []
cycler = []
datasets = []
-debugpy = []
-decorator = []
defusedxml = []
dill = []
docstring-parser = []
@@ -2439,41 +2132,49 @@ flake8 = []
fonttools = []
frozenlist = []
fsspec = []
+ghp-import = []
gitdb = []
gitpython = []
google-auth = []
google-auth-oauthlib = []
+griffe = []
grpcio = []
h5py = []
huggingface-hub = []
hypothesis = []
idna = []
-imagesize = []
importlib-metadata = []
importlib-resources = []
iniconfig = []
-ipykernel = []
-ipython = []
-ipython-genutils = []
-ipywidgets = []
-jedi = []
jinja2 = []
joblib = []
jsonargparse = []
jsonschema = []
jupyter-client = []
jupyter-core = []
-jupyter-sphinx = []
jupyterlab-pygments = []
-jupyterlab-widgets = []
+jupytext = []
kiwisolver = []
lightning-flash = []
+lxml = []
markdown = []
+markdown-it-py = []
markupsafe = []
matplotlib = []
-matplotlib-inline = []
mccabe = []
+mdit-py-plugins = []
+mdurl = []
+mergedeep = []
mistune = []
+mkdocs = []
+mkdocs-autorefs = []
+mkdocs-exclude-search = []
+mkdocs-jupyter = []
+mkdocs-material = []
+mkdocs-material-extensions = []
+mkdocstrings = []
+mkdocstrings-python = []
+mkdocstrings-python-legacy = []
multidict = []
multiprocess = []
mypy = []
@@ -2481,28 +2182,19 @@ mypy-extensions = []
nbclient = []
nbconvert = []
nbformat = []
-nbsphinx = []
nest-asyncio = []
-notebook = []
numpy = []
-numpydoc = []
oauthlib = []
packaging = []
pandas = []
pandocfilters = []
-parso = []
pathspec = []
pbr = []
-pexpect = []
-pickleshare = []
pillow = []
+pkgutil-resolve-name = []
platformdirs = []
pluggy = []
-prometheus-client = []
-prompt-toolkit = []
protobuf = []
-psutil = []
-ptyprocess = []
py = []
pyarrow = []
pyasn1 = []
@@ -2512,19 +2204,20 @@ pycparser = []
pydeprecate = []
pyflakes = []
pygments = []
+pymdown-extensions = []
pyparsing = []
pyrsistent = []
pytest = []
pytest-cov = []
pytest-mock = []
python-dateutil = []
+pytkdocs = []
pytorch-lightning = []
pytz = []
pywin32 = []
-pywinpty = []
pyyaml = []
+pyyaml-env-tag = []
pyzmq = []
-recommonmark = []
regex = []
requests = []
requests-oauthlib = []
@@ -2532,28 +2225,15 @@ responses = []
rsa = []
scikit-learn = []
scipy = []
-send2trash = []
setuptools-scm = []
six = []
smmap = []
-snowballstemmer = []
soupsieve = []
-sphinx = []
-sphinx-automodapi = []
-sphinx-copybutton = []
-sphinx-rtd-theme = []
-sphinxcontrib-applehelp = []
-sphinxcontrib-devhelp = []
-sphinxcontrib-htmlhelp = []
-sphinxcontrib-jsmath = []
-sphinxcontrib-qthelp = []
-sphinxcontrib-serializinghtml = []
stevedore = []
structlog = []
tensorboard = []
tensorboard-data-server = []
tensorboard-plugin-wit = []
-terminado = []
threadpoolctl = []
tinycss2 = []
tokenizers = []
@@ -2567,13 +2247,11 @@ tornado = []
tqdm = []
traitlets = []
transformers = []
-typed-ast = []
typing-extensions = []
urllib3 = []
-wcwidth = []
+watchdog = []
webencodings = []
werkzeug = []
-widgetsnbextension = []
xxhash = []
yarl = []
zipp = []
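The `[metadata]` hunk above is the part of `poetry.lock` that Poetry actually validates before installing: `python-versions` records the raised `>=3.8,<4` interpreter floor, and `content-hash` is a digest of the dependency spec in `pyproject.toml`, so the two files must be regenerated together with `poetry lock` (or `poetry lock --no-update` to keep existing pins) rather than edited by hand. A minimal consistency check, as a sketch assuming Python 3.11+ for `tomllib` (earlier interpreters can substitute the third-party `tomli`):

```python
# Sketch: detect a stale poetry.lock after hand-editing pyproject.toml.
# Compares the locked interpreter range against the declared one; a
# mismatch means `poetry lock` needs to be rerun.
import tomllib

with open("pyproject.toml", "rb") as f:
    pyproject = tomllib.load(f)
with open("poetry.lock", "rb") as f:
    lock = tomllib.load(f)

declared = pyproject["tool"]["poetry"]["dependencies"]["python"]  # ">=3.8,<4"
locked = lock["metadata"]["python-versions"]                      # ">=3.8,<4"
assert locked == declared, f"stale lock: {locked!r} != {declared!r}"
```

The `pyproject.toml` diff below is the source of that new `content-hash`.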
diff --git a/pyproject.toml b/pyproject.toml
index 5e113c5b..c9beaa3c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -14,7 +14,7 @@ documentation = "https://baal.readthedocs.io"
repository = "https://github.com/ElementAI/baal/"
[tool.poetry.dependencies]
-python = ">=3.7,<4"
+python = ">=3.8,<4"
torch = ">=1.6.0"
torchmetrics = "^0.9.3"
h5py = "^3.4.0"
@@ -41,23 +41,19 @@ torch-hypothesis = "0.2.0"
hypothesis = "4.24.0"
flake8 = "^3.9.2"
pytest-mock = "^3.6.1"
-black = "^22.6.0"
+black = "^21.8b0"
+mypy = "^0.910"
+bandit = "^1.7.1"
# Documentation
-Sphinx = ">2"
-sphinx-rtd-theme = "^0.5.2"
-asteroid-sphinx-theme = "^0.0.3"
-jupyter-sphinx = "^0.3.2"
-Pygments = ">=2.6.1"
-nbsphinx = "^0.8.7"
-sphinx-automodapi = "^0.13"
-sphinx-copybutton = "^0.4.0"
-numpydoc = "^1.1.0"
docutils = "0.16"
-recommonmark = "^0.7.1"
-mypy = "^0.910"
-bandit = "^1.7.1"
+# Documentation
+mkdocs-jupyter = "^0.21.0"
+mkdocs-material = "^8.5.6"
+Pygments = "^2.12.0"
+mkdocstrings = {extras = ["python"], version = "^0.18.1"}
+mkdocs-exclude-search = "^0.6.4"
[tool.poetry.extras]
vision = ["torchvision", "lightning-flash"]
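The dev-dependency block swaps the entire Sphinx stack for MkDocs, which is why the `sphinx*` entries vanish from the `[metadata.files]` list above while `mkdocs-material`, `mkdocs-jupyter`, and friends appear. Note the extras syntax on `mkdocstrings`: the `{extras = ["python"], version = "^0.18.1"}` table is what pulls in the `mkdocstrings-python` handler (backed by `griffe`) visible in the lock file. A smoke test for the new toolchain, as a sketch; `importlib.metadata` ships with Python 3.8, matching the raised floor:

```python
# Sketch: confirm the MkDocs toolchain resolved with the expected extras.
# Package names are copied from the dev-dependency block above.
from importlib.metadata import version, PackageNotFoundError

for pkg in ("mkdocs", "mkdocs-material", "mkdocs-jupyter",
            "mkdocstrings", "mkdocstrings-python", "mkdocs-exclude-search"):
    try:
        print(f"{pkg}: {version(pkg)}")
    except PackageNotFoundError:
        print(f"{pkg}: MISSING - rerun `poetry install`")
```

The remaining change, below, is unrelated to the docs move.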
diff --git a/tests/utils/ssl_module_test.py b/tests/utils/ssl_module_test.py
index ad20aa2c..2f66cbdf 100644
--- a/tests/utils/ssl_module_test.py
+++ b/tests/utils/ssl_module_test.py
@@ -55,8 +55,8 @@ def test_epoch(self):
'workers': 0}
module = TestSSLModule(self.al_dataset, **hparams)
- trainer = Trainer(max_epochs=1, num_sanity_val_steps=0, progress_bar_refresh_rate=0, logger=False,
- checkpoint_callback=False)
+ trainer = Trainer(max_epochs=1, num_sanity_val_steps=0, logger=False,
+ enable_checkpointing=False)
trainer.fit(module)
assert len(module.labeled_data) == len(module.unlabeled_data)
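The test change tracks a PyTorch Lightning API migration rather than a behaviour change: the `checkpoint_callback` and `progress_bar_refresh_rate` constructor arguments were deprecated in Lightning 1.5 and removed in 1.7, with `enable_checkpointing` as the direct replacement. The updated call drops progress-bar suppression altogether; a minimal sketch of the full modern equivalent, where `enable_progress_bar=False` is an assumption standing in for the old `progress_bar_refresh_rate=0`:

```python
# Sketch: Trainer construction equivalent to the pre-1.7 call, on
# pytorch-lightning >= 1.7. Only the two renamed flags differ.
from pytorch_lightning import Trainer

trainer = Trainer(
    max_epochs=1,
    num_sanity_val_steps=0,
    logger=False,
    enable_checkpointing=False,  # was: checkpoint_callback=False
    enable_progress_bar=False,   # was: progress_bar_refresh_rate=0 (dropped in the diff)
)
```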