From 14028f7699b6765d582e62bcc82264700a126404 Mon Sep 17 00:00:00 2001
From: Vasiliy Kuznetsov <vkuzo@users.noreply.github.com>
Date: Sat, 22 Oct 2022 09:43:11 -0700
Subject: [PATCH] update NS for FX tutorial for PyTorch v1.13 (#2089)

* update NS for FX tutorial for PyTorch v1.13

Summary:

Makes two updates to ensure this tutorial still runs on PyTorch 1.13:
1. changes the `qconfig_dict` argument of `prepare_fx` to a `qconfig_mapping`
2. adds `example_inputs` to `prepare_fx` (see the sketch below)
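
A minimal sketch of the updated `prepare_fx` call on 1.13, using a toy model
in place of the tutorial's MobileNetV2 (the model and variable names here are
illustrative, not the tutorial's):

    import copy
    import torch
    import torch.nn as nn
    import torch.ao.quantization.quantize_fx as quantize_fx
    from torch.ao.quantization import get_default_qconfig_mapping

    # stand-in for the tutorial's float MobileNetV2
    model_float = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU()).eval()

    qconfig_mapping = get_default_qconfig_mapping("fbgemm")
    example_inputs = (torch.randn(1, 3, 224, 224),)

    # prepare_fx now takes example_inputs as a required argument
    model_prepared = quantize_fx.prepare_fx(
        copy.deepcopy(model_float), qconfig_mapping, example_inputs)
    model_prepared(*example_inputs)  # calibration forward pass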

Test plan:

Run the tutorial; it completes without errors on master.

* Pin importlib_metadata<5.0 for python <= 3.7 in requirements.txt (#2091)

v5.0.0 causes issues with Python 3.7: https://github.com/python/importlib_metadata/issues/411 (one possible pin is sketched below)
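
One way to express the pin in requirements.txt (a sketch, not the exact line
from #2091) is with an environment marker so newer Python versions are
unaffected:

    importlib_metadata < 5.0 ; python_version <= "3.7"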

* Enable the FX tutorial

Co-authored-by: Max Balandat <Balandat@users.noreply.github.com>
Co-authored-by: Svetlana Karslioglu <svekars@fb.com>
---
 .jenkins/validate_tutorials_built.py          | 1 -
 prototype_source/fx_numeric_suite_tutorial.py | 4 ++--
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/.jenkins/validate_tutorials_built.py b/.jenkins/validate_tutorials_built.py
index 5f9d563475..b426049b58 100644
--- a/.jenkins/validate_tutorials_built.py
+++ b/.jenkins/validate_tutorials_built.py
@@ -50,7 +50,6 @@
     "recipes/Captum_Recipe",
     "hyperparameter_tuning_tutorial",
     "flask_rest_api_tutorial",
-    "fx_numeric_suite_tutorial", # remove when https://github.com/pytorch/tutorials/pull/2089 is fixed
     "ax_multiobjective_nas_tutorial",
 ]
 
diff --git a/prototype_source/fx_numeric_suite_tutorial.py b/prototype_source/fx_numeric_suite_tutorial.py
index ac43ae49e0..922f48ae0f 100644
--- a/prototype_source/fx_numeric_suite_tutorial.py
+++ b/prototype_source/fx_numeric_suite_tutorial.py
@@ -84,9 +84,9 @@ def plot(xdata, ydata, xlabel, ylabel, title):
 # Note: quantization APIs are inplace, so we save a copy of the float model for
 # later comparison to the quantized model. This is done throughout the
 # tutorial.
-mobilenetv2_prepared = quantize_fx.prepare_fx(
-    copy.deepcopy(mobilenetv2_float), qconfig_dict)
 datum = torch.randn(1, 3, 224, 224)
+mobilenetv2_prepared = quantize_fx.prepare_fx(
+    copy.deepcopy(mobilenetv2_float), qconfig_dict, (datum,))
 mobilenetv2_prepared(datum)
 # Note: there is a long standing issue that we cannot copy.deepcopy a
 # quantized model. Since quantization APIs are inplace and we need to use