From 1a4fefa4f491fcf1686edc27960cc70804cd204d Mon Sep 17 00:00:00 2001 From: "Tzvetomir Stoyanov (VMware)" Date: Wed, 8 Dec 2021 06:36:26 +0200 Subject: [PATCH] Export the trained model When the model is trained, in order to run an inference service to serve it, the model should be exported. Two optional parameters are introduced: "-save NAME" "-save_version VERSION" By default, the model is not exported. If "-save NAME" is specified, the model is saved using given NAME. If "-save_version VERSION" is specified, together with "-save NAME", the model is saved using given NAME and VERSION. The "-save_version" is ignored, if "-save" is missing. By default, version "001" is used. Models are exported in directory: models/<NAME>-<outcome>/<VERSION>/ and are compressed in file: models/<NAME>-<outcome>/<NAME>-<outcome>-<VERSION>.tar.gz The exported models are tested with kserve, the layout of directories and archive file is designed in a way kserve tensorflow predictor expects. fixes #2 Signed-off-by: Tzvetomir Stoyanov (VMware) --- .gitignore | 1 + README.md | 2 ++ ml-conversational-analytic-tool/baseCNN.py | 3 ++ ml-conversational-analytic-tool/baseLSTM.py | 3 ++ ml-conversational-analytic-tool/run.py | 38 +++++++++++++++++---- 5 files changed, 41 insertions(+), 6 deletions(-) diff --git a/.gitignore b/.gitignore index 89d102c..1d1298d 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ .DS_Store .ipynb_checkpoints env/ +models/ exports/ __pycache__/ virtualenv-ml-conversational/ \ No newline at end of file diff --git a/README.md b/README.md index ecf166e..c9150e4 100644 --- a/README.md +++ b/README.md @@ -135,6 +135,8 @@ python ./ml-conversational-analytic-tool/run.py 0: + save_model(model=model, name=save_name+"-"+outcome, version=model_ver) return scores @@ -46,6 +67,10 @@ def run(annotated_filename, dataset_filename, outcome, encoding_type, model_type parser.add_argument('dataset_filename', help='File location of extracted dataset') parser.add_argument('model', help='Model type to use for training, supported CNN and LSTM') 
parser.add_argument('outcome', help='Inclusive, Constructive, or Both') + parser.add_argument('-save', metavar='NAME', help='Save the model using given NAME') + parser.add_argument('-save_version', metavar='VERSION', default='001', + help='Together with -save NAME: save the model using given NAME and VERSION. '\ + 'If omitted, 001 is used. The parameter is ignored if -save is missing.') parser.add_argument('-roleRelevant', action='store_true', default=False, help='Encoding method differentiates b/w conversation roles') parser.add_argument('-pad', action='store_true', default=False, help='Pad total length of each pull') @@ -60,13 +85,14 @@ def run(annotated_filename, dataset_filename, outcome, encoding_type, model_type encodingType = 'role-agnostic' if args.outcome != 'Both': - run_res = run(args.annotated_filename, args.dataset_filename, args.outcome, encodingType, args.model, args.pad) + run_res = run(args.annotated_filename, args.dataset_filename, args.outcome, encodingType, + args.model, args.pad, args.save, args.save_version) print(run_res) else: run_res_constructive = run(args.annotated_filename, args.dataset_filename, 'Constructive', encodingType, - args.model, args.pad) + args.model, args.pad, args.save, args.save_version) print("Constructive: {}".format(run_res_constructive)) - run_res_inclusive = run(args.annotated_filename, args.dataset_filename, 'Inclusive', encodingType, args.model, - args.pad) + run_res_inclusive = run(args.annotated_filename, args.dataset_filename, 'Inclusive', encodingType, + args.model, args.pad, args.save, args.save_version) print("Inclusvie: {}".format(run_res_inclusive))