#!flask/bin/python
"""
Flask application to serve Machine Learning models
"""
import os
import flask
import json
import logging
from time import time
from src.utils.encoder import ExtendedEncoder, returns_json
from src.factory import ModelFactory
# Version of this APP template
__version__ = '2.2.0'
# Read env variables
# Environment variables are always strings, so parse DEBUG into a boolean explicitly
DEBUG = os.environ.get('DEBUG', 'false').lower() in ('1', 'true', 'yes')
MODEL_NAME = os.environ.get('MODEL_NAME', 'model.joblib')
ENVIRONMENT = os.environ.get('ENVIRONMENT', 'local')
MODEL_TYPE = os.environ.get('MODEL_TYPE', 'SKLEARN_MODEL')
SERVICE_START_TIMESTAMP = time()
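# Example configuration (illustrative values; any variable can be omitted to
# fall back to the defaults above):
#
#   export DEBUG=1
#   export MODEL_NAME=my_model.joblib
#   export MODEL_TYPE=SKLEARN_MODEL
#   export ENVIRONMENT=production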
# Create Flask Application
application = flask.Flask(__name__)
# Customize Flask Application
application.logger.setLevel(logging.DEBUG if DEBUG else logging.ERROR)
application.json_encoder = ExtendedEncoder
# Create Model instance
model = ModelFactory.create_model(MODEL_NAME, MODEL_TYPE)
# load saved model
application.logger.info('ENVIRONMENT: {}'.format(ENVIRONMENT))
application.logger.info('Using template version: {}'.format(__version__))
application.logger.info('Loading model...')
model.load()
@application.route('/predict', methods=['POST'])
@returns_json
def predict():
    """Make predictions and explain them.

    Model inference using input data. This is the main function.

    URL Params:
        proba (int):
            1 in order to compute probabilities for classification models or 0
            to return the predicted class (classification) or value
            (regression). Default 0.
        explain (int):
            1 in order to compute model explanations for the predicted value.
            This will return a status 500 when the model does not support
            explanations. Default 0.

    Payload:
        JSON string that can take two forms:

        In the first, the payload is a record or a list of records with one
        value per feature. This will be directly interpreted as the input for
        the model.

        In the second, the payload is a dictionary with 1 or 2 elements. The
        key "_data" is mandatory because it will be the input for the model
        and its format is expected to be a record or a list of records. The
        optional key "_samples" will be used to obtain different explanations
        (see :func:`~model.Model.explain`).
    """
    # Parameters
    do_proba = int(flask.request.args.get('proba', 0))
    do_explain = int(flask.request.args.get('explain', 0))
    input = json.loads(flask.request.data or '{}')
    if isinstance(input, dict):
        samples = input.get('_samples', None)
        data = input.get('_data', {})
        # "_data" may be a record (dict) or a list of records
        if data:
            input = data
    else:
        samples = None
    # Predict
    before_time = time()
    try:
        predict_function = 'predict_proba' if do_proba else 'predict'
        prediction = getattr(model, predict_function)(input)
    except Exception as err:
        return flask.Response(str(err), status=500)
    result = {'prediction': prediction}
    # Explain
    if do_explain:
        try:
            explanation = model.explain(input, samples=samples)
        except Exception as err:
            return flask.Response(str(err), status=500)
        else:
            result['explanation'] = explanation
    after_time = time()
    # Log the request and its result
    to_be_logged = {
        'input': flask.request.data,
        'params': flask.request.args,
        'request_id': flask.request.headers.get('X-Correlation-ID'),
        'result': result,
        'model_info': model.info,
        'elapsed_time': after_time - before_time
    }
    application.logger.debug(to_be_logged)
    return result
@application.route('/info', methods=['GET'])
@returns_json
def info():
    """Model information

    Get the model information: metadata, type, classifier, etc.
    """
    try:
        info = model.info
    except Exception as err:
        return flask.Response(str(err), status=500)
    else:
        return info
@application.route('/features', methods=['GET'])
@returns_json
def features():
    """Model features

    Get the model accepted features. This includes feature importance if the
    model allows it.
    """
    try:
        features = model.features()
    except Exception as err:
        return flask.Response(str(err), status=500)
    else:
        return features
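# Illustrative calls to the read-only endpoints above (assuming the service
# runs on localhost:5000):
#
#   curl http://localhost:5000/info
#   curl http://localhost:5000/features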
@application.route('/preprocess', methods=['POST'])
@returns_json
def preprocess():
    """Preprocess input data

    Get the preprocessed version of the input data. If the model does not
    include preprocessing steps, this method will return the same data as the
    input.
    """
    input = json.loads(flask.request.data or '{}')
    try:
        data = model.preprocess(input)
    except Exception as err:
        return flask.Response(str(err), status=500)
    else:
        return data
@application.route('/health')
def health_check():
    return flask.Response("up", status=200)
@application.route('/ready')
def readiness_check():
    if model.is_ready():
        return flask.Response("ready", status=200)
    else:
        return flask.Response("not ready", status=503)
@application.route('/service-info')
@returns_json
def service_info():
    """Service information

    Get information about the service: up-time, version of the template, name
    of the served model, etc.
    """
    info = {
        'version-template': __version__,
        'running-since': SERVICE_START_TIMESTAMP,
        'serving-model-file': MODEL_NAME,
        'serving-model-family': model.family,
        'debug': DEBUG
    }
    return info
if __name__ == '__main__':
    application.run(
        debug=DEBUG,
        host=os.environ.get('HOST', 'localhost'),
        port=os.environ.get('PORT', '5000'))
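# To run the development server directly (illustrative; HOST and PORT fall
# back to localhost:5000 when unset):
#
#   DEBUG=1 PORT=8080 python service.py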