diff --git a/jupyter_server/base/handlers.py b/jupyter_server/base/handlers.py index f0980378a1..37341f5a01 100755 --- a/jupyter_server/base/handlers.py +++ b/jupyter_server/base/handlers.py @@ -589,8 +589,11 @@ def finish(self, *args, **kwargs): return super(APIHandler, self).finish(*args, **kwargs) def options(self, *args, **kwargs): - self.set_header('Access-Control-Allow-Headers', - 'accept, content-type, authorization, x-xsrftoken') + if 'Access-Control-Allow-Headers' in self.settings.get('headers', {}): + self.set_header('Access-Control-Allow-Headers', self.settings['headers']['Access-Control-Allow-Headers']) + else: + self.set_header('Access-Control-Allow-Headers', + 'accept, content-type, authorization, x-xsrftoken') self.set_header('Access-Control-Allow-Methods', 'GET, PUT, POST, PATCH, DELETE, OPTIONS') diff --git a/jupyter_server/log.py b/jupyter_server/log.py index 64b35d811d..3621a70cae 100644 --- a/jupyter_server/log.py +++ b/jupyter_server/log.py @@ -7,7 +7,8 @@ import json from tornado.log import access_log -from .metrics import prometheus_log_method +from .prometheus.log_functions import prometheus_log_method + def log_request(handler): """log a bit more information about each request than tornado's default diff --git a/jupyter_server/metrics.py b/jupyter_server/metrics.py index 24a08d8c88..a67a252ade 100644 --- a/jupyter_server/metrics.py +++ b/jupyter_server/metrics.py @@ -1,18 +1,5 @@ -""" -Prometheus metrics exported by Jupyter Notebook Server +from .prometheus.metrics import HTTP_REQUEST_DURATION_SECONDS -Read https://prometheus.io/docs/practices/naming/ for naming -conventions for metrics & labels. 
-""" - -from prometheus_client import Histogram - -# This is a fairly standard name for HTTP duration latency reporting -HTTP_REQUEST_DURATION_SECONDS = Histogram( - 'http_request_duration_seconds', - 'duration in seconds for all HTTP requests', - ['method', 'handler', 'status_code'], -) def prometheus_log_method(handler): """ diff --git a/jupyter_server/prometheus/__init__.py b/jupyter_server/prometheus/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/jupyter_server/prometheus/log_functions.py b/jupyter_server/prometheus/log_functions.py new file mode 100644 index 0000000000..338b59d0d1 --- /dev/null +++ b/jupyter_server/prometheus/log_functions.py @@ -0,0 +1,24 @@ +from jupyter_server.prometheus.metrics import HTTP_REQUEST_DURATION_SECONDS + + +def prometheus_log_method(handler): + """ + Tornado log handler for recording RED metrics. + + We record the following metrics: + Rate - the number of requests, per second, your services are serving. + Errors - the number of failed requests per second. + Duration - The amount of time each request takes expressed as a time interval. + + We use a fully qualified name of the handler as a label, + rather than every url path to reduce cardinality. + + This function should be either the value of or called from a function + that is the 'log_function' tornado setting. This makes it get called + at the end of every request, allowing us to record the metrics we need. 
+ """ + HTTP_REQUEST_DURATION_SECONDS.labels( + method=handler.request.method, + handler='{}.{}'.format(handler.__class__.__module__, type(handler).__name__), + status_code=handler.get_status() + ).observe(handler.request.request_time()) diff --git a/jupyter_server/prometheus/metrics.py b/jupyter_server/prometheus/metrics.py new file mode 100644 index 0000000000..abc9d0e16b --- /dev/null +++ b/jupyter_server/prometheus/metrics.py @@ -0,0 +1,27 @@ +""" +Prometheus metrics exported by Jupyter Notebook Server + +Read https://prometheus.io/docs/practices/naming/ for naming +conventions for metrics & labels. +""" + + +from prometheus_client import Histogram, Gauge + + +HTTP_REQUEST_DURATION_SECONDS = Histogram( + 'http_request_duration_seconds', + 'duration in seconds for all HTTP requests', + ['method', 'handler', 'status_code'], +) + +TERMINAL_CURRENTLY_RUNNING_TOTAL = Gauge( + 'terminal_currently_running_total', + 'counter for how many terminals are running', +) + +KERNEL_CURRENTLY_RUNNING_TOTAL = Gauge( + 'kernel_currently_running_total', + 'counter for how many kernels are running labeled by type', + ['type'] +) diff --git a/jupyter_server/services/kernels/handlers.py b/jupyter_server/services/kernels/handlers.py index c75f243643..cf744894c9 100644 --- a/jupyter_server/services/kernels/handlers.py +++ b/jupyter_server/services/kernels/handlers.py @@ -188,7 +188,7 @@ def _finish_kernel_info(self, info): protocol_version = info.get('protocol_version', client_protocol_version) if protocol_version != client_protocol_version: self.session.adapt_version = int(protocol_version.split('.')[0]) - self.log.info("Adapting to protocol v%s for kernel %s", protocol_version, self.kernel_id) + self.log.info("Adapting from protocol version {protocol_version} (kernel {kernel_id}) to {client_protocol_version} (client).".format(protocol_version=protocol_version, kernel_id=self.kernel_id, client_protocol_version=client_protocol_version)) if not self._kernel_info_future.done(): 
self._kernel_info_future.set_result(info) diff --git a/jupyter_server/services/kernels/kernelmanager.py b/jupyter_server/services/kernels/kernelmanager.py index 8356b50697..b7f6d9f9a3 100644 --- a/jupyter_server/services/kernels/kernelmanager.py +++ b/jupyter_server/services/kernels/kernelmanager.py @@ -26,6 +26,8 @@ from jupyter_server._tz import utcnow, isoformat from ipython_genutils.py3compat import getcwd +from jupyter_server.prometheus.metrics import KERNEL_CURRENTLY_RUNNING_TOTAL + class MappingKernelManager(MultiKernelManager): """A KernelManager that handles @@ -178,6 +180,13 @@ def start_kernel(self, kernel_id=None, path=None, **kwargs): lambda : self._handle_kernel_died(kernel_id), 'dead', ) + + # Increase the metric of number of kernels running + # for the relevant kernel type by 1 + KERNEL_CURRENTLY_RUNNING_TOTAL.labels( + type=self._kernels[kernel_id].kernel_name + ).inc() + else: self._check_kernel_id(kernel_id) self.log.info("Using existing kernel: %s" % kernel_id) @@ -282,11 +291,19 @@ def shutdown_kernel(self, kernel_id, now=False): """Shutdown a kernel by kernel_id""" self._check_kernel_id(kernel_id) kernel = self._kernels[kernel_id] - kernel._activity_stream.close() - kernel._activity_stream = None + if kernel._activity_stream: + kernel._activity_stream.close() + kernel._activity_stream = None self.stop_buffering(kernel_id) self._kernel_connections.pop(kernel_id, None) self.last_kernel_activity = utcnow() + + # Decrease the metric of number of kernels + # running for the relevant kernel type by 1 + KERNEL_CURRENTLY_RUNNING_TOTAL.labels( + type=self._kernels[kernel_id].kernel_name + ).dec() + return super(MappingKernelManager, self).shutdown_kernel(kernel_id, now=now) def restart_kernel(self, kernel_id): diff --git a/jupyter_server/services/nbconvert/handlers.py b/jupyter_server/services/nbconvert/handlers.py index 7e27783ef0..63e731238f 100644 --- a/jupyter_server/services/nbconvert/handlers.py +++ b/jupyter_server/services/nbconvert/handlers.py @@ 
-4,18 +4,32 @@ from ...base.handlers import APIHandler + class NbconvertRootHandler(APIHandler): @web.authenticated def get(self): try: - from nbconvert.exporters.export import exporter_map + from nbconvert.exporters import base except ImportError as e: raise web.HTTPError(500, "Could not import nbconvert: %s" % e) res = {} - for format, exporter in exporter_map.items(): - res[format] = info = {} - info['output_mimetype'] = exporter.output_mimetype + exporters = base.get_export_names() + for exporter_name in exporters: + try: + exporter_class = base.get_exporter(exporter_name) + except ValueError: + # I think the only way this will happen is if the entrypoint + # is uninstalled while this method is running + continue + # XXX: According to the docs, it looks like this should be set to None + # if the exporter shouldn't be exposed to the front-end and a friendly + # name if it should. However, none of the built-in exports have it defined. + # if not exporter_class.export_from_notebook: + # continue + res[exporter_name] = { + "output_mimetype": exporter_class.output_mimetype, + } self.finish(json.dumps(res)) diff --git a/jupyter_server/terminal/api_handlers.py b/jupyter_server/terminal/api_handlers.py index 1e92b58e4c..d64e1acb3f 100644 --- a/jupyter_server/terminal/api_handlers.py +++ b/jupyter_server/terminal/api_handlers.py @@ -1,7 +1,8 @@ import json from tornado import web, gen from ..base.handlers import APIHandler -from ..utils import url_path_join +from ..prometheus.metrics import TERMINAL_CURRENTLY_RUNNING_TOTAL + class TerminalRootHandler(APIHandler): @@ -12,12 +13,20 @@ def get(self): terms = [{'name': name} for name in tm.terminals] self.finish(json.dumps(terms)) + # Update the metric below to the length of the list 'terms' + TERMINAL_CURRENTLY_RUNNING_TOTAL.set( + len(terms) + ) + @web.authenticated def post(self): """POST /terminals creates a new terminal and redirects to it""" name, _ = self.terminal_manager.new_named_terminal() 
self.finish(json.dumps({'name': name})) + # Increase the metric by one because a new terminal was created + TERMINAL_CURRENTLY_RUNNING_TOTAL.inc() + class TerminalHandler(APIHandler): SUPPORTED_METHODS = ('GET', 'DELETE') @@ -38,5 +47,10 @@ def delete(self, name): yield tm.terminate(name, force=True) self.set_status(204) self.finish() + + # Decrease the metric below by one + # because a terminal has been shutdown + TERMINAL_CURRENTLY_RUNNING_TOTAL.dec() + else: raise web.HTTPError(404, "Terminal not found: %r" % name)