From dbc7d0f30ad08fdfbacf450ccd5bddcaac1527fc Mon Sep 17 00:00:00 2001 From: Komu Wairagu Date: Sat, 2 Feb 2019 11:10:16 +0300 Subject: [PATCH] Issues/91 - BYO logger (#93) What: - add ability to bring your own logger Why: - closes: https://github.com/komuw/naz/issues/91 - give people more options --- .travis.yml | 1 - CHANGELOG.md | 1 + cli/cli.py | 37 ++- documentation/config.md | 1 + documentation/sphinx-docs/index.rst | 3 +- documentation/sphinx-docs/introduction.rst | 30 +++ documentation/sphinx-docs/logger.rst | 7 + naz/__init__.py | 2 +- naz/client.py | 295 +++++++++++++-------- naz/hooks.py | 10 +- naz/logger.py | 86 +++++- naz/ratelimiter.py | 7 +- naz/throttle.py | 14 +- tests/test_client.py | 13 + tests/test_logger.py | 96 +++++++ 15 files changed, 444 insertions(+), 159 deletions(-) create mode 100644 documentation/sphinx-docs/logger.rst create mode 100644 tests/test_logger.py diff --git a/.travis.yml b/.travis.yml index ff28afa1..3be012d6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -33,7 +33,6 @@ script: git remote set-branches --add origin master # https://github.com/travis-ci/travis-ci/issues/6069 git fetch printf "\n current branch is $TRAVIS_BRANCH \n" - git branch -a if [ "$TRAVIS_BRANCH" == "master" ]; then printf "\n $TRAVIS_BRANCH branch, ignoring check for relese notes \n" else diff --git a/CHANGELOG.md b/CHANGELOG.md index 4bbc4039..4f03e92f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,3 +3,4 @@ - Add sphinx documentation: https://github.com/komuw/naz/pull/92 - Start tracking changes in a changelog - Add more type hints and also run `mypy` across the entire repo: https://github.com/komuw/naz/pull/92 +- It's now possible to bring your own logger: https://github.com/komuw/naz/pull/93 diff --git a/cli/cli.py b/cli/cli.py index 68d812d2..a32bd72a 100644 --- a/cli/cli.py +++ b/cli/cli.py @@ -106,14 +106,8 @@ def main(): """ """ _client_id = "".join(random.choices(string.ascii_uppercase + string.digits, k=17)) - logger = logging.getLogger("naz.cli") - handler = logging.StreamHandler() - formatter = logging.Formatter("%(message)s") - handler.setFormatter(formatter) - if not logger.handlers: - logger.addHandler(handler) - logger.setLevel("DEBUG") - logger.info({"event": "naz.cli.main", "stage": "start", "client_id": _client_id}) + logger = naz.logger.SimpleBaseLogger("naz.cli") + logger.log(logging.INFO, {"event": "naz.cli.main", "stage": "start", "client_id": _client_id}) loop = asyncio.get_event_loop() try: @@ -129,7 +123,6 @@ def main(): loglevel = ( kwargs.get("loglevel").upper() if kwargs.get("loglevel") else args.loglevel.upper() ) - logger.setLevel(loglevel) log_metadata = kwargs.get("log_metadata") if not log_metadata: log_metadata = {} @@ -144,15 +137,15 @@ def main(): "client_id": client_id, } ) - extra_log_data = {"log_metadata": log_metadata} - logger = naz.NazLoggingAdapter(logger, extra_log_data) + logger.bind(loglevel=loglevel, log_metadata=log_metadata) - logger.info("\n\n\t {} \n\n".format("Naz: the SMPP client.")) + logger.log(logging.INFO, "\n\n\t {} \n\n".format("Naz: the SMPP client.")) if dry_run: - logger.warn( + logger.log( + logging.WARN, "\n\n\t {} \n\n".format( "Naz: Caution; You have activated dry-run, naz may not behave correctly." - ) + ), ) # Load custom classes @@ -167,7 +160,7 @@ def main(): if inspect.isclass(outboundqueue): # DO NOT instantiate class instance, fail with appropriate error instead. msg = "outboundqueue should be a class instance." 
- logger.exception({"event": "naz.cli.main", "stage": "end", "error": msg}) + logger.log(logging.ERROR, {"event": "naz.cli.main", "stage": "end", "error": msg}) sys.exit(77) sequence_generator = kwargs.get("sequence_generator") @@ -177,7 +170,7 @@ def main(): kwargs["sequence_generator"] = sequence_generator if inspect.isclass(sequence_generator): msg = "sequence_generator should be a class instance." - logger.exception({"event": "naz.cli.main", "stage": "end", "error": msg}) + logger.log(logging.ERROR, {"event": "naz.cli.main", "stage": "end", "error": msg}) sys.exit(77) codec_class = kwargs.get("codec_class") @@ -186,7 +179,7 @@ def main(): kwargs["codec_class"] = codec_class if inspect.isclass(codec_class): msg = "codec_class should be a class instance." - logger.exception({"event": "naz.cli.main", "stage": "end", "error": msg}) + logger.log(logging.ERROR, {"event": "naz.cli.main", "stage": "end", "error": msg}) sys.exit(77) rateLimiter = kwargs.get("rateLimiter") if rateLimiter: @@ -194,7 +187,7 @@ def main(): kwargs["rateLimiter"] = rateLimiter if inspect.isclass(rateLimiter): msg = "rateLimiter should be a class instance." - logger.exception({"event": "naz.cli.main", "stage": "end", "error": msg}) + logger.log(logging.ERROR, {"event": "naz.cli.main", "stage": "end", "error": msg}) sys.exit(77) hook = kwargs.get("hook") if hook: @@ -202,7 +195,7 @@ def main(): kwargs["hook"] = hook if inspect.isclass(hook): msg = "hook should be a class instance." - logger.exception({"event": "naz.cli.main", "stage": "end", "error": msg}) + logger.log(logging.ERROR, {"event": "naz.cli.main", "stage": "end", "error": msg}) sys.exit(77) throttle_handler = kwargs.get("throttle_handler") if throttle_handler: @@ -210,7 +203,7 @@ def main(): kwargs["throttle_handler"] = throttle_handler if inspect.isclass(throttle_handler): msg = "throttle_handler should be a class instance." - logger.exception({"event": "naz.cli.main", "stage": "end", "error": msg}) + logger.log(logging.ERROR, {"event": "naz.cli.main", "stage": "end", "error": msg}) sys.exit(77) if dry_run: @@ -234,10 +227,10 @@ def main(): loop.run_until_complete(cli.unbind()) cli.writer.close() except Exception as e: - logger.exception({"event": "naz.cli.main", "stage": "end", "error": str(e)}) + logger.log(logging.ERROR, {"event": "naz.cli.main", "stage": "end", "error": str(e)}) sys.exit(77) finally: - logger.info({"event": "naz.cli.main", "stage": "end"}) + logger.log(logging.INFO, {"event": "naz.cli.main", "stage": "end"}) if __name__ == "__main__": diff --git a/documentation/config.md b/documentation/config.md index ac9b9b70..e21865e1 100644 --- a/documentation/config.md +++ b/documentation/config.md @@ -48,6 +48,7 @@ encoding | encoding1 used to encode messages been sent to SMSC | gsm0 sequence_generator | python class instance used to generate sequence_numbers| naz.sequence.SimpleSequenceGenerator outboundqueue | python class instance implementing some queueing mechanism. 
messages to be sent to SMSC are queued using the said mechanism before been sent | N/A client_id | a unique string identifying a naz client class instance | "".join(random.choices(string.ascii_uppercase + string.digits, k=17)) +log_handler | python class instance to be used for logging | naz.logger.SimpleBaseLogger loglevel | the level at which to log | DEBUG log_metadata | metadata that will be included in all log statements | {"smsc_host": smsc_host, "system_id": system_id} codec_class | python class instance to be used to encode/decode messages | naz.nazcodec.SimpleNazCodec diff --git a/documentation/sphinx-docs/index.rst b/documentation/sphinx-docs/index.rst index 3759058e..0437c2f3 100644 --- a/documentation/sphinx-docs/index.rst +++ b/documentation/sphinx-docs/index.rst @@ -21,4 +21,5 @@ API reference ratelimiter sequence throttle - state \ No newline at end of file + state + logger diff --git a/documentation/sphinx-docs/introduction.rst b/documentation/sphinx-docs/introduction.rst index 961ce976..d8147702 100644 --- a/documentation/sphinx-docs/introduction.rst +++ b/documentation/sphinx-docs/introduction.rst @@ -170,6 +170,36 @@ So, for example if you wanted to annotate all log-events with a release version and then these will show up in all log events. by default, naz annotates all log events with smsc_host, system_id and client_id +``naz`` also gives you the ability to supply your own logger. +For example if you wanted ``naz`` to use key=value style of logging, then just create a logger that does just that: + +.. code-block:: python + + import naz + + class KVlogger(naz.logger.BaseLogger): + def __init__(self): + self.logger = logging.getLogger("myKVlogger") + handler = logging.StreamHandler() + formatter = logging.Formatter("%(message)s") + handler.setFormatter(formatter) + if not self.logger.handlers: + self.logger.addHandler(handler) + self.logger.setLevel("DEBUG") + def bind(self, loglevel, log_metadata): + pass + def log(self, level, log_data): + # implementation of key=value log renderer + message = ", ".join("{0}={1}".format(k, v) for k, v in log_data.items()) + self.logger.log(level, message) + + kvLog = KVlogger() + cli = naz.Client( + ... + log_handler=kvLog, + ) + + 3.2.2 hooks ===================== a hook is a class with two methods request and response, ie it implements naz's BaseHook interface as defined here. diff --git a/documentation/sphinx-docs/logger.rst b/documentation/sphinx-docs/logger.rst new file mode 100644 index 00000000..7d01f631 --- /dev/null +++ b/documentation/sphinx-docs/logger.rst @@ -0,0 +1,7 @@ +logger +--------------- + +.. automodule:: naz.logger + :members: BaseLogger, SimpleBaseLogger + :show-inheritance: + diff --git a/naz/__init__.py b/naz/__init__.py index b14f6701..9ebbbbba 100644 --- a/naz/__init__.py +++ b/naz/__init__.py @@ -3,6 +3,7 @@ from . import q # noqa: F401 from . import throttle # noqa: F401 from . import sequence # noqa: F401 +from . import logger # noqa: F401 from . import nazcodec # noqa: F401 from . import correlater # noqa: F401 from . import ratelimiter # noqa: F401 @@ -15,6 +16,5 @@ DataCoding, SmppDataCoding, ) -from .logger import NazLoggingAdapter # noqa: F401 from . 
import __version__ # noqa: F401 diff --git a/naz/client.py b/naz/client.py index bfce7aef..ea662a94 100644 --- a/naz/client.py +++ b/naz/client.py @@ -75,6 +75,7 @@ def __init__( replace_if_present_flag: int = 0x00000000, sm_default_msg_id: int = 0x00000000, enquire_link_interval: int = 300, + log_handler=None, loglevel: str = "DEBUG", log_metadata=None, codec_class=None, @@ -115,6 +116,7 @@ def __init__( outboundqueue: python class instance implementing some queueing mechanism. \ messages to be sent to SMSC are queued using the said mechanism before been sent client_id: a unique string identifying a naz client class instance + log_handler: python class instance to be used for logging loglevel: the level at which to log log_metadata: metadata that will be included in all log statements codec_class: python class instance to be used to encode/decode messages @@ -211,15 +213,11 @@ def __init__( self.writer: typing.Any = None # NB: currently, naz only uses to log levels; INFO and EXCEPTION - extra_log_data = {"log_metadata": self.log_metadata} - self._logger = logging.getLogger("naz.client") - handler = logging.StreamHandler() - formatter = logging.Formatter("%(message)s") - handler.setFormatter(formatter) - if not self._logger.handlers: - self._logger.addHandler(handler) - self._logger.setLevel(self.loglevel) - self.logger: logging.LoggerAdapter = logger.NazLoggingAdapter(self._logger, extra_log_data) + self.logger = log_handler + if not self.logger: + self.logger = logger.SimpleBaseLogger("naz.client") + self.logger.bind(loglevel=self.loglevel, log_metadata=self.log_metadata) + self._sanity_check_logger() self.rateLimiter = rateLimiter if not self.rateLimiter: @@ -248,6 +246,23 @@ def __init__( self.current_session_state = SmppSessionState.CLOSED + def _sanity_check_logger(self): + """ + called when instantiating the Client just to make sure the supplied + logger can log. + """ + try: + self.logger.log(logging.DEBUG, {"event": "sanity_check_logger"}) + except Exception as e: + raise e + + def _log(self, level, log_data): + # if the supplied logger is unable to log; we move on + try: + self.logger.log(level, log_data) + except Exception: + pass + @staticmethod def _find_data_coding(encoding): for key, val in SmppDataCoding.__dict__.items(): @@ -293,13 +308,13 @@ async def connect( """ make a network connection to SMSC server. 
""" - self.logger.info({"event": "naz.Client.connect", "stage": "start"}) + self._log(logging.INFO, {"event": "naz.Client.connect", "stage": "start"}) reader, writer = await asyncio.open_connection( self.smsc_host, self.smsc_port, loop=self.async_loop ) self.reader: asyncio.streams.StreamReader = reader self.writer: asyncio.streams.StreamWriter = writer - self.logger.info({"event": "naz.Client.connect", "stage": "end"}) + self._log(logging.INFO, {"event": "naz.Client.connect", "stage": "end"}) self.current_session_state = SmppSessionState.OPEN return reader, writer @@ -309,13 +324,14 @@ async def tranceiver_bind(self) -> None: """ smpp_command = SmppCommand.BIND_TRANSCEIVER log_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=17)) - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.tranceiver_bind", "stage": "start", "log_id": log_id, "smpp_command": smpp_command, - } + }, ) # body body = b"" @@ -342,13 +358,14 @@ async def tranceiver_bind(self) -> None: try: sequence_number = self.sequence_generator.next_sequence() except Exception as e: - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.tranceiver_bind", "stage": "end", "error": str(e), "smpp_command": smpp_command, - } + }, ) if sequence_number > self.max_sequence_number: @@ -366,7 +383,8 @@ async def tranceiver_bind(self) -> None: sequence_number=sequence_number, log_id=log_id, hook_metadata="" ) except Exception as e: - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.tranceiver_bind", "stage": "end", @@ -374,19 +392,20 @@ async def tranceiver_bind(self) -> None: "log_id": log_id, "state": "correlater put error", "error": str(e), - } + }, ) header = struct.pack(">IIII", command_length, command_id, command_status, sequence_number) full_pdu = header + body await self.send_data(smpp_command=smpp_command, msg=full_pdu, log_id=log_id) - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.tranceiver_bind", "stage": "end", "log_id": log_id, "smpp_command": smpp_command, - } + }, ) async def enquire_link(self, TESTING: bool = False) -> typing.Union[bytes, None]: @@ -403,13 +422,14 @@ async def enquire_link(self, TESTING: bool = False) -> typing.Union[bytes, None] await asyncio.sleep(self.enquire_link_interval) log_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=17)) - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.enquire_link", "stage": "start", "log_id": log_id, "smpp_command": smpp_command, - } + }, ) # body body = b"" @@ -421,14 +441,15 @@ async def enquire_link(self, TESTING: bool = False) -> typing.Union[bytes, None] try: sequence_number = self.sequence_generator.next_sequence() except Exception as e: - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.enquire_link", "stage": "end", "error": str(e), "log_id": log_id, "smpp_command": smpp_command, - } + }, ) if sequence_number > self.max_sequence_number: # prevent third party sequence_generators from ruining our party @@ -443,7 +464,8 @@ async def enquire_link(self, TESTING: bool = False) -> typing.Union[bytes, None] sequence_number=sequence_number, log_id=log_id, hook_metadata="" ) except Exception as e: - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.enquire_link", "stage": "end", @@ -451,7 +473,7 @@ async def enquire_link(self, TESTING: bool = False) -> typing.Union[bytes, None] "log_id": log_id, "state": "correlater put error", "error": str(e), - } + }, ) header = 
struct.pack( @@ -460,13 +482,14 @@ async def enquire_link(self, TESTING: bool = False) -> typing.Union[bytes, None] full_pdu = header + body # dont queue enquire_link in SimpleOutboundQueue since we dont want it to be behind 10k msgs etc await self.send_data(smpp_command=smpp_command, msg=full_pdu, log_id=log_id) - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.enquire_link", "stage": "end", "log_id": log_id, "smpp_command": smpp_command, - } + }, ) if TESTING: return full_pdu @@ -481,13 +504,14 @@ async def enquire_link_resp(self, sequence_number: int) -> None: """ smpp_command = SmppCommand.ENQUIRE_LINK_RESP log_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=17)) - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.enquire_link_resp", "stage": "start", "log_id": log_id, "smpp_command": smpp_command, - } + }, ) # body @@ -510,22 +534,24 @@ async def enquire_link_resp(self, sequence_number: int) -> None: try: await self.outboundqueue.enqueue(item_to_enqueue) except Exception as e: - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.enquire_link_resp", "stage": "end", "error": str(e), "log_id": log_id, "smpp_command": smpp_command, - } + }, ) - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.enquire_link_resp", "stage": "end", "log_id": log_id, "smpp_command": smpp_command, - } + }, ) async def unbind_resp(self, sequence_number: int) -> None: @@ -537,13 +563,14 @@ async def unbind_resp(self, sequence_number: int) -> None: """ smpp_command = SmppCommand.UNBIND_RESP log_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=17)) - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.unbind_resp", "stage": "start", "log_id": log_id, "smpp_command": smpp_command, - } + }, ) # body @@ -559,13 +586,14 @@ async def unbind_resp(self, sequence_number: int) -> None: full_pdu = header + body # dont queue unbind_resp in SimpleOutboundQueue since we dont want it to be behind 10k msgs etc await self.send_data(smpp_command=smpp_command, msg=full_pdu, log_id=log_id) - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.unbind_resp", "stage": "end", "log_id": log_id, "smpp_command": smpp_command, - } + }, ) async def deliver_sm_resp(self, sequence_number: int) -> None: @@ -577,13 +605,14 @@ async def deliver_sm_resp(self, sequence_number: int) -> None: """ smpp_command = SmppCommand.DELIVER_SM_RESP log_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=17)) - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.deliver_sm_resp", "stage": "start", "log_id": log_id, "smpp_command": smpp_command, - } + }, ) # body body = b"" @@ -611,23 +640,25 @@ async def deliver_sm_resp(self, sequence_number: int) -> None: try: await self.outboundqueue.enqueue(item_to_enqueue) except Exception as e: - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.deliver_sm_resp", "stage": "end", "error": str(e), "log_id": log_id, "smpp_command": smpp_command, - } + }, ) - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.deliver_sm_resp", "stage": "end", "log_id": log_id, "smpp_command": smpp_command, - } + }, ) # this method just enqueues a submit_sm msg to queue @@ -677,7 +708,8 @@ async def submit_sm( # 2. Octet String - A series of octets, not necessarily NULL terminated. 
smpp_command = SmppCommand.SUBMIT_SM - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.submit_sm", "stage": "start", @@ -686,7 +718,7 @@ async def submit_sm( "source_addr": source_addr, "destination_addr": destination_addr, "smpp_command": smpp_command, - } + }, ) item_to_enqueue = { "version": self.naz_message_protocol_version, @@ -699,16 +731,18 @@ async def submit_sm( try: await self.outboundqueue.enqueue(item_to_enqueue) except Exception as e: - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.submit_sm", "stage": "end", "error": str(e), "log_id": log_id, "smpp_command": smpp_command, - } + }, ) - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.submit_sm", "stage": "end", @@ -717,7 +751,7 @@ async def submit_sm( "source_addr": source_addr, "destination_addr": destination_addr, "smpp_command": smpp_command, - } + }, ) async def build_submit_sm_pdu( @@ -734,7 +768,8 @@ async def build_submit_sm_pdu( destination_addr: the identifier(eg msisdn) of the message recipient """ smpp_command = SmppCommand.SUBMIT_SM - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.build_submit_sm_pdu", "stage": "start", @@ -743,7 +778,7 @@ async def build_submit_sm_pdu( "source_addr": source_addr, "destination_addr": destination_addr, "smpp_command": smpp_command, - } + }, ) encoded_short_message = self.codec_class.encode( short_message, self.encoding, self.codec_errors_level @@ -789,14 +824,15 @@ async def build_submit_sm_pdu( try: sequence_number = self.sequence_generator.next_sequence() except Exception as e: - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.build_submit_sm_pdu", "stage": "end", "error": str(e), "log_id": log_id, "smpp_command": smpp_command, - } + }, ) if sequence_number > self.max_sequence_number: # prevent third party sequence_generators from ruining our party @@ -811,7 +847,8 @@ async def build_submit_sm_pdu( sequence_number=sequence_number, log_id=log_id, hook_metadata=hook_metadata ) except Exception as e: - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.build_submit_sm_pdu", "stage": "end", @@ -819,12 +856,13 @@ async def build_submit_sm_pdu( "log_id": log_id, "state": "correlater put error", "error": str(e), - } + }, ) header = struct.pack(">IIII", command_length, command_id, command_status, sequence_number) full_pdu = header + body - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.build_submit_sm_pdu", "stage": "end", @@ -833,7 +871,7 @@ async def build_submit_sm_pdu( "source_addr": source_addr, "destination_addr": destination_addr, "smpp_command": smpp_command, - } + }, ) return full_pdu @@ -862,14 +900,15 @@ async def send_data( log_msg = log_msg.replace(self.password, "{REDACTED}") except Exception: pass - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.send_data", "stage": "start", "smpp_command": smpp_command, "log_id": log_id, "msg": log_msg, - } + }, ) # check session state to see if we can send messages. 
@@ -878,7 +917,8 @@ async def send_data( error_msg = "smpp_command: {0} cannot be sent to SMSC when the client session state is: {1}".format( smpp_command, self.current_session_state ) - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.send_data", "stage": "end", @@ -887,7 +927,7 @@ async def send_data( "msg": log_msg, "current_session_state": self.current_session_state, "error": error_msg, - } + }, ) raise ValueError(error_msg) elif self.current_session_state == SmppSessionState.OPEN and smpp_command not in [ @@ -900,7 +940,8 @@ async def send_data( error_msg = "smpp_command: {0} cannot be sent to SMSC when the client session state is: {1}".format( smpp_command, self.current_session_state ) - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.send_data", "stage": "end", @@ -909,7 +950,7 @@ async def send_data( "msg": log_msg, "current_session_state": self.current_session_state, "error": error_msg, - } + }, ) raise ValueError(error_msg) @@ -922,7 +963,8 @@ async def send_data( smpp_command=smpp_command, log_id=log_id, hook_metadata=hook_metadata ) except Exception as e: - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.send_data", "stage": "end", @@ -930,7 +972,7 @@ async def send_data( "log_id": log_id, "state": "request hook error", "error": str(e), - } + }, ) # We use writer.drain() which is a flow control method that interacts with the IO write buffer. @@ -940,14 +982,15 @@ async def send_data( # ref: https://docs.python.org/3/library/asyncio-stream.html#asyncio.StreamWriter.drain self.writer.write(msg) await self.writer.drain() - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.send_data", "stage": "end", "smpp_command": smpp_command, "log_id": log_id, "msg": log_msg, - } + }, ) async def send_forever( @@ -961,7 +1004,7 @@ async def send_forever( """ retry_count = 0 while True: - self.logger.info({"event": "naz.Client.send_forever", "stage": "start"}) + self._log(logging.INFO, {"event": "naz.Client.send_forever", "stage": "start"}) # TODO: there are so many try-except classes in this func. # do something about that. 
@@ -969,13 +1012,14 @@ async def send_forever( # check with throttle handler send_request = await self.throttle_handler.allow_request() except Exception as e: - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.send_forever", "stage": "end", "state": "send_forever error", "error": str(e), - } + }, ) continue if send_request: @@ -983,13 +1027,14 @@ async def send_forever( # rate limit ourselves await self.rateLimiter.limit() except Exception as e: - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.send_forever", "stage": "end", "state": "send_forever error", "error": str(e), - } + }, ) continue @@ -998,7 +1043,8 @@ async def send_forever( except Exception as e: retry_count += 1 poll_queue_interval = self._retry_after(retry_count) - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.send_forever", "stage": "end", @@ -1007,7 +1053,7 @@ async def send_forever( ), "retry_count": retry_count, "error": str(e), - } + }, ) await asyncio.sleep(poll_queue_interval) continue @@ -1031,13 +1077,14 @@ async def send_forever( e = KeyError( "enqueued message/object is missing required field:{}".format(str(e)) ) - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.send_forever", "stage": "end", "state": "send_forever error", "error": str(e), - } + }, ) continue @@ -1047,37 +1094,40 @@ async def send_forever( log_id=log_id, hook_metadata=hook_metadata, ) - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.send_forever", "stage": "end", "log_id": log_id, "smpp_command": smpp_command, "send_request": send_request, - } + }, ) if TESTING: # offer escape hatch for tests to come out of endless loop return item_to_dequeue else: # throttle_handler didn't allow us to send request. 
- self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.send_forever", "stage": "end", "send_request": send_request, - } + }, ) try: await asyncio.sleep(await self.throttle_handler.throttle_delay()) except Exception as e: - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.send_forever", "stage": "end", "state": "send_forever error", "error": str(e), - } + }, ) continue if TESTING: @@ -1094,13 +1144,14 @@ async def receive_data(self, TESTING: bool = False) -> typing.Union[bytes, None] """ retry_count = 0 while True: - self.logger.info({"event": "naz.Client.receive_data", "stage": "start"}) + self._log(logging.INFO, {"event": "naz.Client.receive_data", "stage": "start"}) # todo: look at `pause_reading` and `resume_reading` methods command_length_header_data = await self.reader.read(4) if command_length_header_data == b"": retry_count += 1 poll_read_interval = self._retry_after(retry_count) - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.receive_data", "stage": "start", @@ -1108,7 +1159,7 @@ async def receive_data(self, TESTING: bool = False) -> typing.Union[bytes, None] poll_read_interval / 60 ), "retry_count": retry_count, - } + }, ) await asyncio.sleep(poll_read_interval) continue @@ -1125,20 +1176,21 @@ async def receive_data(self, TESTING: bool = False) -> typing.Union[bytes, None] chunk = await self.reader.read(min(MSGLEN - bytes_recd, 2048)) if chunk == b"": err = RuntimeError("socket connection broken") - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.receive_data", "stage": "end", "state": "socket connection broken", "error": str(err), - } + }, ) raise err chunks.append(chunk) bytes_recd = bytes_recd + len(chunk) full_pdu_data = command_length_header_data + b"".join(chunks) await self.parse_response_pdu(full_pdu_data) - self.logger.info({"event": "naz.Client.receive_data", "stage": "end"}) + self._log(logging.INFO, {"event": "naz.Client.receive_data", "stage": "end"}) if TESTING: # offer escape hatch for tests to come out of endless loop return full_pdu_data @@ -1151,7 +1203,7 @@ async def parse_response_pdu(self, pdu: bytes) -> None: Parameters: pdu: PDU in bytes, that have been read from network """ - self.logger.info({"event": "naz.Client.parse_response_pdu", "stage": "start"}) + self._log(logging.INFO, {"event": "naz.Client.parse_response_pdu", "stage": "start"}) header_data = pdu[:16] command_id_header_data = header_data[4:8] @@ -1169,25 +1221,27 @@ async def parse_response_pdu(self, pdu: bytes) -> None: ) except Exception as e: log_id, hook_metadata = "", "" - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.parse_response_pdu", "stage": "start", "log_id": log_id, "state": "correlater get error", "error": str(e), - } + }, ) smpp_command = self._search_by_command_id_code(command_id) if not smpp_command: - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.parse_response_pdu", "stage": "end", "log_id": log_id, "state": "command_id:{0} is unknown.".format(command_id), - } + }, ) raise ValueError("command_id:{0} is unknown.".format(command_id)) @@ -1198,14 +1252,15 @@ async def parse_response_pdu(self, pdu: bytes) -> None: log_id=log_id, hook_metadata=hook_metadata, ) - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.parse_response_pdu", "stage": "end", "smpp_command": smpp_command, "log_id": log_id, "command_status": command_status, - } + }, ) async def speficic_handlers( @@ -1230,18 +1285,20 @@ async def 
speficic_handlers( command_status_value=command_status_value ) if not commandStatus: - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.speficic_handlers", "stage": "start", "smpp_command": smpp_command, "log_id": log_id, "error": "command_status:{0} is unknown.".format(command_status_value), - } + }, ) elif commandStatus.value != SmppCommandStatus.ESME_ROK.value: # we got an error from SMSC - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.speficic_handlers", "stage": "start", @@ -1249,10 +1306,11 @@ async def speficic_handlers( "log_id": log_id, "command_status": commandStatus.value, "state": commandStatus.description, - } + }, ) else: - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.speficic_handlers", "stage": "start", @@ -1260,7 +1318,7 @@ async def speficic_handlers( "log_id": log_id, "command_status": commandStatus.value, "state": commandStatus.description, - } + }, ) try: @@ -1270,7 +1328,8 @@ async def speficic_handlers( elif commandStatus.value == SmppCommandStatus.ESME_RTHROTTLED.value: await self.throttle_handler.throttled() except Exception as e: - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.speficic_handlers", "stage": "end", @@ -1278,7 +1337,7 @@ async def speficic_handlers( "smpp_command": smpp_command, "log_id": log_id, "state": commandStatus.description, - } + }, ) if smpp_command in [ @@ -1345,7 +1404,8 @@ async def speficic_handlers( # it has no body await self.enquire_link_resp(sequence_number=sequence_number) else: - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.speficic_handlers", "stage": "end", @@ -1356,7 +1416,7 @@ async def speficic_handlers( "error": "the smpp_command:{0} has not been implemented in naz. 
please create a github issue".format( smpp_command ), - } + }, ) # call user's hook for responses @@ -1368,7 +1428,8 @@ async def speficic_handlers( smsc_response=commandStatus, ) except Exception as e: - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.speficic_handlers", "stage": "end", @@ -1376,7 +1437,7 @@ async def speficic_handlers( "log_id": log_id, "state": "response hook error", "error": str(e), - } + }, ) async def unbind(self) -> None: @@ -1385,13 +1446,14 @@ async def unbind(self) -> None: """ smpp_command = SmppCommand.UNBIND log_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=17)) - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.unbind", "stage": "start", "log_id": log_id, "smpp_command": smpp_command, - } + }, ) # body body = b"" @@ -1403,14 +1465,15 @@ async def unbind(self) -> None: try: sequence_number = self.sequence_generator.next_sequence() except Exception as e: - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.unbind", "stage": "end", "error": str(e), "log_id": log_id, "smpp_command": smpp_command, - } + }, ) if sequence_number > self.max_sequence_number: # prevent third party sequence_generators from ruining our party @@ -1425,7 +1488,8 @@ async def unbind(self) -> None: sequence_number=sequence_number, log_id=log_id, hook_metadata="" ) except Exception as e: - self.logger.exception( + self._log( + logging.ERROR, { "event": "naz.Client.unbind", "stage": "end", @@ -1433,18 +1497,19 @@ async def unbind(self) -> None: "log_id": log_id, "state": "correlater put error", "error": str(e), - } + }, ) header = struct.pack(">IIII", command_length, command_id, command_status, sequence_number) full_pdu = header + body # dont queue unbind in SimpleOutboundQueue since we dont want it to be behind 10k msgs etc await self.send_data(smpp_command=smpp_command, msg=full_pdu, log_id=log_id) - self.logger.info( + self._log( + logging.INFO, { "event": "naz.Client.unbind", "stage": "end", "log_id": log_id, "smpp_command": smpp_command, - } + }, ) diff --git a/naz/hooks.py b/naz/hooks.py index 691202f9..1dcfecf8 100644 --- a/naz/hooks.py +++ b/naz/hooks.py @@ -51,20 +51,22 @@ def __init__(self, logger) -> None: self.logger: logging.Logger = logger async def request(self, smpp_command: str, log_id: str, hook_metadata: str) -> None: - self.logger.info( + self.logger.log( + logging.INFO, { "event": "naz.SimpleHook.request", "stage": "start", "smpp_command": smpp_command, "log_id": log_id, "hook_metadata": hook_metadata, - } + }, ) async def response( self, smpp_command: str, log_id: str, hook_metadata: str, smsc_response: "naz.CommandStatus" ) -> None: - self.logger.info( + self.logger.log( + logging.INFO, { "event": "naz.SimpleHook.response", "stage": "start", @@ -72,5 +74,5 @@ async def response( "log_id": log_id, "hook_metadata": hook_metadata, "smsc_response": smsc_response.description, - } + }, ) diff --git a/naz/logger.py b/naz/logger.py index 4d29eb1d..264177bf 100644 --- a/naz/logger.py +++ b/naz/logger.py @@ -1,16 +1,88 @@ +import typing import logging -class NazLoggingAdapter(logging.LoggerAdapter): +class BaseLogger: + """ + Interface that must be implemented to satisfy naz's logger. + User implementations should inherit this class and + implement the :func:`bind ` and :func:`log ` methods with the type signatures shown. + + A logger is class with methods that are called whenever naz wants to log something. + This enables developers to implement logging in any way that they want. 
""" - This example adapter expects the passed in dict-like object to have a - 'log_metadata' key, whose value in brackets is apended to the log message. + + def bind(self, loglevel: str, log_metadata: dict) -> None: + """ + called when a naz client is been instantiated so that the logger can be + notified of loglevel & log_metadata that a user supplied to a naz client. + The logger can choose to bind these log_metadata to itself. + + Parameters: + loglevel: logging level eg DEBUG + log_metadata: log metadata that can be included in all log statements + """ + raise NotImplementedError("bind method must be implemented.") + + def log(self, level: int, log_data: dict) -> None: + """ + called by naz everytime it wants to log something. + + Parameters: + level: logging level eg `logging.INFO` + log_data: the message to log + """ + raise NotImplementedError("log method must be implemented.") + + +class SimpleBaseLogger(BaseLogger): """ + This is an implementation of BaseLogger. + It implements a structured logger that renders logs in a json/dict like manner. + + example usage: + .. code-block:: python + + logger = SimpleBaseLogger("myLogger") + logger.bind(loglevel="INFO", + log_metadata={"customer_id": "34541"}) + logger.log(logging.INFO, + {"event": "web_request", "url": "https://www.google.com/"}) + """ + + def __init__(self, logger_name: str): + """ + Parameters: + logger_name: name of the logger + """ + self.logger_name = logger_name + self.logger: typing.Any = None + + def bind(self, loglevel: str, log_metadata: dict) -> None: + self._logger = logging.getLogger(self.logger_name) + handler = logging.StreamHandler() + formatter = logging.Formatter("%(message)s") + handler.setFormatter(formatter) + if not self._logger.handlers: + self._logger.addHandler(handler) + self._logger.setLevel(loglevel) + self.logger: logging.LoggerAdapter = NazLoggingAdapter(self._logger, log_metadata) + + def log(self, level: int, log_data: dict) -> None: + if not self.logger: + self.bind(loglevel="DEBUG", log_metadata={}) + if level >= logging.ERROR: + self.logger.log(level, log_data, exc_info=True) + else: + self.logger.log(level, log_data) + + +class NazLoggingAdapter(logging.LoggerAdapter): def process(self, msg, kwargs): if isinstance(msg, str): - return msg, kwargs + merged_msg = "{0} {1}".format(msg, self.extra) + return merged_msg, kwargs else: - log_metadata = self.extra.get("log_metadata") - merged_log_event = {**msg, **log_metadata} - return "{0}".format(merged_log_event), kwargs + merged_msg = {**msg, **self.extra} + return "{0}".format(merged_msg), kwargs diff --git a/naz/ratelimiter.py b/naz/ratelimiter.py index ce4b8332..4dc24b4a 100644 --- a/naz/ratelimiter.py +++ b/naz/ratelimiter.py @@ -61,12 +61,13 @@ def __init__( self.effective_send_rate: float = 0 async def limit(self) -> None: - self.logger.info({"event": "naz.SimpleRateLimiter.limit", "stage": "start"}) + self.logger.log(logging.INFO, {"event": "naz.SimpleRateLimiter.limit", "stage": "start"}) while self.tokens < 1: self._add_new_tokens() # todo: sleep in an exponetial manner upto a maximum then wrap around. 
await asyncio.sleep(self.delay_for_tokens) - self.logger.info( + self.logger.log( + logging.INFO, { "event": "naz.SimpleRateLimiter.limit", "stage": "end", @@ -74,7 +75,7 @@ async def limit(self) -> None: "send_rate": self.send_rate, "delay": self.delay_for_tokens, "effective_send_rate": self.effective_send_rate, - } + }, ) self.messages_delivered += 1 diff --git a/naz/throttle.py b/naz/throttle.py index 83e45cb9..2eb83ab2 100644 --- a/naz/throttle.py +++ b/naz/throttle.py @@ -92,7 +92,9 @@ def percent_throttles(self) -> float: return round((self.throttle_responses / (total_smsc_responses)) * 100, 2) async def allow_request(self) -> bool: - self.logger.info({"event": "naz.SimpleThrottleHandler.allow_request", "stage": "start"}) + self.logger.log( + logging.INFO, {"event": "naz.SimpleThrottleHandler.allow_request", "stage": "start"} + ) # calculat percentage of throttles before resetting NON_throttle_responses and throttle_responses current_percent_throttles: float = self.percent_throttles _throttle_responses: int = self.throttle_responses @@ -107,7 +109,8 @@ async def allow_request(self) -> bool: self.throttle_responses = 0 self.updated_at = now if current_percent_throttles > self.deny_request_at: - self.logger.info( + self.logger.log( + logging.INFO, { "event": "naz.SimpleThrottleHandler.allow_request", "stage": "end", @@ -118,10 +121,11 @@ async def allow_request(self) -> bool: "sample_size": self.sample_size, "deny_request_at": self.deny_request_at, "state": "deny_request", - } + }, ) return False - self.logger.info( + self.logger.log( + logging.INFO, { "event": "naz.SimpleThrottleHandler.allow_request", "stage": "end", @@ -132,7 +136,7 @@ async def allow_request(self) -> bool: "sample_size": self.sample_size, "deny_request_at": self.deny_request_at, "state": "allow_request", - } + }, ) return True diff --git a/tests/test_client.py b/tests/test_client.py index bcb8742b..b354d2bf 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -534,3 +534,16 @@ def mock_create_client(): self.assertIn( "That encoding:{0} is not recognised.".format(encoding), str(raised_exception.exception) ) + + def test_logger_called(self): + with mock.patch("naz.logger.SimpleBaseLogger.log") as mock_logger_log: + mock_logger_log.return_value = None + self._run( + self.cli.parse_response_pdu( + pdu=b"\x00\x00\x00\x18\x80\x00\x00\t\x00\x00\x00\x00\x00\x00\x00\x06SMPPSim\x00" + ) + ) + self.assertTrue(mock_logger_log.called) + self.assertEqual( + mock_logger_log.call_args[0][1]["event"], "naz.Client.parse_response_pdu" + ) diff --git a/tests/test_logger.py b/tests/test_logger.py new file mode 100644 index 00000000..1d36e8f3 --- /dev/null +++ b/tests/test_logger.py @@ -0,0 +1,96 @@ +# do not to pollute the global namespace. +# see: https://python-packaging.readthedocs.io/en/latest/testing.html + + +import logging +import datetime +from unittest import TestCase + +import naz + + +class TestLogger(TestCase): + """ + run tests as: + python -m unittest discover -v -s . 
+ run one testcase as: + python -m unittest -v tests.test_logger.TestLogger.test_something + """ + + def setUp(self): + self.logger = naz.logger.SimpleBaseLogger("myLogger") + + def tearDown(self): + pass + + def test_can_bind(self): + self.logger.bind(loglevel="INFO", log_metadata={"customer_id": "34541"}) + + def test_can_log_string(self): + self.logger.log(level=logging.WARN, log_data="can log string") + + def test_can_log_dict(self): + log_id = 234_255 + now = datetime.datetime.now() + self.logger.log( + level=logging.WARN, + log_data={"event": "myEvent", "stage": "start", "log_id": log_id, "now": now}, + ) + + def test_bind_and_log_string(self): + self.logger.bind(loglevel="INFO", log_metadata={"customer_id": "34541"}) + self.logger.log(level=logging.WARN, log_data="can log string") + + def test_bind_and_log_dict(self): + self.logger.bind(loglevel="INFO", log_metadata={"customer_id": "34541"}) + self.logger.log(level=logging.WARN, log_data={"name": "Magic Johnson"}) + + +class KVlogger(naz.logger.BaseLogger): + """ + A simple implementation of a key=value + log renderer. + """ + + def __init__(self): + self.logger = logging.getLogger("myKVlogger") + handler = logging.StreamHandler() + formatter = logging.Formatter("%(message)s") + handler.setFormatter(formatter) + if not self.logger.handlers: + self.logger.addHandler(handler) + self.logger.setLevel("DEBUG") + + def bind(self, loglevel, log_metadata): + pass + + def log(self, level, log_data): + # implementation of key=value log renderer + message = ", ".join("{0}={1}".format(k, v) for k, v in log_data.items()) + self.logger.log(level, message) + + +class TestCustomLogger(TestCase): + """ + run tests as: + python -m unittest discover -v -s . + run one testcase as: + python -m unittest -v tests.test_logger.TestCustomLogger.test_something + """ + + def setUp(self): + self.kvLog = KVlogger() + + def tearDown(self): + pass + + def test_can_bind(self): + self.kvLog.bind(loglevel="INFO", log_metadata={"customer_id": "34541"}) + + def test_can_log_dict(self): + log_id = 234_255 + now = datetime.datetime.now() + self.kvLog.log( + level=logging.WARN, + log_data={"event": "myEvent", "stage": "start", "log_id": log_id, "now": now}, + )
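
A minimal usage sketch of the new ``log_handler`` hook, assembled from the ``introduction.rst`` and ``tests/test_logger.py`` additions above; the logger name, the metadata values, and the commented-out client wiring are illustrative placeholders (the full ``naz.Client`` argument list is elided with ``...`` exactly as the docs do):

    import logging

    import naz


    class KVlogger(naz.logger.BaseLogger):
        """Renders naz's structured log events as key=value lines."""

        def __init__(self):
            self.logger = logging.getLogger("myKVlogger")
            handler = logging.StreamHandler()
            handler.setFormatter(logging.Formatter("%(message)s"))
            if not self.logger.handlers:
                self.logger.addHandler(handler)
            self.logger.setLevel("DEBUG")

        def bind(self, loglevel, log_metadata):
            # this simple renderer ignores the loglevel/log_metadata that
            # naz binds at client instantiation
            pass

        def log(self, level, log_data):
            # naz may pass either a dict (structured event) or a plain string
            if isinstance(log_data, dict):
                log_data = ", ".join(
                    "{0}={1}".format(k, v) for k, v in log_data.items()
                )
            self.logger.log(level, log_data)


    kv_log = KVlogger()
    kv_log.bind(loglevel="INFO", log_metadata={"environment": "staging"})  # placeholder metadata
    kv_log.log(logging.INFO, {"event": "example.startup", "stage": "start"})

    # the logger is then handed to the client the same way the docs above show:
    # cli = naz.Client(
    #     ...,
    #     log_handler=kv_log,
    # )

Because ``naz.Client`` falls back to ``naz.logger.SimpleBaseLogger`` when ``log_handler`` is not supplied, and wraps every call in ``_log``/``_sanity_check_logger``, a custom logger only needs to implement ``bind`` and ``log``; any exception it raises after the initial sanity check is swallowed rather than taking the client down.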