
Commit

Merge pull request #973 from lgtm-migrator/codeql
Add CodeQL workflow for GitHub code scanning
nolar authored Nov 13, 2022
2 parents 2d42f70 + bcaf7c7 commit 825151a
Showing 26 changed files with 153 additions and 109 deletions.
41 changes: 41 additions & 0 deletions .github/workflows/codeql.yml
@@ -0,0 +1,41 @@
name: "CodeQL"

on:
push:
branches: [ "main" ]
pull_request:
branches: [ "main" ]
schedule:
- cron: "6 2 * * 3"

jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write

strategy:
fail-fast: false
matrix:
language: [ python ]

steps:
- name: Checkout
uses: actions/checkout@v3

- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
queries: +security-and-quality

- name: Autobuild
uses: github/codeql-action/autobuild@v2

- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
with:
category: "/language:${{ matrix.language }}"
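A note on the workflow above: it scans on every push and pull request to the main branch, plus a weekly scheduled run (the cron expression "6 2 * * 3" fires Wednesdays at 02:06 UTC). In the queries input, the leading "+" in +security-and-quality adds the extended security-and-quality suite on top of the default queries rather than replacing them.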
3 changes: 2 additions & 1 deletion _importlinter_conditional.py
@@ -18,6 +18,7 @@

import astpath
from importlinter import Contract, ContractCheck, fields, output
+from importlinter.domain.ports.graph import ImportGraph


class ConditionalImportContract(Contract):
@@ -28,7 +29,7 @@ class ConditionalImportContract(Contract):
    source_modules = fields.ListField(subfield=fields.ModuleField())
    conditional_modules = fields.ListField(subfield=fields.ModuleField())

-    def check(self, graph):
+    def check(self, graph: ImportGraph) -> ContractCheck:
        failed_details = []

        # Combine all source x all target (secured) modules.
3 changes: 2 additions & 1 deletion docs/kwargs.rst
@@ -77,7 +77,8 @@ each time with the proper values of old/new/diff/param kwargs for those fields:
    @kopf.on.update('KopfExample', param=10, field='spec.field')
    @kopf.on.update('KopfExample', param=1, field='spec')
-    def fn(param, **_): ...
+    def fn(param, **_):
+        pass
.. kwarg:: settings
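(The same one-line rewrite recurs throughout this commit: handler bodies written as "..." become explicit "pass" statements, presumably to satisfy CodeQL's quality queries, which report a bare "..." expression statement as a statement with no effect.)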
4 changes: 2 additions & 2 deletions kopf/_cogs/aiokits/aioadapters.py
@@ -18,7 +18,7 @@ async def wait_flag(
    but we support them for convenience.
    """
    if flag is None:
-        pass
+        return None
    elif isinstance(flag, asyncio.Future):
        return await flag
    elif isinstance(flag, asyncio.Event):
@@ -43,7 +43,7 @@ async def raise_flag(
    but we support them for convenience.
    """
    if flag is None:
-        pass
+        return None
    elif isinstance(flag, asyncio.Future):
        flag.set_result(None)
    elif isinstance(flag, asyncio.Event):
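For context, a minimal self-contained sketch of the adapter pattern these two hunks implement. The asyncio.Event branch is cut off by the diff, so its body here is an assumption, as is the function name:

import asyncio
from typing import Optional, Union

Flag = Union[asyncio.Future, asyncio.Event]

async def wait_flag_sketch(flag: Optional[Flag]) -> object:
    # Accept "no flag", a Future, or an Event, and wait on it uniformly.
    if flag is None:
        return None
    elif isinstance(flag, asyncio.Future):
        return await flag
    elif isinstance(flag, asyncio.Event):
        return await flag.wait()  # assumed body: the diff truncates this branch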
2 changes: 1 addition & 1 deletion kopf/_cogs/aiokits/aioenums.py
@@ -172,5 +172,5 @@ def __await__(self) -> Generator[None, None, AsyncFlagWaiter[FlagReasonT]]:
        try:
            yield from task
        except asyncio.TimeoutError:
-            pass
+            pass  # the requested time limit is reached, exit regardless of the state
        return self._waiter  # the original checker! not the time-limited one!
5 changes: 3 additions & 2 deletions kopf/_cogs/aiokits/aiotasks.py
@@ -70,7 +70,7 @@ async def cancel_coro(
    try:
        await corotask
    except asyncio.CancelledError:
-        pass
+        pass  # cancellations are expected at this point


async def guard(
@@ -237,7 +237,7 @@ async def reraise(
        try:
            task.result()  # can raise the regular (non-cancellation) exceptions.
        except asyncio.CancelledError:
-            pass
+            pass  # re-raise anything except regular cancellations/exits


async def all_tasks(
@@ -386,6 +386,7 @@ async def _task_cleaner(self) -> None:
            try:
                await task
            except BaseException:
+                # The errors are handled in the done-callback. Suppress what has leaked for safety.
                pass

            # Ping other tasks to refill the pool of running tasks (or to close the scheduler).
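An aside on the recurring "except asyncio.CancelledError: pass" pattern: the standard library has an equivalent that reads as intentional without a comment. A minimal sketch, not kopf's actual code:

import asyncio
import contextlib

async def cancel_and_wait(task: asyncio.Task) -> None:
    # Cancel the task and await its completion, swallowing the expected
    # CancelledError, with the same effect as the try/except blocks above.
    task.cancel()
    with contextlib.suppress(asyncio.CancelledError):
        await task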
2 changes: 1 addition & 1 deletion kopf/_cogs/clients/auth.py
@@ -213,5 +213,5 @@ def purge(self) -> None:
            try:
                os.remove(path)
            except OSError:
-                pass
+                pass  # already removed
        self._paths.clear()
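(Since Python 3.8, pathlib.Path(path).unlink(missing_ok=True) expresses the same intent without an empty except block, though it suppresses only FileNotFoundError rather than every OSError as here.)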
8 changes: 4 additions & 4 deletions kopf/_cogs/structs/dicts.py
@@ -177,14 +177,14 @@ def remove(
        try:
            del d[path[0]]
        except KeyError:
-            pass
+            pass  # already absent

    else:
        try:
            # Recursion is the easiest way to implement it, assuming the bodies/patches are shallow.
            remove(d[path[0]], path[1:])
        except KeyError:
-            pass
+            pass  # already absent
        else:
            # Clean the parent dict if it has become empty due to deletion of the only sub-key.
            # Upper parents will be handled by upper recursion functions.
@@ -245,14 +245,14 @@ def walk(
            try:
                yield resolve_obj(objs, parse_field(subfield))
            except (AttributeError, KeyError):
-                pass
+                pass  # do not dive deep into non-existent fields or non-dicts
    elif isinstance(objs, collections.abc.Mapping):
        yield objs  # type: ignore
        for subfield in (nested if nested is not None else []):
            try:
                yield resolve(objs, parse_field(subfield))
            except KeyError:
-                pass
+                pass  # avoid diving into non-dicts, ignore them
    elif isinstance(objs, collections.abc.Iterable):
        for obj in objs:
            yield from walk(obj, nested=nested)
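To illustrate the parent-cleanup behaviour that the comments describe, a hypothetical use of the remove() helper above; the list-style path argument is assumed from the recursive call in the hunk:

d = {'spec': {'field': 'value'}}
remove(d, ['spec', 'field'])  # deletes the leaf key...
assert d == {}                # ...and the emptied parent 'spec' dict is cleaned up too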
2 changes: 1 addition & 1 deletion kopf/_core/engines/indexing.py
@@ -50,7 +50,7 @@ def _discard(self, acckey: Key) -> None:
        try:
            del self.__items[acckey]
        except KeyError:
-            pass
+            pass  # already absent

    # Indexers' internal protocol. Must not be used by handlers & operators.
    def _replace(self, acckey: Key, obj: _V) -> None:
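(A comment-free equivalent of this discard would be self.__items.pop(acckey, None).)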
2 changes: 1 addition & 1 deletion kopf/_core/engines/peering.py
@@ -196,7 +196,7 @@ async def keepalive(
                lifetime=0,
            ))
        except asyncio.CancelledError:
-            pass
+            pass  # cancellations are treated as normal exiting
        except Exception:
            logger.exception(f"Couldn't remove self from the peering. Ignoring.")
2 changes: 1 addition & 1 deletion kopf/_core/engines/probing.py
@@ -44,7 +44,7 @@ async def health_reporter(
    async def get_health(
            request: aiohttp.web.Request,
    ) -> aiohttp.web.Response:
-        nonlocal probing_timestamp
+        nonlocal probing_container, probing_timestamp, probing_max_age, probing_lock

        # Recollect the data on-demand, and only if it is older than a reasonable caching period.
        # Protect against multiple parallel requests performing the same heavy activity.
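Strictly speaking, nonlocal is only required for names that are rebound inside the closure; declaring the merely-read names as well documents the closure's dependencies explicitly without changing behaviour.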
8 changes: 4 additions & 4 deletions kopf/_core/reactor/queueing.py
@@ -151,7 +151,7 @@ async def watcher(
    watcher_task = asyncio.current_task()
    worker_error: Optional[BaseException] = None
    def exception_handler(exc: BaseException) -> None:
-        nonlocal worker_error
+        nonlocal worker_error, watcher_task
        if worker_error is None:
            worker_error = exc
            if watcher_task is not None:  # never happens, but is needed for type-checking.
@@ -306,7 +306,7 @@ async def worker(
            shouldstop = shouldstop or isinstance(next_event, EOS)
            raw_event = prev_event if isinstance(next_event, EOS) else next_event
        except asyncio.TimeoutError:
-            pass
+            pass  # the batch accumulation is over, we can proceed to the processing

        # Exit gracefully and immediately on the end-of-stream marker sent by the watcher.
        if isinstance(raw_event, EOS):
@@ -333,7 +333,7 @@
            try:
                del streams[key]
            except KeyError:
-                pass
+                pass  # already absent

            # Notify the depletion routine about the changes in the workers'/streams' overall state.
            # * This should happen STRICTLY AFTER the removal from the streams[], and
@@ -363,7 +363,7 @@ async def _wait_for_depletion(
            signaller.wait_for(lambda: not streams or scheduler.empty()),
            timeout=settings.batching.exit_timeout)
    except asyncio.TimeoutError:
-        pass
+        pass  # if not depleted as configured, proceed with what's left and let it fail

    # The last check if the termination is going to be graceful or not.
    if streams:
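The asyncio.TimeoutError branch in worker() is the heart of the event batching: newer events keep replacing the pending one until the stream stays quiet, and only then does processing proceed with the latest event. A simplified, self-contained sketch of that trick, with illustrative names, not the actual worker() code:

import asyncio

async def latest_event(queue: asyncio.Queue, batch_window: float) -> object:
    # Wait for the first event, then keep replacing it with newer ones
    # for as long as the queue yields something within the batch window.
    event = await queue.get()
    while True:
        try:
            event = await asyncio.wait_for(queue.get(), timeout=batch_window)
        except asyncio.TimeoutError:
            break  # the batch accumulation is over; process the latest event
    return event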
4 changes: 2 additions & 2 deletions kopf/_kits/webhooks.py
@@ -384,11 +384,11 @@ def build_certificate(
        try:
            parsed_ips[hostname] = ipaddress.IPv4Address(hostname)
        except ipaddress.AddressValueError:
-            pass
+            pass  # non-parsable IPs are considered to be regular hostnames
        try:
            parsed_ips[hostname] = ipaddress.IPv6Address(hostname)
        except ipaddress.AddressValueError:
-            pass
+            pass  # non-parsable IPs are considered to be regular hostnames

    # Later, only the normalised IPs are used as SANs, not the raw IPs.
    # Remove bindable but non-accessible addresses (like 0.0.0.0) from the SANs.
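A side note on the two-step IPv4/IPv6 probing above: the standard library can classify both families in one call. A minimal equivalent sketch:

import ipaddress
from typing import Optional, Union

def parse_ip(hostname: str) -> Optional[Union[ipaddress.IPv4Address, ipaddress.IPv6Address]]:
    # ip_address() parses both IPv4 and IPv6 literals and raises
    # ValueError for anything else, i.e. for regular hostnames.
    try:
        return ipaddress.ip_address(hostname)
    except ValueError:
        return None  # a regular hostname, not an IP literal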
12 changes: 6 additions & 6 deletions tests/admission/test_admission_manager.py
@@ -116,10 +116,10 @@ async def test_patching_on_changes(
        mocker, settings, registry, insights, selector, resource, reason, k8s_mocked):

    @kopf.on.validate(*resource, registry=registry)
-    def fn_v(**_): ...
+    def fn_v(**_): pass

    @kopf.on.mutate(*resource, registry=registry)
-    def fn_m(**_): ...
+    def fn_m(**_): pass

    container = Container()
    mocker.patch.object(container, 'as_changed', return_value=aiter([
@@ -164,10 +164,10 @@ async def test_patching_purges_non_permanent_webhooks(
        mocker, settings, registry, insights, selector, resource, reason, k8s_mocked):

    @kopf.on.validate(*resource, registry=registry, persistent=False)
-    def fn_v(**_): ...
+    def fn_v(**_): pass

    @kopf.on.mutate(*resource, registry=registry, persistent=False)
-    def fn_m(**_): ...
+    def fn_m(**_): pass

    container = Container()
    mocker.patch.object(container, 'as_changed', return_value=aiter([
@@ -195,10 +195,10 @@ async def test_patching_leaves_permanent_webhooks(
        mocker, settings, registry, insights, selector, resource, reason, k8s_mocked):

    @kopf.on.validate(*resource, registry=registry, persistent=True)
-    def fn_v(**_): ...
+    def fn_v(**_): pass

    @kopf.on.mutate(*resource, registry=registry, persistent=True)
-    def fn_m(**_): ...
+    def fn_m(**_): pass

    container = Container()
    mocker.patch.object(container, 'as_changed', return_value=aiter([
4 changes: 2 additions & 2 deletions tests/admission/test_admission_server.py
@@ -16,10 +16,10 @@ async def test_requires_webserver_if_webhooks_are_defined(
        settings, registry, insights, resource):

    @kopf.on.validate(*resource, registry=registry)
-    def fn_v(**_): ...
+    def fn_v(**_): pass

    @kopf.on.mutate(*resource, registry=registry)
-    def fn_m(**_): ...
+    def fn_m(**_): pass

    container = Container()
    with pytest.raises(Exception) as err:
2 changes: 1 addition & 1 deletion tests/apis/test_api_requests.py
@@ -246,7 +246,7 @@ async def stream_slowly(request: aiohttp.web.Request) -> aiohttp.web.StreamResponse:
            await response.write(b'{"fake": "result2"}\n')
            await response.write_eof()
        except ConnectionError:
-            pass
+            pass  # the client side sometimes disconnects earlier, ignore it
        return response

    aresponses.add(hostname, '/url', method, stream_slowly)
4 changes: 2 additions & 2 deletions tests/observation/test_processing_of_resources.py
@@ -104,7 +104,7 @@ def insights_resources(request, registry, insights):
    decorator, insights_field = request.param

    @decorator('group1', 'version1', 'plural1')
-    def fn(**_): ...
+    def fn(**_): pass

    return getattr(insights, insights_field)

@@ -115,7 +115,7 @@
        settings, registry, apis_mock, group1_mock, timer, etype, decorator, insights):

    @decorator('group1', 'version1', 'plural1')
-    def fn(**_): ...
+    def fn(**_): pass

    e1 = RawEvent(type=etype, object=RawBody(spec={'group': 'group1'}))
