Merge tag 'upstream0910' into develop
kentnsw committed Sep 14, 2022
2 parents 0c7020e + e1685a5 commit e71b22a
Showing 49 changed files with 18,685 additions and 1,171 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci-main.yml
@@ -135,7 +135,7 @@ jobs:
           . test.env && poetry run pytest -n auto tests tools \
             --cov c7n --cov tools/c7n_azure/c7n_azure \
             --cov tools/c7n_gcp/c7n_gcp --cov tools/c7n_kube/c7n_kube \
-            --cov tools/c7n_mailer/c7n_mailer
+            --cov tools/c7n_mailer/c7n_mailer --cov tools/c7n_tencentcloud/c7n_tencentcloud
           poetry run coverage xml
         else
           . test.env && poetry run pytest -n auto tests tools
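Each --cov flag stacks another source tree onto the same coverage run, which is why the tencentcloud package can simply be appended to the existing invocation. As a rough sketch, assuming pytest-cov is installed and trimming the path list down to two entries, the same stacked options can be driven programmatically:

    import pytest

    # Sketch only: each --cov option appends a package to one combined
    # coverage measurement, mirroring the stacked flags in ci-main.yml.
    pytest.main([
        "tests",
        "--cov=c7n",
        "--cov=tools/c7n_tencentcloud/c7n_tencentcloud",
    ])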
49 changes: 49 additions & 0 deletions .github/workflows/functional.yaml
@@ -0,0 +1,49 @@
name: "Functional"

env:
terraform_version: "1.2.8"
C7N_FUNCTIONAL: true
C7N_TEST_RUN: true

on:
workflow_dispatch: {}
schedule:
# UTC 6 AM aka 11 PM PST, 2 AM EST, everyday
- cron: "0 6 * * *"

jobs:
AWS:
runs-on: ubuntu-latest
permissions:
id-token: write
contents: read
checks: write
steps:
- uses: actions/checkout@v3
- uses: actions/[email protected]
with:
python-version: "3.8"

- uses: aws-actions/[email protected]
with:
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
aws-region: us-east-1

- uses: hashicorp/setup-terraform@v2
with:
terraform_version: "${{ env.terraform_version }}"

- name: Install Requirements
run: |
mkdir -p .tfcache
pip install --cache-dir=./.pip-cache -r requirements.txt
- name: Test
run: |
pytest tests -m "not skiplive" -m terraform -n auto --junit-xml=report-aws.xml
- name: JUnit Report Action
uses: mikepenz/[email protected]
if: always()
with:
report_paths: 'report-aws.xml'
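The env block's C7N_FUNCTIONAL and C7N_TEST_RUN flags are what opt this nightly job into live AWS calls. As a hedged illustration of the general pattern rather than c7n's actual marker plumbing, a suite can gate live tests on such a flag like this (the marker and test names here are hypothetical):

    import os

    import pytest

    # Hypothetical sketch: skip live-infrastructure tests unless the
    # C7N_FUNCTIONAL flag from the workflow's env block is set.
    functional = pytest.mark.skipif(
        os.environ.get("C7N_FUNCTIONAL") != "true",
        reason="functional tests require C7N_FUNCTIONAL=true",
    )


    @functional
    def test_live_bucket_policy():
        # Would exercise real AWS resources via the OIDC-assumed role.
        ...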
81 changes: 76 additions & 5 deletions c7n/resources/aws.py
@@ -4,6 +4,8 @@
 from c7n.provider import clouds, Provider
 
 from collections import Counter, namedtuple
+from urllib.request import urlopen, urlparse, Request
+from urllib.error import HTTPError
 import contextlib
 import copy
 import datetime
@@ -23,7 +25,7 @@

 from c7n.credentials import SessionFactory
 from c7n.config import Bag
-from c7n.exceptions import PolicyValidationError
+from c7n.exceptions import ClientError, InvalidOutputConfig, PolicyValidationError
 from c7n.log import CloudWatchLogHandler
 
 from .resource_map import ResourceMap
@@ -123,6 +125,66 @@ def shape_validate(params, shape_name, service):
         raise PolicyValidationError(report.generate_report())
 
 
+def get_bucket_region_clientless(bucket, s3_endpoint):
+    """Attempt to determine a bucket region without a client
+
+    We can make an unauthenticated HTTP HEAD request to S3 in an attempt to find a bucket's
+    region. This avoids some issues with cross-account/cross-region uses of the
+    GetBucketPolicy API action. Because bucket names are unique within
+    AWS partitions, we can make requests to a single regional S3 endpoint
+    and get redirected if a bucket lives in another region within the
+    same partition.
+
+    This approach is inspired by some sample code from a Go SDK issue comment,
+    which @sean-zou mentioned in #7593:
+
+    https://github.com/aws/aws-sdk-go/issues/720#issuecomment-613038544
+
+    Return a region string, or None if we're unable to determine one.
+    """
+    region = None
+    s3_endpoint_parts = urlparse(s3_endpoint)
+    # Use a "path-style" S3 URL here to avoid failing TLS certificate validation
+    # on buckets with a dot in the name.
+    #
+    # According to the following blog post, before deprecating path-style
+    # URLs AWS will provide a way for virtual-hosted-style URLs to handle
+    # buckets with dots in their names. Using path-style URLs here in
+    # the meantime seems reasonable, compared to alternatives like forcing
+    # HTTP or ignoring certificate validation.
+    #
+    # https://aws.amazon.com/blogs/aws/amazon-s3-path-deprecation-plan-the-rest-of-the-story/
+    bucket_endpoint = f'https://{s3_endpoint_parts.netloc}/{bucket}'
+    request = Request(bucket_endpoint, method='HEAD')
+    try:
+        # Dynamic use of urllib trips up static analyzers because
+        # of the potential to accidentally allow unexpected schemes
+        # like file:/. Here we're hardcoding the https scheme, so
+        # we can ignore those specific checks.
+        # nosemgrep: python.lang.security.audit.dynamic-urllib-use-detected.dynamic-urllib-use-detected # noqa
+        response = urlopen(request)  # nosec B310
+        region = response.headers.get('x-amz-bucket-region')
+    except HTTPError as err:
+        # Permission errors or redirects for valid buckets should still contain a
+        # header we can use to determine the bucket region.
+        region = err.headers.get('x-amz-bucket-region')
+
+    return region
+
+
+def get_bucket_region(bucket, client):
+    """Determine a bucket's region using the GetBucketLocation API action
+
+    Look up a bucket's location constraint and map it to a region name as described in
+    https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLocation.html
+    """
+    location = client.get_bucket_location(Bucket=bucket)['LocationConstraint']
+
+    # Remap region for cases where the location constraint doesn't match
+    region = {None: 'us-east-1', 'EU': 'eu-west-1'}.get(location, location)
+    return region
+
+
 class Arn(namedtuple('_Arn', (
         'arn', 'partition', 'service', 'region',
         'account_id', 'resource', 'resource_type', 'separator'))):
@@ -544,11 +606,20 @@ def __init__(self, ctx, config):
         super().__init__(ctx, config)
         # can't use a local session as we don't want an unassumed session cached.
         s3_client = self.ctx.session_factory(assume=False).client('s3')
-        region = s3_client.get_bucket_location(Bucket=self.bucket)['LocationConstraint']
-        # if region is None, we use us-east-1
-        region = region or "us-east-1"
+
+        # Try determining the output bucket region via HTTP requests since
+        # that works more consistently in cross-region scenarios. Fall back
+        # to the GetBucketLocation API if necessary.
+        try:
+            self.bucket_region = (
+                get_bucket_region_clientless(self.bucket, s3_client.meta.endpoint_url) or
+                get_bucket_region(self.bucket, s3_client)
+            )
+        except ClientError as err:
+            raise InvalidOutputConfig(
+                f'unable to determine a region for output bucket {self.bucket}: {err}') from None
         self.transfer = S3Transfer(
-            self.ctx.session_factory(region=region, assume=False).client('s3'))
+            self.ctx.session_factory(region=self.bucket_region, assume=False).client('s3'))
 
     def upload_file(self, path, key):
         self.transfer.upload_file(
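Taken together, the two new helpers give S3Output a layered lookup: an unauthenticated HEAD request first, then the GetBucketLocation API. S3 includes an x-amz-bucket-region header even on 403 responses and redirects, which is what the HTTPError handler relies on. A minimal sketch of calling the helpers directly, assuming boto3 credentials are configured and using a hypothetical bucket name:

    import boto3

    from c7n.resources.aws import (
        get_bucket_region,
        get_bucket_region_clientless,
    )

    bucket = "example-output-bucket"  # hypothetical
    s3 = boto3.client("s3")

    # Same fallback chain S3Output.__init__ uses above; the endpoint URL
    # comes from the client metadata.
    region = (
        get_bucket_region_clientless(bucket, s3.meta.endpoint_url)
        or get_bucket_region(bucket, s3)
    )
    print(region)  # e.g. "us-east-1"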
24 changes: 10 additions & 14 deletions c7n/tags.py
@@ -553,13 +553,11 @@ def create_set(self, instances):

     def filter_resources(self, resources):
         old_key = self.data.get('old_key', None)
-        res = 0
-        for r in resources:
-            tags = {t['Key']: t['Value'] for t in r.get('Tags', [])}
-            if old_key not in tags.keys():
-                resources.pop(res)
-            res += 1
-        return resources
+        filtered_resources = [
+            r for r in resources
+            if old_key in (t['Key'] for t in r.get('Tags', []))
+        ]
+        return filtered_resources
 
     def process(self, resources):
         count = len(resources)
@@ -792,13 +790,11 @@ def create_set(self, instances):

     def filter_resources(self, resources):
         key = self.data.get('key', None)
-        res = 0
-        for r in resources:
-            tags = {t['Key']: t['Value'] for t in r.get('Tags', [])}
-            if key not in tags.keys():
-                resources.pop(res)
-            res += 1
-        return resources
+        filtered_resources = [
+            r for r in resources
+            if key in (t['Key'] for t in r.get('Tags', []))
+        ]
+        return filtered_resources
 
     def process(self, resources):
         count = len(resources)
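Both rewrites replace in-place mutation with a list comprehension, and they also fix a subtle bug: the old loops popped items out of the list being iterated while advancing a separately tracked index, so elements after a removal could be skipped or the index could drift out of range. A standalone illustration of the new filter, using made-up sample data:

    # Keep only resources that carry the given tag key.
    resources = [
        {"Tags": [{"Key": "owner", "Value": "alice"}]},
        {"Tags": [{"Key": "env", "Value": "dev"}]},
        {},  # no Tags at all
    ]

    key = "owner"
    filtered = [
        r for r in resources
        if key in (t["Key"] for t in r.get("Tags", []))
    ]
    assert filtered == [{"Tags": [{"Key": "owner", "Value": "alice"}]}]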