diff --git a/README.md b/README.md index e0c90bc9..402a6f4e 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,5 @@ # Credentials Fetcher -NOTE: This branch is un-released, additional tests are not complete. --------------------------------------------------------------------- - `credentials-fetcher` is a Linux daemon that retrieves gMSA credentials from Active Directory over LDAP. It creates and refreshes kerberos tickets from gMSA credentials. Kerberos tickets can be used by containers to run apps/services that authenticate using Active Directory. This daemon works in a similar way as ccg.exe and the gMSA plugin in Windows as described in - https://docs.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/manage-serviceaccounts#gmsa-architecture-and-improvements @@ -26,7 +23,7 @@ https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html#linu dnf install -y samba-common-tools # install custom credentials-fetcher rpm from branch - https://github.com/aws/credentials-fetcher/tree/fixes_for_DNS_and_distinguishedName gMSA credentials management for containers - curl -L -O https://github.com/aws/credentials-fetcher/raw/refs/heads/fixes_for_DNS_and_distinguishedName/rpm/credentials-fetcher-..-0.amzn2023.x86_64.rpm + curl -L -O https://github.com/aws/credentials-fetcher/raw/refs/heads/mainline/rpm/credentials-fetcher-..-0.amzn2023.x86_64.rpm dnf install -y ./credentials-fetcher-..-0.amzn2023.x86_64.rpm # start credentials-fetcher diff --git a/cdk/cdk-domainless-mode/data.json b/cdk/cdk-domainless-mode/data.json index 68af567c..4c94561b 100644 --- a/cdk/cdk-domainless-mode/data.json +++ b/cdk/cdk-domainless-mode/data.json @@ -22,6 +22,6 @@ "ecr_repo_name": "my-ecr-repo", "docker_image_tag": "latest", "dockerfile_path": "./Dockerfile", - "rpm_file": "credentials-fetcher-1.3.65-0.amzn2023.x86_64.rpm", + "rpm_file": "xxxxxxxx", "max_tasks_per_instance": 3 } diff --git a/cdk/cdk-domainless-mode/tests/copy_credspecs_and_create_task_defs.py 
b/cdk/cdk-domainless-mode/tests/copy_credspecs_and_create_task_defs.py index dabe0a96..3f5c3300 100644 --- a/cdk/cdk-domainless-mode/tests/copy_credspecs_and_create_task_defs.py +++ b/cdk/cdk-domainless-mode/tests/copy_credspecs_and_create_task_defs.py @@ -86,6 +86,7 @@ def modify_task_definition(task_definition, ecs_cluster_arn, bucket_arn, s3_key) task_definition["compatibilities"].append("FARGATE") for container_def in task_definition['containerDefinitions']: + container_def['credentialSpecs']=[] credspec = container_def['credentialSpecs'] credspec = [d for d in credspec if 'credentialspecdomainless' not in d] credspec.append(f"credentialspecdomainless:{bucket_arn}/{s3_key}") diff --git a/cdk/cdk-domainless-mode/tests/create_secrets.py b/cdk/cdk-domainless-mode/tests/create_secrets.py index edc4ea6f..d6fe21a2 100644 --- a/cdk/cdk-domainless-mode/tests/create_secrets.py +++ b/cdk/cdk-domainless-mode/tests/create_secrets.py @@ -8,29 +8,28 @@ def create_secrets(): client = boto3.client('secretsmanager') # Base path for the secrets - base_path = "aws/directoryservice/contoso/gmsa" + secret_name = "aws/directoryservice/contoso/gmsa" - for i in range(1, number_of_gmsa_accounts + 1): - # Create the secret name - secret_name = f"{base_path}/WebApp0{i}" + # Create the secret value + secret_value = { + "username": username, + "password": password, + "domainName": directory_name, + # "distinguishedName": f"CN=WebApp0{i},OU=MYOU,OU=Users,OU={netbios_name},DC={netbios_name},DC=com" + } - # Create the secret value - secret_value = { - "username": username, - "password": password, - "domainName": directory_name, - "distinguishedName": f"CN=WebApp0{i},OU=MYOU,OU=Users,OU={netbios_name},DC={netbios_name},DC=com" - } - - try: - # Create the secret - response = client.create_secret( - Name=secret_name, - Description=f"Secret for WebApp0{i}", - SecretString=json.dumps(secret_value) - ) - print(f"Created secret: {secret_name}") - except client.exceptions.ResourceExistsException: - 
print(f"Secret already exists: {secret_name}") - except Exception as e: - print(f"Error creating secret {secret_name}: {str(e)}") \ No newline at end of file + try: + # Create the secret + response = client.create_secret( + Name=secret_name, + Description=f"Secret for WebApp01", + SecretString=json.dumps(secret_value) + ) + print(f"Created secret: {secret_name}") + + except client.exceptions.ResourceExistsException: + print(f"Secret already exists: {secret_name}") + except Exception as e: + print(f"Error creating secret {secret_name}: {str(e)}") + return False + return True \ No newline at end of file diff --git a/cdk/cdk-domainless-mode/tests/delete_secrets.py b/cdk/cdk-domainless-mode/tests/delete_secrets.py deleted file mode 100644 index 73d88bec..00000000 --- a/cdk/cdk-domainless-mode/tests/delete_secrets.py +++ /dev/null @@ -1,24 +0,0 @@ -import boto3 -from parse_data_from_json import number_of_gmsa_accounts -def delete_secrets(): - # Initialize the AWS Secrets Manager client - client = boto3.client('secretsmanager') - - # Base path for the secrets - base_path = "aws/directoryservice/contoso/gmsa" - - for i in range(1, number_of_gmsa_accounts + 1): - # Create the secret name - secret_name = f"{base_path}/WebApp0{i}" - - try: - # Delete the secret - response = client.delete_secret( - SecretId=secret_name, - ForceDeleteWithoutRecovery=True - ) - print(f"Deleted secret: {secret_name}") - except client.exceptions.ResourceNotFoundException: - print(f"Secret not found: {secret_name}") - except Exception as e: - print(f"Error deleting secret {secret_name}: {str(e)}") diff --git a/cdk/cdk-domainless-mode/tests/gmsa.ps1 b/cdk/cdk-domainless-mode/tests/gmsa.ps1 index d5be8166..c0d709ea 100644 --- a/cdk/cdk-domainless-mode/tests/gmsa.ps1 +++ b/cdk/cdk-domainless-mode/tests/gmsa.ps1 @@ -7,116 +7,109 @@ # 5) Add members to the security group that is allowed to retrieve gMSA password # 6) Create gMSA accounts with PrincipalsAllowedToRetrievePassword set to the security 
group created in 4) -# 1) Install SSM agent -function Test-SSMAgentUpdate { - $ssm = Get-Service -Name "AmazonSSMAgent" -ErrorAction SilentlyContinue - if (-not $ssm) { return $false } - # Add additional version checking logic if needed - return $true +# Create a temporary directory for downloads +$tempDir = "C:\temp" +if (-not (Test-Path $tempDir)) { + New-Item -ItemType Directory -Path $tempDir } +# 1) Install SSM agent +Write-Output "Updating SSM agent..." +[System.Net.ServicePointManager]::SecurityProtocol = 'TLS12' +$progressPreference = 'silentlyContinue' +Invoke-WebRequest https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/windows_amd64/AmazonSSMAgentSetup.exe -OutFile $env:USERPROFILE\Desktop\SSMAgent_latest.exe +Start-Process -FilePath $env:USERPROFILE\Desktop\SSMAgent_latest.exe -ArgumentList "/S" + # To install the AD module on Windows Server, run Install-WindowsFeature RSAT-AD-PowerShell # To install the AD module on Windows 10 version 1809 or later, run Add-WindowsCapability -Online -Name 'Rsat.ActiveDirectory.DS-LDS.Tools~~~~0.0.1.0' # To install the AD module on older versions of Windows 10, see https://aka.ms/rsat -try { -# 1) Check and Update SSM agent if needed - if (-not (Test-SSMAgentUpdate)) { - Write-Output "Updating SSM agent..." - [System.Net.ServicePointManager]::SecurityProtocol = 'TLS12' - $progressPreference = 'silentlyContinue' - Invoke-WebRequest https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/windows_amd64/AmazonSSMAgentSetup.exe -OutFile $env:USERPROFILE\Desktop\SSMAgent_latest.exe - Start-Process -FilePath $env:USERPROFILE\Desktop\SSMAgent_latest.exe -ArgumentList "/S" - } +Write-Output "Installing Active Directory management tools..." 
+Install-WindowsFeature -Name "RSAT-AD-Tools" -IncludeAllSubFeature +Install-WindowsFeature RSAT-AD-PowerShell +Install-Module CredentialSpec +Install-Module -Name SqlServer -AllowClobber -Force -# Check if AD tools are installed - if (-not (Get-WindowsFeature -Name "RSAT-AD-Tools").Installed) { - Write-Output "Installing Active Directory management tools..." - Install-WindowsFeature -Name "RSAT-AD-Tools" -IncludeAllSubFeature - Install-WindowsFeature RSAT-AD-PowerShell - Install-Module CredentialSpec -Force - Install-Module -Name SqlServer -AllowClobber -Force - } +$username = "admin@DOMAINNAME" +$password = "INPUTPASSWORD" | ConvertTo-SecureString -AsPlainText -Force +$credential = New-Object System.Management.Automation.PSCredential($username, $password) +$groupAllowedToRetrievePassword = "WebAppAccounts_OU" +# This is the basedn path that needs to be in secrets manager as "distinguishedName" : "OU=MYOU,OU=Users,OU=ActiveDirectory,DC=contoso,DC=com" +$path = "OU=MYOU,OU=Users,OU=contoso,DC=NETBIOS_NAME,DC=com" +$supath = "OU=Users,OU=contoso,DC=contoso,DC=com" - $username = "admin@DOMAINNAME" - $password = "INPUTPASSWORD" | ConvertTo-SecureString -AsPlainText -Force - $credential = New-Object System.Management.Automation.PSCredential($username, $password) - $groupAllowedToRetrievePassword = "WebAppAccounts_OU" - # This is the basedn path that needs to be in secrets manager as "distinguishedName" : "OU=MYOU,OU=Users,OU=ActiveDirectory,DC=contoso,DC=com" - $path = "OU=MYOU,OU=Users,OU=contoso,DC=NETBIOS_NAME,DC=com" - # 2) Create OU if it doesn't exist - if (-not (Get-ADOrganizationalUnit -Filter "Name -eq 'MYOU'" -ErrorAction SilentlyContinue)) { - New-ADOrganizationalUnit -Name "MYOU" -Path "OU=Users,OU=contoso,DC=NETBIOS_NAME,DC=com" -Credential $credential - } +# 2) Create OU +New-ADOrganizationalUnit -Name "MYOU" -Path "OU=Users,OU=contoso,DC=NETBIOS_NAME,DC=com" -Credential $credential - # 3) Create security group if it doesn't exist - if (-not (Get-ADGroup 
-Filter "SamAccountName -eq '$groupAllowedToRetrievePassword'" -ErrorAction SilentlyContinue)) { - New-ADGroup -Name "WebApp Authorized Accounts in OU" -SamAccountName $groupAllowedToRetrievePassword -Credential $credential -GroupScope DomainLocal -Server DOMAINNAME - } - - # 4) Create standard user if it doesn't exist - if (-not (Get-ADUser -Filter "SamAccountName -eq 'StandardUser01'" -ErrorAction SilentlyContinue)) { - New-ADUser -Name "StandardUser01" -AccountPassword (ConvertTo-SecureString -AsPlainText "********" -Force) -Enabled 1 -Credential $credential -Path $path -Server DOMAINNAME - } - - # 5) Add members to security group if not already members - $group = Get-ADGroup $groupAllowedToRetrievePassword - $members = Get-ADGroupMember $group | Select-Object -ExpandProperty SamAccountName +# 3) Create the security group +try { + New-ADGroup -Name "WebApp Authorized Accounts in OU" -SamAccountName $groupAllowedToRetrievePassword -Credential $credential -GroupScope DomainLocal -Server DOMAINNAME +} catch { + Write-Output "Security Group created" +} - foreach ($member in @("StandardUser01", "admin")) { - if ($member -notin $members) { - Add-ADGroupMember -Identity $groupAllowedToRetrievePassword -Members $member -Credential $credential -Server DOMAINNAME - } - } +# 4) Create a new standard user account, this account's username and password needs to be stored in a secret store like AWS secrets manager. 
+try { + New-ADUser -Name "StandardUser01" -AccountPassword (ConvertTo-SecureString -AsPlainText "p@ssw0rd" -Force) -Enabled 1 -Credential $credential -Path $supath -Server DOMAINNAME +} catch { + Write-Output "Created StandardUser01" +} - # 6) Create gMSA accounts if they don't exist - for (($i = 1); $i -le $NUMBER_OF_GMSA_ACCOUNTS; $i++) { - $gmsa_account_name = "WebApp0" + $i - $gmsa_account_with_domain = $gmsa_account_name + ".DOMAINNAME" - $gmsa_account_with_host = "host/" + $gmsa_account_name - $gmsa_account_with_host_and_domain = $gmsa_account_with_host + ".DOMAINNAME" +# 5) Add members to the security group that is allowed to retrieve gMSA password +try { + Add-ADGroupMember -Identity $groupAllowedToRetrievePassword -Members "StandardUser01" -Credential $credential -Server DOMAINNAME + Add-ADGroupMember -Identity $groupAllowedToRetrievePassword -Members "admin" -Credential $credential -Server DOMAINNAME +} catch { + Write-Output "Created AD Group $groupAllowedToRetrievePassword" +} - if (-not (Get-ADServiceAccount -Filter "Name -eq '$gmsa_account_name'" -ErrorAction SilentlyContinue)) { +# 6) Create gMSA accounts with PrincipalsAllowedToRetrievePassword set to the security group created in 4) +$string_err = "" +for (($i = 1); $i -le NUMBER_OF_GMSA_ACCOUNTS; $i++) +{ + # Create the gMSA account + $gmsa_account_name = "WebApp0" + $i + $gmsa_account_with_domain = $gmsa_account_name + ".DOMAINNAME" + $gmsa_account_with_host = "host/" + $gmsa_account_name + $gmsa_account_with_host_and_domain = $gmsa_account_with_host + ".DOMAINNAME" + + try { + # Check if the service account already exists + if (-not (Get-ADServiceAccount -Filter {Name -eq $gmsa_account_name} -ErrorAction SilentlyContinue)) { New-ADServiceAccount -Name $gmsa_account_name ` - -DnsHostName $gmsa_account_with_domain ` - -ServicePrincipalNames $gmsa_account_with_host, $gmsa_account_with_host_and_domain ` - -PrincipalsAllowedToRetrieveManagedPassword $groupAllowedToRetrievePassword ` - -Path $path ` 
- -Credential $credential ` - -Server DOMAINNAME + -DnsHostName $gmsa_account_with_domain ` + -ServicePrincipalNames $gmsa_account_with_host, $gmsa_account_with_host_and_domain ` + -PrincipalsAllowedToRetrieveManagedPassword $groupAllowedToRetrievePassword ` + -Path $path ` + -Credential $credential ` + -Server DOMAINNAME + Write-Output "Created new gMSA account: $gmsa_account_name" + } else { + Write-Output "gMSA account $gmsa_account_name already exists - skipping creation" } + } catch { + $string_err = $_ | Out-String + Write-Output "Error while processing gMSA account $gmsa_account_name : $string_err" } +} - # SQL Server Configuration - $sqlInstance = $env:computername - - # Create firewall rules if they don't exist - $firewallRules = Get-NetFirewallRule | Select-Object -ExpandProperty DisplayName +# Set the SQL Server instance name +$sqlInstance = $env:computername - if ("SQLServer default instance" -notin $firewallRules) { - New-NetFirewallRule -DisplayName "SQLServer default instance" -Direction Inbound -LocalPort 1433 -Protocol TCP -Action Allow - } - if ("SQLServer Browser service" -notin $firewallRules) { - New-NetFirewallRule -DisplayName "SQLServer Browser service" -Direction Inbound -LocalPort 1434 -Protocol UDP -Action Allow - } - if ("AllowRDP" -notin $firewallRules) { - New-NetFirewallRule -DisplayName "AllowRDP" -Direction Inbound -Protocol TCP -LocalPort 3389 -Action Allow - } - if ("AllowSQLServer" -notin $firewallRules) { - New-NetFirewallRule -DisplayName "AllowSQLServer" -Direction Inbound -Protocol TCP -LocalPort 1433 -Action Allow - } +New-NetFirewallRule -DisplayName "SQLServer default instance" -Direction Inbound -LocalPort 1433 -Protocol TCP -Action Allow +New-NetFirewallRule -DisplayName "SQLServer Browser service" -Direction Inbound -LocalPort 1434 -Protocol UDP -Action Allow +netsh advfirewall firewall add rule name = SQLPort dir = in protocol = tcp action = allow localport = 1433 remoteip = localsubnet profile = DOMAIN 
+New-NetFirewallRule -DisplayName "AllowRDP" -Direction Inbound -Protocol TCP -LocalPort 3389 -Action Allow +New-NetFirewallRule -DisplayName "AllowSQLServer" -Direction Inbound -Protocol TCP -LocalPort 1433 -Action Allow - # SQL Database creation and configuration - $connectionString0 = "Server=$sqlInstance;Integrated Security=True;" - $connectionString1 = "Server=$sqlInstance;Database=EmployeesDB;Integrated Security=True;" - # Check if database exists - $dbExists = Invoke-Sqlcmd -ConnectionString $connectionString0 -Query "SELECT name FROM sys.databases WHERE name = 'EmployeesDB'" +# Create a connection string +$connectionString0 = "Server=$sqlInstance;Integrated Security=True;" +$connectionString1 = "Server=$sqlInstance;Database=EmployeesDB;Integrated Security=True;" - if (-not $dbExists) { - Invoke-Sqlcmd -ConnectionString $connectionString0 -Query "CREATE DATABASE EmployeesDB" +$createDatabaseQuery = "CREATE DATABASE EmployeesDB" - $query = @" +$query = @" CREATE TABLE dbo.EmployeesTable ( EmpID INT IDENTITY(1,1) PRIMARY KEY, EmpName VARCHAR(50) NOT NULL,
PROGRAMMER', 'IT', '2022-03-05 03:57:09.967'), ('PLANK OTO', 'ACCOUNTANT', 'ACCOUNTS', '2022-03-05 03:57:09.967'); -alter authorization on database::[EmployeesDB] to [WebApp01$] "@ - Invoke-Sqlcmd -ConnectionString $connectionString1 -Query $query - } - - # Check if login exists before creating - $loginExists = Invoke-Sqlcmd -ConnectionString $connectionString0 -Query "SELECT name FROM sys.server_principals WHERE name = 'NETBIOS_NAME\webapp01$'" +Invoke-Sqlcmd -ConnectionString $connectionString0 -Query $createDatabaseQuery -QueryTimeout 60 +Invoke-Sqlcmd -ConnectionString $connectionString1 -Query $query + +# Sleep for 10 seconds +Start-Sleep -Seconds 10 + +# Loop through WebApp01$ to WebApp010$ +for ($i = 1; $i -le NUMBER_OF_GMSA_ACCOUNTS; $i++) { + $webAppName = "WebApp0$i`$" + + $createLoginQuery = @" +CREATE LOGIN [NETBIOS_NAME\$webAppName] FROM WINDOWS WITH DEFAULT_DATABASE = [master], DEFAULT_LANGUAGE = [us_english]; +USE [EmployeesDB]; +CREATE USER [$webAppName] FOR LOGIN [NETBIOS_NAME\$webAppName]; +ALTER ROLE [db_owner] ADD MEMBER [$webAppName]; +ALTER AUTHORIZATION ON DATABASE::[EmployeesDB] TO [$webAppName]; +"@ - if (-not $loginExists) { - $createLoginQuery = "CREATE LOGIN [NETBIOS_NAME\webapp01$] FROM WINDOWS WITH DEFAULT_DATABASE = [master], DEFAULT_LANGUAGE = [us_english]; EXEC sp_addrolemember 'db_owner', 'NETBIOS_NAME\webapp01$';" - Invoke-Sqlcmd -ConnectionString $connectionString0 -Query $createLoginQuery - } + Write-Host "Creating login and granting permissions for $webAppName" + Invoke-Sqlcmd -ConnectionString $connectionString0 -Query $createLoginQuery +} -} catch { - Write-Error "An error occurred: $_" - throw -} \ No newline at end of file + diff --git a/cdk/cdk-domainless-mode/tests/parse_data_from_json.py b/cdk/cdk-domainless-mode/tests/parse_data_from_json.py index e2474690..28de60f2 100644 --- a/cdk/cdk-domainless-mode/tests/parse_data_from_json.py +++ b/cdk/cdk-domainless-mode/tests/parse_data_from_json.py @@ -27,6 +27,7 @@ def 
get_value(key): password = data["password"] windows_instance_tag = data["windows_instance_tag"] domain_admin_password = data["domain_admin_password"] +containers_per_instance = data["max_tasks_per_instance"] * 10 # maximum number of container definitions per task is 10 (ref: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-quotas.html) if "XXX" in bucket_name: print("S3_PREFIX is not setup correctly, please set it and retry") diff --git a/cdk/cdk-domainless-mode/tests/run_e2e_test.py b/cdk/cdk-domainless-mode/tests/run_e2e_test.py index bcf80501..21f2a752 100644 --- a/cdk/cdk-domainless-mode/tests/run_e2e_test.py +++ b/cdk/cdk-domainless-mode/tests/run_e2e_test.py @@ -2,6 +2,9 @@ import boto3 import os +import math +import copy +import json from create_secrets import create_secrets from copy_credspecs_and_create_task_defs import (setup_aws_session, get_ecs_task_execution_role_arn, @@ -16,13 +19,12 @@ task_definition_template_name, cluster_name, region, bucket_name, aws_profile_name, instance_name, - windows_instance_tag, repository_name) + windows_instance_tag, repository_name, containers_per_instance) from setup_windows_instance import get_instance_id_by_name, run_powershell_script from update_inbound_rules import add_security_group_to_instance from update_task_def_and_run_tasks import (get_task_definition_families, update_task_definition_image, run_task) from run_sql_test import get_windows_hostname, run_shell_script -from delete_secrets import delete_secrets from botocore.exceptions import ClientError s3_client = boto3.client('s3') @@ -78,37 +80,147 @@ def create_s3_bucket(): print(f"Bucket {bucket_name} created successfully.") return True +def update_asg_min_capacity(capacity): + try: + autoscaling_client = boto3.client('autoscaling', region_name=region) + response = autoscaling_client.describe_auto_scaling_groups() + asg_found = False + for asg in response['AutoScalingGroups']: + if stack_name in asg['AutoScalingGroupName']: + asg_found = 
True + response = autoscaling_client.update_auto_scaling_group( + AutoScalingGroupName=asg['AutoScalingGroupName'], + MinSize=capacity, + MaxSize=capacity, + DesiredCapacity=capacity + ) + print(f"Successfully updated ASG {asg['AutoScalingGroupName']} desired capacity to {capacity}") + return True + + if not asg_found: + print(f"No Auto Scaling Group found containing '{stack_name}'") + return False + except Exception as e: + print(f"Error updating ASG: {str(e)}") + return False +def create_task_definition_groups(number_of_gmsa_accounts, max_containers=10): + """ + Create groups of GMSA accounts for task definitions + """ + groups = [] + current_group = [] + + for i in range(1, number_of_gmsa_accounts + 1): + current_group.append(i) + if len(current_group) == max_containers: + groups.append(current_group) + current_group = [] + + if current_group: # Add any remaining accounts + groups.append(current_group) + + return groups + def create_and_register_tasks(): - setup_aws_session(aws_profile_name) - ecs_task_execution_role_arn = get_ecs_task_execution_role_arn() - task_definition_template = get_task_definition_template(ecs_client, task_definition_template_name) - ecs_cluster_arn, ecs_cluster_instance_arn = get_ecs_cluster_info(ecs_client, "Credentials-fetcher-ecs-load-test") + try: + setup_aws_session(aws_profile_name) + ecs_task_execution_role_arn = get_ecs_task_execution_role_arn() + task_definition_template = get_task_definition_template(ecs_client, task_definition_template_name) + ecs_cluster_arn, ecs_cluster_instance_arn = get_ecs_cluster_info(ecs_client, "Credentials-fetcher-ecs-load-test") - if not all([ecs_task_execution_role_arn, task_definition_template, ecs_cluster_arn]): - print("Failed to retrieve necessary resources.") - return + if not all([ecs_task_execution_role_arn, task_definition_template, ecs_cluster_arn]): + print("Failed to retrieve necessary resources.") + return False - if not all([ecs_task_execution_role_arn, task_definition_template, 
ecs_cluster_arn]): - print("Failed to retrieve necessary resources.") - return + # Print template structure for debugging + # print(f"Template structure: {json.dumps(task_definition_template, indent=2)}") - for i in range(1, number_of_gmsa_accounts + 1): - gmsa_name = f"WebApp0{i}" - secret_id = f"aws/directoryservice/{netbios_name}/gmsa/{gmsa_name}" - gmsa_secret_arn = secrets_manager_client.get_secret_value(SecretId=secret_id)['ARN'] + # Get the new image URI + try: + ecr_client = boto3.client('ecr', region_name=region) + response = ecr_client.describe_repositories(repositoryNames=[repository_name]) + repository_uri = response['repositories'][0]['repositoryUri'] + image_uri = f"{repository_uri}:{tag}" + except Exception as e: + print(f"Failed to get ECR repository URI: {str(e)}") + return False + + required_instances = math.ceil(number_of_gmsa_accounts / containers_per_instance) + if not update_asg_min_capacity(required_instances): + print(f"Error updating desired capacity to {required_instances}, exiting...") + return False - credspec = create_credspec(directory_name, netbios_name, gmsa_name, gmsa_secret_arn) - bucket_arn, s3_key = upload_credspec_to_s3(s3_client, bucket_name, gmsa_name, credspec) + # Group GMSA accounts into sets of 10 or fewer + account_groups = create_task_definition_groups(number_of_gmsa_accounts) + + template_task_def = task_definition_template['taskDefinition'] # Get the task definition part once + + for group_index, group in enumerate(account_groups): + # Create container definitions for this group + container_definitions = [] + + for account_number in group: + gmsa_name = f"WebApp0{account_number}" + secret_id = f"aws/directoryservice/{netbios_name}/gmsa" + gmsa_secret_arn = secrets_manager_client.get_secret_value(SecretId=secret_id)['ARN'] + + credspec = create_credspec(directory_name, netbios_name, gmsa_name, gmsa_secret_arn) + bucket_arn, s3_key = upload_credspec_to_s3(s3_client, bucket_name, gmsa_name, credspec) + + if not 
bucket_arn: + print(f"Failed to upload credspec for {gmsa_name}") + continue + + # Create a container definition for this GMSA account + container_def = create_container_definition( + template_task_def['containerDefinitions'][0], + bucket_arn, + s3_key, + gmsa_name, + image_uri + ) + container_definitions.append(container_def) + + # Create family name for this group + base_family = template_task_def['family'] + family = f"{base_family}-group-{group_index + 1}" + + # Create the complete task definition + modified_task_def = { + 'networkMode': template_task_def['networkMode'], + 'containerDefinitions': container_definitions, + 'cpu': template_task_def['cpu'], + 'memory': template_task_def['memory'] + } + + # Register the task definition for this group + response = register_new_task_definition( + ecs_client, + modified_task_def, + family, + ecs_task_execution_role_arn + ) + + print(f"Registered new task definition for group {group_index + 1}: {response['taskDefinition']['taskDefinitionArn']}") + + return True - if not bucket_arn: - print(f"Failed to upload credspec for {gmsa_name}") - continue + except Exception as e: + print(f"An error occurred in create_and_register_tasks: {str(e)}") + import traceback + traceback.print_exc() # This will print the full stack trace + return False - modified_task_definition = modify_task_definition(task_definition_template, ecs_cluster_arn, bucket_arn, s3_key) - family = f"{task_definition_template['taskDefinition']['family']}-{i}" - response = register_new_task_definition(ecs_client, modified_task_definition, family, ecs_task_execution_role_arn) - print(f"Registered new task definition for {gmsa_name}: {response['taskDefinition']['taskDefinitionArn']}") +def create_container_definition(template_container, bucket_arn, s3_key, gmsa_name, image_uri): + """ + Create a container definition based on the template and GMSA details + """ + container_def = copy.deepcopy(template_container) + container_def['name'] = f"MyContainer-{gmsa_name}" + 
container_def['image'] = image_uri # Update the image URI + container_def['credentialSpecs'] = [f"credentialspecdomainless:{bucket_arn}/{s3_key}"] + return container_def def is_s3_bucket_empty(): try: @@ -133,6 +245,7 @@ def is_s3_bucket_empty(): return None def empty_s3_bucket(): + """ Empty an S3 bucket by deleting all objects and versions. @@ -142,62 +255,94 @@ def empty_s3_bucket(): bucket = s3.Bucket(bucket_name) try: - # Delete all objects - print(f"Deleting all objects in bucket '{bucket_name}'...") - bucket.objects.all().delete() - - # Delete all object versions (if versioning is enabled) - print(f"Deleting all object versions in bucket '{bucket_name}'...") - bucket.object_versions.all().delete() - - print(f"Bucket '{bucket_name}' has been emptied successfully.") + # Delete objects (excluding .rpm files) + print(f"Deleting objects in bucket '{bucket_name}' (only credspec.json files)...") + objects_to_delete = [obj for obj in bucket.objects.all() if obj.key.endswith('credspec.json')] + + if objects_to_delete: + bucket.delete_objects( + Delete={ + 'Objects': [{'Key': obj.key} for obj in objects_to_delete] + } + ) + + # Delete object versions (excluding .rpm files) + print(f"Deleting object versions in bucket '{bucket_name}' (only credspec.json files)...") + versions_to_delete = [ver for ver in bucket.object_versions.all() + if ver.object_key.endswith('credspec.json')] + + if versions_to_delete: + bucket.delete_objects( + Delete={ + 'Objects': [{'Key': ver.object_key, 'VersionId': ver.id} + for ver in versions_to_delete] + } + ) + print(f"Removed all credspec.json files from {bucket_name}") + return True except ClientError as e: print(f"An error occurred while emptying the bucket: {e}") + return False def update_windows_instance(): instance_id = get_instance_id_by_name(region, instance_name) script_path = os.path.join(os.path.dirname(__file__), 'gmsa.ps1') - run_powershell_script(instance_id, script_path) - -def update_task_defs_and_run_tasks(): - ecs_client = 
boto3.client('ecs', region_name=region) - ec2_client = boto3.client('ec2', region_name=region) - - response = ec2_client.describe_vpcs(Filters=[{'Name': 'tag:Name', 'Values': [vpc_name]}]) - if not response['Vpcs']: - raise ValueError(f"No VPC found with name: {vpc_name}") - vpc_id = response['Vpcs'][0]['VpcId'] - - # Get subnets - response = ec2_client.describe_subnets(Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}]) - if not response['Subnets']: - raise ValueError(f"No subnets found in VPC: {vpc_id}") - subnet_ids = [subnet['SubnetId'] for subnet in response['Subnets']] - - # Get security group - response = ec2_client.describe_security_groups(Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}]) - if not response['SecurityGroups']: - raise ValueError(f"No security groups found in VPC: {vpc_id}") - security_group_id = response['SecurityGroups'][0]['GroupId'] - - # Get all task definition families - task_families = get_task_definition_families(ecs_client, task_definition_template_name) - if not task_families: - raise ValueError(f"No task definition families found matching pattern: {task_definition_template_name}") - - for task_family in task_families: - try: - # Update task definition and get the new ARN - new_task_definition_arn = update_task_definition_image(task_family, repository_name, tag, region) - task_arn = run_task(ecs_client, cluster_name, new_task_definition_arn, subnet_ids, security_group_id) - if task_arn: - print(f"Task started for family {task_family}: {task_arn}") - else: - print(f"Failed to start task for family {task_family}") - except Exception as e: - print(f"Error processing task family {task_family}: {str(e)}") + return run_powershell_script(instance_id, script_path) - print("All tasks have been processed.") +def run_tasks(): + try: + ecs_client = boto3.client('ecs', region_name=region) + ec2_client = boto3.client('ec2', region_name=region) + + # Get VPC info + response = ec2_client.describe_vpcs(Filters=[{'Name': 'tag:Name', 'Values': 
[vpc_name]}]) + if not response['Vpcs']: + print(f"No VPC found with name: {vpc_name}") + return False + vpc_id = response['Vpcs'][0]['VpcId'] + + # Get subnets + response = ec2_client.describe_subnets(Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}]) + if not response['Subnets']: + print(f"No subnets found in VPC: {vpc_id}") + return False + subnet_ids = [subnet['SubnetId'] for subnet in response['Subnets']] + + # Get security group + response = ec2_client.describe_security_groups(Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}]) + if not response['SecurityGroups']: + print(f"No security groups found in VPC: {vpc_id}") + return False + security_group_id = response['SecurityGroups'][0]['GroupId'] + + # Get all task definition families + task_families = get_task_definition_families(ecs_client, task_definition_template_name) + if not task_families: + print(f"No task definition families found matching pattern: {task_definition_template_name}") + return False + + # Run tasks for each family + for task_family in task_families: + try: + # Get the latest task definition ARN for this family + response = ecs_client.describe_task_definition(taskDefinition=task_family) + task_definition_arn = response['taskDefinition']['taskDefinitionArn'] + + task_arn = run_task(ecs_client, cluster_name, task_definition_arn, subnet_ids, security_group_id) + if task_arn: + print(f"Task started for family {task_family}: {task_arn}") + else: + print(f"Failed to start task for family {task_family}") + + except Exception as e: + print(f"Error processing task family {task_family}: {str(e)}") + + print("All tasks have been processed.") + return True + + except Exception as e: + print(f"An unexpected error occurred: {str(e)}") + return False def get_running_task_definitions(): @@ -265,7 +410,7 @@ def delete_unused_task_definitions(): taskDefinition=arn )['taskDefinition'] - print(task_def) + # print(task_def) # Check if any container has credentialSpecs for container in 
task_def['containerDefinitions']: if 'credentialSpecs' in container: @@ -285,71 +430,103 @@ def delete_unused_task_definitions(): print(f"\nSummary:") print(f"Deleted task definitions: {deleted_count}") print(f"Skipped task definitions: {skipped_count}") + return True except ClientError as e: print(f"Error: {e}") + return False def run_sql_test(): instance_name_linux = stack_name + '/MyAutoScalingGroup' instance_id_linux = get_instance_id_by_name(region, instance_name_linux) instance_id_windows = get_instance_id_by_name(region, windows_instance_tag) hostname = get_windows_hostname(instance_id_windows) - run_shell_script(instance_id_linux, hostname) + return run_shell_script(instance_id_linux, hostname) def run_e2e_test(): if not check_s3_bucket_exists(): - if not create_s3_bucket(): - print("s3 bucket was not created properly, exiting...") - return + print("Please create S3 bucket and try again, exiting...") + return if not is_s3_bucket_empty(): - empty_s3_bucket() + if not empty_s3_bucket(): + print("s3 bucket was not emptied properly, exiting...") + return print("Using s3 bucket: " + bucket_name) - print("----------S3 bucket created and ready for use-----------------") - create_secrets() + print("----------S3 bucket ready for use-----------------") + if not create_secrets(): + print("secrets were not created properly, exiting...") + return print("\n" * 3) print("-----------------Secret Creation Complete.-------------------") - print("\n" * 3) - create_and_register_tasks() - print("\n" * 3) - print("-----------------Created and Registered Tasks.---------------") + if not update_windows_instance(): + print("Error updating Windows instance, exiting...") + return print("\n" * 3) print("-----------------Windows instance is Ready--------------------") - add_security_group_to_instance(directory_name, instance_name) + if not add_security_group_to_instance(directory_name, instance_name): + print("Error adding inbound rule to security group, exiting...") + return 
print("\n" * 3) print("--------Linux instance has necessary Security groups Added----") print("\n" * 3) - update_task_defs_and_run_tasks() + if not delete_unused_task_definitions(): + print("Old task definitions weren't deleted properly, will try to create and register new task definitions still. If the next step fails, please manually delete old and unused task definitions and try again...") + if not create_and_register_tasks(): + print("Not able to create and register new task definitions, exiting...") + return + print("\n" * 3) + print("-----------------Created and Registered Tasks.----------------") + print("\n" * 3) + if not run_tasks(): + print("Error running tasks, exiting...") + return print("\n" * 3) - print("--------Task definition updated, ready to run SQL Test--------") + print("--------Tasks running, ready to run SQL Test--------") print("Waiting 15 seconds before running SQL Test") print("\n" * 3) sleep(15) print("Sleep complete. Executing SQL test now.") - run_sql_test() - print("###################################################") - print("###################################################") - print("###################################################") - print("###################################################") - print("\n" * 3) - print("------------E2E Test Successful!!------------------") - print("\n" * 3) - print("###################################################") - print("###################################################") - print("###################################################") - print("###################################################") - + if run_sql_test(): + print("###################################################") + print("\n" * 3) + print("------------E2E Test Successful!!------------------") + print("\n" * 3) + print("###################################################") + else: + print("Error running E2E test, exiting...") + retry = input("Do you want to retry? 
(yes/no): ").lower().strip() + if retry in ['yes', 'y']: + print("Retrying E2E test...") + if run_sql_test(): + print("###################################################") + print("\n" * 3) + print("------------E2E Test Successful!!------------------") + print("\n" * 3) + print("###################################################") + else: + print("Error running E2E test, exiting...") + return + response = input("\nAre you ready to run cleanup? (yes/no): ").lower().strip() + if response in ['yes', 'y']: + return True + elif response in ['no', 'n']: + return False + else: + print("Please enter 'yes' or 'no'") + + def cleanup_after_test_complete(): print("\n" * 3) print("------------Initiating cleanup after test--------------") print("\n" * 3) - delete_secrets() empty_s3_bucket() delete_unused_task_definitions() print("\n" * 3) print("------------Cleanup Complete!!--------------") print("\n" * 3) -run_e2e_test() -cleanup_after_test_complete() - +if run_e2e_test(): + cleanup_after_test_complete() +else: + print("Cleanup skipped, please cleanup manually later...") diff --git a/cdk/cdk-domainless-mode/tests/run_sql_test.py b/cdk/cdk-domainless-mode/tests/run_sql_test.py index b3b60c68..9c12d2f8 100644 --- a/cdk/cdk-domainless-mode/tests/run_sql_test.py +++ b/cdk/cdk-domainless-mode/tests/run_sql_test.py @@ -1,4 +1,5 @@ import boto3 +import sys from parse_data_from_json import stack_name, windows_instance_tag, region """ @@ -24,6 +25,8 @@ def run_shell_script(instance_id, hostname): ' echo "Container ID: $IMAGEID"', ' echo "Running commands inside the container:"', ' echo "klist && sqlcmd -S $HOSTNAME.contoso.com -C -Q \'SELECT * FROM employeesdb.dbo.employeestable;\'" | docker exec -i $IMAGEID env PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/mssql-tools/bin bash', + ' SQL_EXIT_CODE=$?', + ' echo "SQL_SUCCESS_MARKER: $SQL_EXIT_CODE"', 'else', ' echo "No container found with my-ecr-repo:latest"', 'fi' @@ -60,13 +63,25 @@ def 
run_shell_script(instance_id, hostname): print(f"Command output:\n{output.get('StandardOutputContent', '')}") + sql_success = False + if output['Status'] == 'Success': print(f"Command status: Success") + # Look for the SQL success marker in the output + output_content = output.get('StandardOutputContent', '') + for line in output_content.splitlines(): + if line.startswith('SQL_SUCCESS_MARKER: '): + exit_code = int(line.split(': ')[1]) + if exit_code == 0: + sql_success = True + break else: print(f"Command failed with status: {output['Status']}") print(f"Error: {output.get('StandardErrorContent', 'No error content available')}") raise Exception(f"Command execution failed with status: {output['Status']}") + return sql_success + def get_windows_hostname(instance_id): commands = [ 'hostname' diff --git a/cdk/cdk-domainless-mode/tests/setup_windows_instance.py b/cdk/cdk-domainless-mode/tests/setup_windows_instance.py index a9fa62a1..3ae688ae 100644 --- a/cdk/cdk-domainless-mode/tests/setup_windows_instance.py +++ b/cdk/cdk-domainless-mode/tests/setup_windows_instance.py @@ -33,29 +33,27 @@ """ def run_powershell_script(instance_id, script_path): + try: + with open(script_path, 'r') as file: + script_content = file.read() + + script_content = script_content.replace("INPUTPASSWORD", domain_admin_password) + script_content = script_content.replace("DOMAINNAME", directory_name) + script_content = script_content.replace("NETBIOS_NAME", netbios_name) + script_content = script_content.replace("NUMBER_OF_GMSA_ACCOUNTS", str(number_of_gmsa_accounts)) + script_content = script_content.replace("BUCKET_NAME", bucket_name) + + ssm = boto3.client('ssm') + + response = ssm.send_command( + InstanceIds=[instance_id], + DocumentName="AWS-RunPowerShellScript", + Parameters={'commands': [script_content]} + ) - with open(script_path, 'r') as file: - script_content = file.read() - - script_content = script_content.replace("INPUTPASSWORD", - domain_admin_password) - script_content = 
script_content.replace("DOMAINNAME", directory_name) - script_content = script_content.replace("NETBIOS_NAME", netbios_name) - script_content = script_content.replace("NUMBER_OF_GMSA_ACCOUNTS", str(number_of_gmsa_accounts)) - script_content = script_content.replace("BUCKET_NAME", bucket_name) - - ssm = boto3.client('ssm') - - response = ssm.send_command( - InstanceIds=[instance_id], - DocumentName="AWS-RunPowerShellScript", - Parameters={'commands': [script_content]} - ) - - command_id = response['Command']['CommandId'] + command_id = response['Command']['CommandId'] - waiter = ssm.get_waiter('command_executed') - try: + waiter = ssm.get_waiter('command_executed') waiter.wait( CommandId=command_id, InstanceId=instance_id, @@ -64,25 +62,26 @@ def run_powershell_script(instance_id, script_path): 'MaxAttempts': 50 } ) - except Exception as e: - print(f"Command failed: {script_content}") - print(f"Error: {str(e)}") - raise - output = ssm.get_command_invocation( - CommandId=command_id, - InstanceId=instance_id - ) - - print(f"Command output:\n{output.get('StandardOutputContent', '')}") + output = ssm.get_command_invocation( + CommandId=command_id, + InstanceId=instance_id + ) + + print(f"Command output:\n{output.get('StandardOutputContent', '')}") - if output['Status'] == 'Success': - print(f"Command status: Success") - - if output['Status'] != 'Success': + if output['Status'] == 'Success': + print(f"Command status: Success") + return True + else: + print(f"Command failed: {script_content}") + print(f"Error: {output['StandardErrorContent']}") + return False + + except Exception as e: + print(f"An error occurred: {str(e)}") print(f"Command failed: {script_content}") - print(f"Error: {output['StandardErrorContent']}") - raise Exception(f"Command execution failed: {script_content}") + return False def get_instance_id_by_name(region, instance_name): diff --git a/cdk/cdk-domainless-mode/tests/update_inbound_rules.py b/cdk/cdk-domainless-mode/tests/update_inbound_rules.py 
index 52cca7d2..39dacf36 100644 --- a/cdk/cdk-domainless-mode/tests/update_inbound_rules.py +++ b/cdk/cdk-domainless-mode/tests/update_inbound_rules.py @@ -21,68 +21,67 @@ instance_name = stack_name + "/MyAutoScalingGroup" def add_security_group_to_instance(directory_name, instance_name): - - ds = boto3.client('ds') - ec2 = boto3.client('ec2') - - directories = ds.describe_directories()['DirectoryDescriptions'] - directory = next((d for d in directories if d['Name'] == directory_name), None) - - if not directory: - raise ValueError(f"Directory '{directory_name}' not found") - - directory_id = directory['DirectoryId'] - print(f"Found directory ID: {directory_id}") - - directory_details = ds.describe_directories(DirectoryIds=[directory_id])['DirectoryDescriptions'][0] - security_group_id = directory_details['VpcSettings']['SecurityGroupId'] - - response = ec2.describe_instances( - Filters=[ - { - 'Name': 'tag:Name', - 'Values': [instance_name] - }, - { - 'Name': 'instance-state-name', - 'Values': ['running'] - } - ] - ) - - if not response['Reservations']: - raise ValueError(f"No instances found with tag:Name '{instance_name}'") - - instances = response['Reservations'][0]['Instances'] - if not instances: - raise ValueError(f"No instances found in the reservation") - - instance = instances[0] - - if 'SecurityGroups' not in instance or not instance['SecurityGroups']: - raise ValueError(f"No security groups found for the instance") - - instance_sg_id = instance['SecurityGroups'][0]['GroupId'] - - # Check if the rule already exists - existing_rules = ec2.describe_security_group_rules( - Filters=[{'Name': 'group-id', 'Values': [instance_sg_id]}] - )['SecurityGroupRules'] - - rule_exists = any( - rule['IpProtocol'] == '-1' and - rule['FromPort'] == -1 and - rule['ToPort'] == -1 and - rule.get('ReferencedGroupInfo', {}).get('GroupId') == security_group_id - for rule in existing_rules - ) - - if rule_exists: - print(f"Rule already exists in security group {instance_sg_id}") 
- return - - # Add the new inbound rule to the security group try: + ds = boto3.client('ds') + ec2 = boto3.client('ec2') + + directories = ds.describe_directories()['DirectoryDescriptions'] + directory = next((d for d in directories if d['Name'] == directory_name), None) + + if not directory: + raise ValueError(f"Directory '{directory_name}' not found") + + directory_id = directory['DirectoryId'] + print(f"Found directory ID: {directory_id}") + + directory_details = ds.describe_directories(DirectoryIds=[directory_id])['DirectoryDescriptions'][0] + security_group_id = directory_details['VpcSettings']['SecurityGroupId'] + + response = ec2.describe_instances( + Filters=[ + { + 'Name': 'tag:Name', + 'Values': [instance_name] + }, + { + 'Name': 'instance-state-name', + 'Values': ['running'] + } + ] + ) + + if not response['Reservations']: + raise ValueError(f"No instances found with tag:Name '{instance_name}'") + + instances = response['Reservations'][0]['Instances'] + if not instances: + raise ValueError(f"No instances found in the reservation") + + instance = instances[0] + + if 'SecurityGroups' not in instance or not instance['SecurityGroups']: + raise ValueError(f"No security groups found for the instance") + + instance_sg_id = instance['SecurityGroups'][0]['GroupId'] + + # Check if the rule already exists + existing_rules = ec2.describe_security_group_rules( + Filters=[{'Name': 'group-id', 'Values': [instance_sg_id]}] + )['SecurityGroupRules'] + + rule_exists = any( + rule['IpProtocol'] == '-1' and + rule['FromPort'] == -1 and + rule['ToPort'] == -1 and + rule.get('ReferencedGroupInfo', {}).get('GroupId') == security_group_id + for rule in existing_rules + ) + + if rule_exists: + print(f"Rule already exists in security group {instance_sg_id}") + return True + + # Add the new inbound rule to the security group ec2.authorize_security_group_ingress( GroupId=instance_sg_id, IpPermissions=[ @@ -95,6 +94,8 @@ def add_security_group_to_instance(directory_name, 
instance_name): ] ) print(f"Successfully added inbound rule to security group {instance_sg_id}") + return True + except Exception as e: print(f"An error occurred: {str(e)}") - + return False diff --git a/package/credentials-fetcher.spec b/package/credentials-fetcher.spec index 81b4ee19..ccb4b3b7 100644 --- a/package/credentials-fetcher.spec +++ b/package/credentials-fetcher.spec @@ -1,6 +1,6 @@ %global major_version 1 %global minor_version 3 -%global patch_version 65 +%global patch_version 7 # For handling bump release by rpmdev-bumpspec and mass rebuild %global baserelease 0 @@ -12,7 +12,7 @@ Summary: credentials-fetcher is a daemon that refreshes tickets or tokens License: Apache-2.0 URL: https://github.com/aws/credentials-fetcher -Source0: credentials-fetcher-v.1.3.65.tar.gz +Source0: credentials-fetcher-v.1.3.7.tar.gz BuildRequires: cmake3 make chrpath openldap-clients grpc-devel gcc-c++ glib2-devel jsoncpp-devel BuildRequires: openssl-devel zlib-devel protobuf-devel re2-devel krb5-devel systemd-devel @@ -68,6 +68,10 @@ ctest3 %attr(0755, -, -) %{_sbindir}/krb5.conf %changelog +* Fri Jan 17 2025 Samiullah Mohammed - 1.3.7 +- DNS and associated retries +- Complex DN support + * Mon Jan 29 2024 Sai Kiran Akula - 1.3.6 - Create 1.3.6 release, added input validation