From 16c596873573b955466e98d36b5ba89000f8fb06 Mon Sep 17 00:00:00 2001
From: Oriol <oriol.abadal@mongodb.com>
Date: Tue, 6 Feb 2024 15:52:35 +0100
Subject: [PATCH 01/19] chore: Fixes test for
 `federated_settings_identity_provider` in QA environment (#1912)
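
The hard-coded Okta IdP ID presumably only exists in the dev environment, so
the equality assertion fails against QA. Switching to
`TestCheckResourceAttrSet` verifies merely that the attribute is populated,
which holds in either environment. A minimal sketch of the resulting check
set (`samlChecks` is a hypothetical helper name; both check functions come
from `terraform-plugin-testing`, as used in the diff):

```go
package federatedsettingsidentityprovider_test

import (
	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
)

// samlChecks (hypothetical helper) groups environment-agnostic
// assertions: fixed values are compared exactly, while IDs that differ
// per environment are only required to be set.
func samlChecks(resourceName string) resource.TestCheckFunc {
	return resource.ComposeTestCheckFunc(
		resource.TestCheckResourceAttr(resourceName, "protocol", "SAML"),
		// Any non-empty okta_idp_id passes, in dev and QA alike.
		resource.TestCheckResourceAttrSet(resourceName, "okta_idp_id"),
	)
}
```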

---
 .../data_source_federated_settings_identity_provider_test.go    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_provider_test.go b/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_provider_test.go
index d71fe39d04..d0919456c5 100644
--- a/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_provider_test.go
+++ b/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_provider_test.go
@@ -28,7 +28,7 @@ func TestAccFederatedSettingsIdentityProviderDS_samlBasic(t *testing.T) {
 					resource.TestCheckResourceAttrSet(resourceName, "acs_url"),
 					resource.TestCheckResourceAttr(resourceName, "display_name", "SAML-test"),
 					resource.TestCheckResourceAttr(resourceName, "protocol", "SAML"),
-					resource.TestCheckResourceAttr(resourceName, "okta_idp_id", "0oa10l49zjuBdjDT1358"),
+					resource.TestCheckResourceAttrSet(resourceName, "okta_idp_id"),
 					resource.TestCheckResourceAttr(resourceName, "idp_id", idpID),
 					resource.TestCheckResourceAttr(resourceName, "federation_settings_id", federatedSettingsID),
 				),

From 0f9607af9cb9bfef6e5a0dfa6e9604160c430ec0 Mon Sep 17 00:00:00 2001
From: nsmith78660 <104450429+nsmith78660@users.noreply.github.com>
Date: Tue, 6 Feb 2024 09:07:25 -0600
Subject: [PATCH 02/19] Close code block with three backticks (#1903)

---
 ...nk_endpoint_service_data_federation_online_archives.markdown | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/docs/d/privatelink_endpoint_service_data_federation_online_archives.markdown b/website/docs/d/privatelink_endpoint_service_data_federation_online_archives.markdown
index 4dcabea393..eeacdd119e 100644
--- a/website/docs/d/privatelink_endpoint_service_data_federation_online_archives.markdown
+++ b/website/docs/d/privatelink_endpoint_service_data_federation_online_archives.markdown
@@ -30,7 +30,7 @@ resource "mongodbatlas_privatelink_endpoint_service_data_federation_online_archi
 data "mongodbatlas_privatelink_endpoint_service_data_federation_online_archives" "test_data_source" {
   project_id = mongodbatlas_project.atlas-project.id
 }
-
+```
 
 
 ## Argument Reference

From e89590148686b5959334f5d84d9c970132209d27 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 7 Feb 2024 11:14:44 +0000
Subject: [PATCH 03/19] chore: Bump tj-actions/verify-changed-files (#1914)

Bumps [tj-actions/verify-changed-files](https://github.com/tj-actions/verify-changed-files) from 5ef175f2fd84957530d0fdd1384a541069e403f2 to 8b28bea118e7723e4672bc7ac323bcd26f271ec4.
- [Release notes](https://github.com/tj-actions/verify-changed-files/releases)
- [Changelog](https://github.com/tj-actions/verify-changed-files/blob/main/HISTORY.md)
- [Commits](https://github.com/tj-actions/verify-changed-files/compare/5ef175f2fd84957530d0fdd1384a541069e403f2...8b28bea118e7723e4672bc7ac323bcd26f271ec4)

---
updated-dependencies:
- dependency-name: tj-actions/verify-changed-files
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 .github/workflows/update-sdk.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/update-sdk.yml b/.github/workflows/update-sdk.yml
index 28d544301e..6d827f07ee 100644
--- a/.github/workflows/update-sdk.yml
+++ b/.github/workflows/update-sdk.yml
@@ -18,7 +18,7 @@ jobs:
       - name: Update files
         run:  make tools update-atlas-sdk
       - name: Verify Changed files
-        uses: tj-actions/verify-changed-files@5ef175f2fd84957530d0fdd1384a541069e403f2
+        uses: tj-actions/verify-changed-files@8b28bea118e7723e4672bc7ac323bcd26f271ec4
         id: verify-changed-files
       - name: Create PR
         uses: peter-evans/create-pull-request@153407881ec5c347639a548ade7d8ad1d6740e38

From 9d297a228534b83dba1a4d7245a8090cb18654f0 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 7 Feb 2024 11:15:04 +0000
Subject: [PATCH 04/19] chore: Bump marocchino/sticky-pull-request-comment from
 2.8.0 to 2.9.0 (#1916)

Bumps [marocchino/sticky-pull-request-comment](https://github.com/marocchino/sticky-pull-request-comment) from 2.8.0 to 2.9.0.
- [Release notes](https://github.com/marocchino/sticky-pull-request-comment/releases)
- [Commits](https://github.com/marocchino/sticky-pull-request-comment/compare/efaaab3fd41a9c3de579aba759d2552635e590fd...331f8f5b4215f0445d3c07b4967662a32a2d3e31)

---
updated-dependencies:
- dependency-name: marocchino/sticky-pull-request-comment
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 .github/workflows/pull-request-lint.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/pull-request-lint.yml b/.github/workflows/pull-request-lint.yml
index 4d22361798..4defb11153 100644
--- a/.github/workflows/pull-request-lint.yml
+++ b/.github/workflows/pull-request-lint.yml
@@ -43,7 +43,7 @@ jobs:
             The subject "{subject}" found in the pull request title "{title}"
             didn't match the configured pattern. Please ensure that the subject
             starts with an uppercase character.
-      - uses: marocchino/sticky-pull-request-comment@efaaab3fd41a9c3de579aba759d2552635e590fd
+      - uses: marocchino/sticky-pull-request-comment@331f8f5b4215f0445d3c07b4967662a32a2d3e31
         # When the previous steps fails, the workflow would stop. By adding this
         # condition you can continue the execution with the populated error message.
         if: always() && (steps.lint_pr_title.outputs.error_message != null)
@@ -63,7 +63,7 @@ jobs:
             ```
       # Delete a previous comment when the issue has been resolved
       - if: ${{ steps.lint_pr_title.outputs.error_message == null }}
-        uses: marocchino/sticky-pull-request-comment@efaaab3fd41a9c3de579aba759d2552635e590fd
+        uses: marocchino/sticky-pull-request-comment@331f8f5b4215f0445d3c07b4967662a32a2d3e31
         with:   
           header: pr-title-lint-error
           delete: true

From dc11559cb79caf0509547ba8aee32309cccad18c Mon Sep 17 00:00:00 2001
From: Oriol <oriol.abadal@mongodb.com>
Date: Wed, 7 Feb 2024 13:54:20 +0100
Subject: [PATCH 05/19] chore: Upgrades
 `privatelink_endpoint_service_data_federation_online_archive` resource to
 auto-generated SDK (#1910)

* rename of methods

* migrate resource to new SDK

* migrate data source

* migration test

* lint

* skip tests

* skip test

* fix

* remove skip test

* enable in CI

* pr comments

* lint
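
Taken together, the renames and SDK swap follow one pattern:
`meta.(*config.MongoDBClient).AtlasV2` replaces the old hand-written client,
request builders are sent with `Execute()`, and optional response fields are
read through generated nil-safe getters. A minimal sketch of the new call
site, using only signatures that appear in this diff (`getEndpointComment`
is a hypothetical wrapper):

```go
package example

import (
	"context"

	"go.mongodb.org/atlas-sdk/v20231115005/admin"
)

// getEndpointComment reads one private endpoint with the auto-generated
// SDK: the request is built fluently and sent with Execute(), and the
// optional comment field is read through a nil-safe getter.
func getEndpointComment(ctx context.Context, connV2 *admin.APIClient, projectID, endpointID string) (string, error) {
	privateEndpoint, _, err := connV2.DataFederationApi.
		GetDataFederationPrivateEndpoint(ctx, projectID, endpointID).
		Execute()
	if err != nil {
		return "", err
	}
	// GetComment returns the zero value when the field is unset.
	return privateEndpoint.GetComment(), nil
}
```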
---
 .github/workflows/acceptance-tests-runner.yml |  4 ++
 .github/workflows/acceptance-tests.yml        |  1 +
 .github/workflows/migration-tests.yml         |  1 +
 ..._service_data_federation_online_archive.go | 14 ++--
 ...ice_data_federation_online_archive_test.go |  8 +--
 ...service_data_federation_online_archives.go | 22 +++----
 ...ce_data_federation_online_archives_test.go |  8 +--
 ..._service_data_federation_online_archive.go | 66 ++++++++++---------
 ...ederation_online_archive_migration_test.go | 41 ++++++++++++
 ...ice_data_federation_online_archive_test.go | 38 +++++------
 internal/testutil/mig/pre_check.go            |  6 ++
 11 files changed, 132 insertions(+), 77 deletions(-)
 create mode 100644 internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive_migration_test.go

diff --git a/.github/workflows/acceptance-tests-runner.yml b/.github/workflows/acceptance-tests-runner.yml
index 938780945f..0c47d41678 100644
--- a/.github/workflows/acceptance-tests-runner.yml
+++ b/.github/workflows/acceptance-tests-runner.yml
@@ -82,6 +82,9 @@ on:
       mongodb_atlas_federated_org_id:
         type: string
         required: true
+      mongodb_atlas_private_endpoint_id:
+        type: string
+        required: true
     secrets: # all secrets are passed explicitly in this workflow
       mongodb_atlas_public_key:
         required: true
@@ -477,6 +480,7 @@ jobs:
           AWS_SECURITY_GROUP_2: ${{ vars.AWS_SECURITY_GROUP_2 }}
           AWS_VPC_CIDR_BLOCK: ${{ vars.AWS_VPC_CIDR_BLOCK }}
           AWS_VPC_ID: ${{ vars.AWS_VPC_ID }}
+          MONGODB_ATLAS_PRIVATE_ENDPOINT_ID: ${{ inputs.mongodb_atlas_private_endpoint_id }}
           TEST_REGEX: "^TestAccNetwork"
         run: make testacc
   config:
diff --git a/.github/workflows/acceptance-tests.yml b/.github/workflows/acceptance-tests.yml
index bd38509321..a38cd03018 100644
--- a/.github/workflows/acceptance-tests.yml
+++ b/.github/workflows/acceptance-tests.yml
@@ -80,3 +80,4 @@ jobs:
       mongodb_atlas_federated_issuer_uri: ${{ vars.MONGODB_ATLAS_FEDERATED_ISSUER_URI }}
       mongodb_atlas_federated_project_id: ${{ inputs.atlas_cloud_env == 'qa' && vars.MONGODB_ATLAS_FEDERATED_PROJECT_ID_QA || vars.MONGODB_ATLAS_FEDERATED_PROJECT_ID }}
       mongodb_atlas_federated_org_id: ${{ inputs.atlas_cloud_env == 'qa' && vars.MONGODB_ATLAS_FEDERATED_ORG_ID_QA || vars.MONGODB_ATLAS_FEDERATED_ORG_ID }}
+      mongodb_atlas_private_endpoint_id: ${{ vars.MONGODB_ATLAS_PRIVATE_ENDPOINT_ID }}
diff --git a/.github/workflows/migration-tests.yml b/.github/workflows/migration-tests.yml
index 0883012d46..663ff934f5 100644
--- a/.github/workflows/migration-tests.yml
+++ b/.github/workflows/migration-tests.yml
@@ -384,6 +384,7 @@ jobs:
           AWS_VPC_CIDR_BLOCK: ${{ vars.AWS_VPC_CIDR_BLOCK }}
           AWS_VPC_ID: ${{ vars.AWS_VPC_ID }}
           MONGODB_ATLAS_LAST_VERSION: ${{ needs.get-provider-version.outputs.provider_version }}
+          MONGODB_ATLAS_PRIVATE_ENDPOINT_ID: ${{ vars.MONGODB_ATLAS_PRIVATE_ENDPOINT_ID }}
           TEST_REGEX: "^TestAccMigrationNetwork"
         run: make testacc
   encryption:
diff --git a/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archive.go b/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archive.go
index 4c25859e4d..e8239cd74e 100644
--- a/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archive.go
+++ b/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archive.go
@@ -11,7 +11,7 @@ import (
 
 func DataSource() *schema.Resource {
 	return &schema.Resource{
-		ReadContext: dataSourceMongoDBAtlasPrivatelinkEndpointServiceDataFederationOnlineArchiveRead,
+		ReadContext: dataSourceRead,
 		Schema: map[string]*schema.Schema{
 			"project_id": {
 				Type:     schema.TypeString,
@@ -37,25 +37,25 @@ func DataSource() *schema.Resource {
 	}
 }
 
-func dataSourceMongoDBAtlasPrivatelinkEndpointServiceDataFederationOnlineArchiveRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	conn := meta.(*config.MongoDBClient).Atlas
+func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
 	projectID := d.Get("project_id").(string)
 	endopointID := d.Get("endpoint_id").(string)
 
-	privateEndpoint, _, err := conn.DataLakes.GetPrivateLinkEndpoint(context.Background(), projectID, endopointID)
+	privateEndpoint, _, err := connV2.DataFederationApi.GetDataFederationPrivateEndpoint(ctx, projectID, endopointID).Execute()
 	if err != nil {
 		return diag.Errorf(errorPrivateEndpointServiceDataFederationOnlineArchiveRead, endopointID, projectID, err)
 	}
 
-	if err := d.Set("comment", privateEndpoint.Comment); err != nil {
+	if err := d.Set("comment", privateEndpoint.GetComment()); err != nil {
 		return diag.Errorf(errorPrivateEndpointServiceDataFederationOnlineArchiveRead, endopointID, projectID, err)
 	}
 
-	if err := d.Set("provider_name", privateEndpoint.Provider); err != nil {
+	if err := d.Set("provider_name", privateEndpoint.GetProvider()); err != nil {
 		return diag.Errorf(errorPrivateEndpointServiceDataFederationOnlineArchiveRead, endopointID, projectID, err)
 	}
 
-	if err := d.Set("type", privateEndpoint.Type); err != nil {
+	if err := d.Set("type", privateEndpoint.GetType()); err != nil {
 		return diag.Errorf(errorPrivateEndpointServiceDataFederationOnlineArchiveRead, endopointID, projectID, err)
 	}
 
diff --git a/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archive_test.go b/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archive_test.go
index b8c5e01db1..7d24e4b7f5 100644
--- a/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archive_test.go
+++ b/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archive_test.go
@@ -16,12 +16,12 @@ func TestAccNetworkPrivatelinkEndpointServiceDataFederationOnlineArchiveDS_basic
 	resource.Test(t, resource.TestCase{
 		PreCheck:                 func() { acc.PreCheck(t); acc.PreCheckPrivateEndpointServiceDataFederationOnlineArchiveRun(t) },
 		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
-		CheckDestroy:             testAccCheckMongoDBAtlasPrivateEndpointServiceDataFederationOnlineArchiveDestroy,
+		CheckDestroy:             checkDestroy,
 		Steps: []resource.TestStep{
 			{
-				Config: testAccDataSourceMongoDBAtlasPrivateEndpointServiceDataFederationOnlineArchiveConfig(projectID, endpointID),
+				Config: dataSourcesConfigBasic(projectID, endpointID),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckMongoDBAtlasPrivateEndpointServiceDataFederationOnlineArchiveExists(resourceNamePrivatelinkEdnpointServiceDataFederationOnlineArchive),
+					checkExists(resourceName),
 					resource.TestCheckResourceAttr(dataSourcePrivatelinkEndpointServiceDataFederetionDataArchive, "project_id", projectID),
 					resource.TestCheckResourceAttr(dataSourcePrivatelinkEndpointServiceDataFederetionDataArchive, "endpoint_id", endpointID),
 					resource.TestCheckResourceAttrSet(dataSourcePrivatelinkEndpointServiceDataFederetionDataArchive, "comment"),
@@ -33,7 +33,7 @@ func TestAccNetworkPrivatelinkEndpointServiceDataFederationOnlineArchiveDS_basic
 	})
 }
 
-func testAccDataSourceMongoDBAtlasPrivateEndpointServiceDataFederationOnlineArchiveConfig(projectID, endpointID string) string {
+func dataSourcesConfigBasic(projectID, endpointID string) string {
 	return fmt.Sprintf(`
 	resource "mongodbatlas_privatelink_endpoint_service_data_federation_online_archive" "test" {
 	  project_id				= %[1]q
diff --git a/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives.go b/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives.go
index 5e6619cb3b..39037770fe 100644
--- a/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives.go
+++ b/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives.go
@@ -9,14 +9,14 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/config"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/service/datalakepipeline"
-	matlas "go.mongodb.org/atlas/mongodbatlas"
+	"go.mongodb.org/atlas-sdk/v20231115005/admin"
 )
 
 const errorPrivateEndpointServiceDataFederationOnlineArchiveList = "error reading Private Endpoings for projectId %s: %s"
 
 func PluralDataSource() *schema.Resource {
 	return &schema.Resource{
-		ReadContext: dataSourceMongoDBAtlasPrivatelinkEndpointServiceDataFederationOnlineArchivesRead,
+		ReadContext: dataSourcePluralRead,
 		Schema: map[string]*schema.Schema{
 			"project_id": {
 				Type:     schema.TypeString,
@@ -50,16 +50,16 @@ func PluralDataSource() *schema.Resource {
 	}
 }
 
-func dataSourceMongoDBAtlasPrivatelinkEndpointServiceDataFederationOnlineArchivesRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	conn := meta.(*config.MongoDBClient).Atlas
+func dataSourcePluralRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
 	projectID := d.Get("project_id").(string)
 
-	privateEndpoints, _, err := conn.DataLakes.ListPrivateLinkEndpoint(context.Background(), projectID)
+	privateEndpoints, _, err := connV2.DataFederationApi.ListDataFederationPrivateEndpoints(ctx, projectID).Execute()
 	if err != nil {
 		return diag.Errorf(errorPrivateEndpointServiceDataFederationOnlineArchiveList, projectID, err)
 	}
 
-	if err := d.Set("results", flattenPrivateLinkEndpointDataLakeResponse(privateEndpoints.Results)); err != nil {
+	if err := d.Set("results", flattenPrivateLinkEndpointDataLakeResponse(privateEndpoints.GetResults())); err != nil {
 		return diag.FromErr(fmt.Errorf(datalakepipeline.ErrorDataLakeSetting, "results", projectID, err))
 	}
 
@@ -68,7 +68,7 @@ func dataSourceMongoDBAtlasPrivatelinkEndpointServiceDataFederationOnlineArchive
 	return nil
 }
 
-func flattenPrivateLinkEndpointDataLakeResponse(atlasPrivateLinkEndpointDataLakes []*matlas.PrivateLinkEndpointDataLake) []map[string]any {
+func flattenPrivateLinkEndpointDataLakeResponse(atlasPrivateLinkEndpointDataLakes []admin.PrivateNetworkEndpointIdEntry) []map[string]any {
 	if len(atlasPrivateLinkEndpointDataLakes) == 0 {
 		return []map[string]any{}
 	}
@@ -77,10 +77,10 @@ func flattenPrivateLinkEndpointDataLakeResponse(atlasPrivateLinkEndpointDataLake
 
 	for i, atlasPrivateLinkEndpointDataLake := range atlasPrivateLinkEndpointDataLakes {
 		results[i] = map[string]any{
-			"endpoint_id":   atlasPrivateLinkEndpointDataLake.EndpointID,
-			"provider_name": atlasPrivateLinkEndpointDataLake.Provider,
-			"comment":       atlasPrivateLinkEndpointDataLake.Comment,
-			"type":          atlasPrivateLinkEndpointDataLake.Type,
+			"endpoint_id":   atlasPrivateLinkEndpointDataLake.GetEndpointId(),
+			"provider_name": atlasPrivateLinkEndpointDataLake.GetProvider(),
+			"comment":       atlasPrivateLinkEndpointDataLake.GetComment(),
+			"type":          atlasPrivateLinkEndpointDataLake.GetType(),
 		}
 	}
 
diff --git a/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives_test.go b/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives_test.go
index bea55689e2..4081efc3ca 100644
--- a/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives_test.go
+++ b/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives_test.go
@@ -16,12 +16,12 @@ func TestAccNetworkPrivatelinkEndpointServiceDataFederationOnlineArchivesDSPlura
 	resource.Test(t, resource.TestCase{
 		PreCheck:                 func() { acc.PreCheck(t); acc.PreCheckPrivateEndpointServiceDataFederationOnlineArchiveRun(t) },
 		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
-		CheckDestroy:             testAccCheckMongoDBAtlasPrivateEndpointServiceDataFederationOnlineArchiveDestroy,
+		CheckDestroy:             checkDestroy,
 		Steps: []resource.TestStep{
 			{
-				Config: testAccDataSourceMongoDBAtlasPrivateEndpointServiceDataFederationOnlineArchivesConfig(projectID, endpointID),
+				Config: dataSourceConfigBasic(projectID, endpointID),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckMongoDBAtlasPrivateEndpointServiceDataFederationOnlineArchiveExists(resourceNamePrivatelinkEdnpointServiceDataFederationOnlineArchive),
+					checkExists(resourceName),
 					resource.TestCheckResourceAttr(dataSourcePrivatelinkEndpointServiceDataFederetionDataArchives, "project_id", projectID),
 					resource.TestCheckResourceAttrSet(dataSourcePrivatelinkEndpointServiceDataFederetionDataArchives, "results.#"),
 				),
@@ -30,7 +30,7 @@ func TestAccNetworkPrivatelinkEndpointServiceDataFederationOnlineArchivesDSPlura
 	})
 }
 
-func testAccDataSourceMongoDBAtlasPrivateEndpointServiceDataFederationOnlineArchivesConfig(projectID, endpointID string) string {
+func dataSourceConfigBasic(projectID, endpointID string) string {
 	return fmt.Sprintf(`
 	resource "mongodbatlas_privatelink_endpoint_service_data_federation_online_archive" "test" {
 	  project_id				= %[1]q
diff --git a/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive.go b/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive.go
index a66d6487fd..14cf5a9a9f 100644
--- a/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive.go
+++ b/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive.go
@@ -8,7 +8,7 @@ import (
 	"strings"
 	"time"
 
-	matlas "go.mongodb.org/atlas/mongodbatlas"
+	"go.mongodb.org/atlas-sdk/v20231115005/admin"
 
 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@@ -26,11 +26,11 @@ const (
 
 func Resource() *schema.Resource {
 	return &schema.Resource{
-		CreateContext: resourceMongoDBAtlasPrivatelinkEndpointServiceDataFederationOnlineArchiveCreate,
-		ReadContext:   resourceMongoDBAtlasPrivatelinkEndpointServiceDataFederationOnlineArchiveRead,
-		DeleteContext: resourceMongoDBAtlasPrivatelinkEndpointServiceDataFederationOnlineArchiveDelete,
+		CreateContext: resourceCreate,
+		ReadContext:   resourceRead,
+		DeleteContext: resourceDelete,
 		Importer: &schema.ResourceImporter{
-			StateContext: resourceMongoDBAtlasPrivatelinkEndpointServiceDataFederationOnlineArchiveImportState,
+			StateContext: resourceImport,
 		},
 		Schema: map[string]*schema.Schema{
 			"project_id": {
@@ -65,12 +65,12 @@ func Resource() *schema.Resource {
 	}
 }
 
-func resourceMongoDBAtlasPrivatelinkEndpointServiceDataFederationOnlineArchiveCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	conn := meta.(*config.MongoDBClient).Atlas
+func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
 	projectID := d.Get("project_id").(string)
 	endpointID := d.Get("endpoint_id").(string)
 
-	_, _, err := conn.DataLakes.CreatePrivateLinkEndpoint(ctx, projectID, newPrivateLinkEndpointDataLake(d))
+	_, _, err := connV2.DataFederationApi.CreateDataFederationPrivateEndpoint(ctx, projectID, newPrivateNetworkEndpointIDEntry(d)).Execute()
 	if err != nil {
 		return diag.FromErr(fmt.Errorf(errorPrivateEndpointServiceDataFederationOnlineArchiveCreate, projectID, err))
 	}
@@ -80,47 +80,46 @@ func resourceMongoDBAtlasPrivatelinkEndpointServiceDataFederationOnlineArchiveCr
 		"endpoint_id": endpointID,
 	}))
 
-	return resourceMongoDBAtlasPrivatelinkEndpointServiceDataFederationOnlineArchiveRead(ctx, d, meta)
+	return resourceRead(ctx, d, meta)
 }
 
-func resourceMongoDBAtlasPrivatelinkEndpointServiceDataFederationOnlineArchiveRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	conn := meta.(*config.MongoDBClient).Atlas
+func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
 	ids := conversion.DecodeStateID(d.Id())
 	projectID := ids["project_id"]
 	endopointID := ids["endpoint_id"]
 
-	privateEndpoint, resp, err := conn.DataLakes.GetPrivateLinkEndpoint(context.Background(), projectID, endopointID)
+	privateEndpoint, resp, err := connV2.DataFederationApi.GetDataFederationPrivateEndpoint(ctx, projectID, endopointID).Execute()
 	if err != nil {
 		if resp != nil && resp.StatusCode == http.StatusNotFound {
 			d.SetId("")
 			return nil
 		}
-
 		return diag.Errorf(errorPrivateEndpointServiceDataFederationOnlineArchiveRead, endopointID, projectID, err)
 	}
 
-	if err := d.Set("comment", privateEndpoint.Comment); err != nil {
+	if err := d.Set("comment", privateEndpoint.GetComment()); err != nil {
 		return diag.Errorf(errorPrivateEndpointServiceDataFederationOnlineArchiveRead, endopointID, projectID, err)
 	}
 
-	if err := d.Set("provider_name", privateEndpoint.Provider); err != nil {
+	if err := d.Set("provider_name", privateEndpoint.GetProvider()); err != nil {
 		return diag.Errorf(errorPrivateEndpointServiceDataFederationOnlineArchiveRead, endopointID, projectID, err)
 	}
 
-	if err := d.Set("type", privateEndpoint.Type); err != nil {
+	if err := d.Set("type", privateEndpoint.GetType()); err != nil {
 		return diag.Errorf(errorPrivateEndpointServiceDataFederationOnlineArchiveRead, endopointID, projectID, err)
 	}
 
 	return nil
 }
 
-func resourceMongoDBAtlasPrivatelinkEndpointServiceDataFederationOnlineArchiveDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	conn := meta.(*config.MongoDBClient).Atlas
+func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
 	ids := conversion.DecodeStateID(d.Id())
 	projectID := ids["project_id"]
 	endpointID := ids["endpoint_id"]
 
-	_, err := conn.DataLakes.DeletePrivateLinkEndpoint(ctx, projectID, endpointID)
+	_, _, err := connV2.DataFederationApi.DeleteDataFederationPrivateEndpoint(ctx, projectID, endpointID).Execute()
 	if err != nil {
 		return diag.FromErr(fmt.Errorf(errorPrivateEndpointServiceDataFederationOnlineArchiveDelete, endpointID, projectID, err))
 	}
@@ -130,31 +129,31 @@ func resourceMongoDBAtlasPrivatelinkEndpointServiceDataFederationOnlineArchiveDe
 	return nil
 }
 
-func resourceMongoDBAtlasPrivatelinkEndpointServiceDataFederationOnlineArchiveImportState(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) {
-	conn := meta.(*config.MongoDBClient).Atlas
+func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) {
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
 	projectID, endpointID, err := splitAtlasPrivatelinkEndpointServiceDataFederationOnlineArchive(d.Id())
 	if err != nil {
 		return nil, err
 	}
 
-	privateEndpoint, _, err := conn.DataLakes.GetPrivateLinkEndpoint(ctx, projectID, endpointID)
+	privateEndpoint, _, err := connV2.DataFederationApi.GetDataFederationPrivateEndpoint(ctx, projectID, endpointID).Execute()
 	if err != nil {
 		return nil, fmt.Errorf(errorPrivateEndpointServiceDataFederationOnlineArchiveImport, endpointID, projectID, err)
 	}
 
-	if err := d.Set("comment", privateEndpoint.Comment); err != nil {
+	if err := d.Set("comment", privateEndpoint.GetComment()); err != nil {
 		return nil, fmt.Errorf(errorPrivateEndpointServiceDataFederationOnlineArchiveImport, endpointID, projectID, err)
 	}
 
-	if err := d.Set("provider_name", privateEndpoint.Provider); err != nil {
+	if err := d.Set("provider_name", privateEndpoint.GetProvider()); err != nil {
 		return nil, fmt.Errorf(errorPrivateEndpointServiceDataFederationOnlineArchiveImport, endpointID, projectID, err)
 	}
 
-	if err := d.Set("type", privateEndpoint.Type); err != nil {
+	if err := d.Set("type", privateEndpoint.GetType()); err != nil {
 		return nil, fmt.Errorf(errorPrivateEndpointServiceDataFederationOnlineArchiveImport, endpointID, projectID, err)
 	}
 
-	if err := d.Set("endpoint_id", privateEndpoint.EndpointID); err != nil {
+	if err := d.Set("endpoint_id", privateEndpoint.GetEndpointId()); err != nil {
 		return nil, fmt.Errorf(errorPrivateEndpointServiceDataFederationOnlineArchiveImport, endpointID, projectID, err)
 	}
 
@@ -184,18 +183,21 @@ func splitAtlasPrivatelinkEndpointServiceDataFederationOnlineArchive(id string)
 	return
 }
 
-func newPrivateLinkEndpointDataLake(d *schema.ResourceData) *matlas.PrivateLinkEndpointDataLake {
-	out := matlas.PrivateLinkEndpointDataLake{
-		EndpointID: d.Get("endpoint_id").(string),
-		Type:       endpointType,
+func newPrivateNetworkEndpointIDEntry(d *schema.ResourceData) *admin.PrivateNetworkEndpointIdEntry {
+	endpointType := endpointType
+	out := admin.PrivateNetworkEndpointIdEntry{
+		EndpointId: d.Get("endpoint_id").(string),
+		Type:       &endpointType,
 	}
 
 	if v, ok := d.GetOk("comment"); ok {
-		out.Comment = v.(string)
+		comment := v.(string)
+		out.Comment = &comment
 	}
 
 	if v, ok := d.GetOk("provider_name"); ok && v != "" {
-		out.Provider = strings.ToUpper(v.(string))
+		providerName := strings.ToUpper(v.(string))
+		out.Provider = &providerName
 	}
 
 	return &out
diff --git a/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive_migration_test.go b/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive_migration_test.go
new file mode 100644
index 0000000000..f2019eaf2d
--- /dev/null
+++ b/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive_migration_test.go
@@ -0,0 +1,41 @@
+package privatelinkendpointservicedatafederationonlinearchive_test
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
+	"github.com/hashicorp/terraform-plugin-testing/plancheck"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/mig"
+)
+
+func TestAccMigrationNetworkPrivatelinkEndpointServiceDataFederationOnlineArchive_basic(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { mig.PreCheckPrivateEndpointServiceDataFederationOnlineArchiveRun(t) },
+		CheckDestroy: checkDestroy,
+		Steps: []resource.TestStep{
+			{
+				ExternalProviders: mig.ExternalProviders(),
+				Config:            resourceConfigBasic(projectID, endpointID),
+				Check: resource.ComposeTestCheckFunc(
+					checkExists(resourceName),
+					resource.TestCheckResourceAttr(resourceName, "project_id", projectID),
+					resource.TestCheckResourceAttr(resourceName, "endpoint_id", endpointID),
+					resource.TestCheckResourceAttrSet(resourceName, "comment"),
+					resource.TestCheckResourceAttrSet(resourceName, "type"),
+					resource.TestCheckResourceAttrSet(resourceName, "provider_name"),
+				),
+			},
+			{
+				ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
+				Config:                   resourceConfigBasic(projectID, endpointID),
+				ConfigPlanChecks: resource.ConfigPlanChecks{
+					PreApply: []plancheck.PlanCheck{
+						acc.DebugPlan(),
+						plancheck.ExpectEmptyPlan(),
+					},
+				},
+			},
+		},
+	})
+}
diff --git a/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive_test.go b/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive_test.go
index 569f2519a6..c35e366243 100644
--- a/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive_test.go
+++ b/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive_test.go
@@ -13,31 +13,31 @@ import (
 )
 
 var (
-	resourceNamePrivatelinkEdnpointServiceDataFederationOnlineArchive = "mongodbatlas_privatelink_endpoint_service_data_federation_online_archive.test"
-	projectID                                                         = os.Getenv("MONGODB_ATLAS_PROJECT_ID")
-	endpointID                                                        = os.Getenv("MONGODB_ATLAS_PRIVATE_ENDPOINT_ID")
+	resourceName = "mongodbatlas_privatelink_endpoint_service_data_federation_online_archive.test"
+	projectID    = os.Getenv("MONGODB_ATLAS_PROJECT_ID")
+	endpointID   = os.Getenv("MONGODB_ATLAS_PRIVATE_ENDPOINT_ID")
 )
 
 func TestAccNetworkPrivatelinkEndpointServiceDataFederationOnlineArchive_basic(t *testing.T) {
 	resource.Test(t, resource.TestCase{
 		PreCheck:                 func() { acc.PreCheck(t); acc.PreCheckPrivateEndpointServiceDataFederationOnlineArchiveRun(t) },
 		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
-		CheckDestroy:             testAccCheckMongoDBAtlasPrivateEndpointServiceDataFederationOnlineArchiveDestroy,
+		CheckDestroy:             checkDestroy,
 		Steps: []resource.TestStep{
 			{
-				Config: testAccMongoDBAtlasPrivateEndpointServiceDataFederationOnlineArchiveConfig(projectID, endpointID),
+				Config: resourceConfigBasic(projectID, endpointID),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckMongoDBAtlasPrivateEndpointServiceDataFederationOnlineArchiveExists(resourceNamePrivatelinkEdnpointServiceDataFederationOnlineArchive),
-					resource.TestCheckResourceAttr(resourceNamePrivatelinkEdnpointServiceDataFederationOnlineArchive, "project_id", projectID),
-					resource.TestCheckResourceAttr(resourceNamePrivatelinkEdnpointServiceDataFederationOnlineArchive, "endpoint_id", endpointID),
-					resource.TestCheckResourceAttrSet(resourceNamePrivatelinkEdnpointServiceDataFederationOnlineArchive, "comment"),
-					resource.TestCheckResourceAttrSet(resourceNamePrivatelinkEdnpointServiceDataFederationOnlineArchive, "type"),
-					resource.TestCheckResourceAttrSet(resourceNamePrivatelinkEdnpointServiceDataFederationOnlineArchive, "provider_name"),
+					checkExists(resourceName),
+					resource.TestCheckResourceAttr(resourceName, "project_id", projectID),
+					resource.TestCheckResourceAttr(resourceName, "endpoint_id", endpointID),
+					resource.TestCheckResourceAttrSet(resourceName, "comment"),
+					resource.TestCheckResourceAttrSet(resourceName, "type"),
+					resource.TestCheckResourceAttrSet(resourceName, "provider_name"),
 				),
 			},
 			{
-				ResourceName:      resourceNamePrivatelinkEdnpointServiceDataFederationOnlineArchive,
-				ImportStateIdFunc: testAccCheckMongoDBAtlasPrivatelinkEndpointServiceDataFederationOnlineArchiveFunc(resourceNamePrivatelinkEdnpointServiceDataFederationOnlineArchive),
+				ResourceName:      resourceName,
+				ImportStateIdFunc: importStateIDFunc(resourceName),
 				ImportState:       true,
 				ImportStateVerify: true,
 			},
@@ -45,7 +45,7 @@ func TestAccNetworkPrivatelinkEndpointServiceDataFederationOnlineArchive_basic(t
 	})
 }
 
-func testAccCheckMongoDBAtlasPrivatelinkEndpointServiceDataFederationOnlineArchiveFunc(resourceName string) resource.ImportStateIdFunc {
+func importStateIDFunc(resourceName string) resource.ImportStateIdFunc {
 	return func(s *terraform.State) (string, error) {
 		rs, ok := s.RootModule().Resources[resourceName]
 		if !ok {
@@ -58,13 +58,13 @@ func testAccCheckMongoDBAtlasPrivatelinkEndpointServiceDataFederationOnlineArchi
 	}
 }
 
-func testAccCheckMongoDBAtlasPrivateEndpointServiceDataFederationOnlineArchiveDestroy(s *terraform.State) error {
+func checkDestroy(s *terraform.State) error {
 	for _, rs := range s.RootModule().Resources {
 		if rs.Type != "mongodbatlas_privatelink_endpoint_service_data_federation_online_archive" {
 			continue
 		}
 		ids := conversion.DecodeStateID(rs.Primary.ID)
-		_, _, err := acc.Conn().DataLakes.GetPrivateLinkEndpoint(context.Background(), ids["project_id"], ids["endpoint_id"])
+		_, _, err := acc.ConnV2().DataFederationApi.GetDataFederationPrivateEndpoint(context.Background(), ids["project_id"], ids["endpoint_id"]).Execute()
 		if err == nil {
 			return fmt.Errorf("Private endpoint service data federation online archive still exists")
 		}
@@ -72,7 +72,7 @@ func testAccCheckMongoDBAtlasPrivateEndpointServiceDataFederationOnlineArchiveDe
 	return nil
 }
 
-func testAccCheckMongoDBAtlasPrivateEndpointServiceDataFederationOnlineArchiveExists(resourceName string) resource.TestCheckFunc {
+func checkExists(resourceName string) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
 		rs, ok := s.RootModule().Resources[resourceName]
 		if !ok {
@@ -82,7 +82,7 @@ func testAccCheckMongoDBAtlasPrivateEndpointServiceDataFederationOnlineArchiveEx
 			return fmt.Errorf("Private endpoint service data federation online archive ID not set")
 		}
 		ids := conversion.DecodeStateID(rs.Primary.ID)
-		_, _, err := acc.Conn().DataLakes.GetPrivateLinkEndpoint(context.Background(), ids["project_id"], ids["endpoint_id"])
+		_, _, err := acc.ConnV2().DataFederationApi.GetDataFederationPrivateEndpoint(context.Background(), ids["project_id"], ids["endpoint_id"]).Execute()
 		if err != nil {
 			return err
 		}
@@ -90,7 +90,7 @@ func testAccCheckMongoDBAtlasPrivateEndpointServiceDataFederationOnlineArchiveEx
 	}
 }
 
-func testAccMongoDBAtlasPrivateEndpointServiceDataFederationOnlineArchiveConfig(projectID, endpointID string) string {
+func resourceConfigBasic(projectID, endpointID string) string {
 	return fmt.Sprintf(`
 	resource "mongodbatlas_privatelink_endpoint_service_data_federation_online_archive" "test" {
 	  project_id				= %[1]q
diff --git a/internal/testutil/mig/pre_check.go b/internal/testutil/mig/pre_check.go
index 261a10ee21..ca34c9484f 100644
--- a/internal/testutil/mig/pre_check.go
+++ b/internal/testutil/mig/pre_check.go
@@ -34,3 +34,9 @@ func PreCheckAtlasUsername(tb testing.TB) {
 	checkLastVersion(tb)
 	acc.PreCheckAtlasUsername(tb)
 }
+
+func PreCheckPrivateEndpointServiceDataFederationOnlineArchiveRun(tb testing.TB) {
+	tb.Helper()
+	checkLastVersion(tb)
+	acc.PreCheckPrivateEndpointServiceDataFederationOnlineArchiveRun(tb)
+}

From 3d5c0ee9e6b8cc6a09bf9d2f44857835bea12bd0 Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Wed, 7 Feb 2024 16:23:56 +0100
Subject: [PATCH 06/19] remove 1.2.x from the scheduled Terraform matrix
 (#1921)

---
 .github/workflows/test-suite.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml
index f8d84552ad..43bc8730f3 100644
--- a/.github/workflows/test-suite.yml
+++ b/.github/workflows/test-suite.yml
@@ -21,7 +21,7 @@ concurrency:
 jobs:
   versions:
     env:
-      schedule_terraform_matrix: '["1.7.x", "1.2.x"]'
+      schedule_terraform_matrix: '["1.7.x"]'
       schedule_provider_matrix: '[""]' # "" for latest version    
     runs-on: ubuntu-latest
     outputs:

From b17086356a93e50a32dd8259de6c2cbd77d17cff Mon Sep 17 00:00:00 2001
From: Marco Suma <marco.suma@mongodb.com>
Date: Wed, 7 Feb 2024 20:40:09 +0100
Subject: [PATCH 07/19] doc: Generates changelog for v1.15.1 (#1919)

* chore: Generates changelog for v1.15.1

* Update CHANGELOG.md

Co-authored-by: Leo Antoli <430982+lantoli@users.noreply.github.com>

* update with latest changes

* Update CHANGELOG.md

Co-authored-by: zach-carr <54542042+zach-carr@users.noreply.github.com>

---------

Co-authored-by: Leo Antoli <430982+lantoli@users.noreply.github.com>
Co-authored-by: Oriol Arbusi <oriol.abadal@mongodb.com>
Co-authored-by: zach-carr <54542042+zach-carr@users.noreply.github.com>
---
 .github_changelog_generator |  4 ++--
 CHANGELOG.md                | 29 +++++++++++++++++++++++++++--
 2 files changed, 29 insertions(+), 4 deletions(-)

diff --git a/.github_changelog_generator b/.github_changelog_generator
index 94349641cc..ea03c2c63b 100644
--- a/.github_changelog_generator
+++ b/.github_changelog_generator
@@ -1,4 +1,4 @@
-future-release=v1.15.0
-since-tag=v1.14.0
+future-release=v1.15.1
+since-tag=v1.15.0
 date-format=%Y-%m-%d
 base=CHANGELOG.md
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2c6116abfd..aaffc98400 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,8 +1,33 @@
 # Changelog
 
-## [v1.15.0-pre](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/v1.15.0-pre) (2024-02-01)
+## [v1.15.1](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/v1.15.1) (2024-02-07)
 
-[Full Changelog](https://github.com/mongodb/terraform-provider-mongodbatlas/compare/v1.14.0...v1.15.0-pre)
+[Full Changelog](https://github.com/mongodb/terraform-provider-mongodbatlas/compare/v1.15.0...v1.15.1)
+
+**Bug Fixes**
+
+- fix: Sets `replication_specs` IDs when updating them. [\#1876](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/1876) ([marcosuma](https://github.com/marcosuma))
+
+**Internal Improvements**
+
+
+- chore: Upgrades `privatelink_endpoint_service_data_federation_online_archive` resource to auto-generated SDK [\#1910](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/1910) ([oarbusi](https://github.com/oarbusi))
+- chore: Fixes test for `federated_settings_identity_provider` in QA environment [\#1912](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/1912) ([oarbusi](https://github.com/oarbusi))
+- chore: Upgrades `privatelink_endpoint_serverless` resource to auto-generated SDK [\#1908](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/1908) ([oarbusi](https://github.com/oarbusi))
+- chore: Fixes acceptance and migrations tests not running in CI [\#1907](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/1907) ([lantoli](https://github.com/lantoli))
+- chore: Upgrades `roles_org_id` resource to auto-generated SDK [\#1906](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/1906) ([lantoli](https://github.com/lantoli))
+- chore: Upgrades `teams` resource to auto-generated SDK [\#1905](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/1905) ([oarbusi](https://github.com/oarbusi))
+- doc: Fixes `mongodbatlas_privatelink_endpoint_service_data_federation_online_archives` doc [\#1903](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/1903) ([nsmith78660](https://github.com/nsmith78660))
+- doc: Fixes some of the typos within the `README.MD` for the PIT example [\#1902](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/1902) ([nsmith78660](https://github.com/nsmith78660))
+- chore: Upgrades `private_link_endpoint` resource to auto-generated SDK. [\#1901](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/1901) ([marcosuma](https://github.com/marcosuma))
+- test: Enables Acceptance test in CI for `mongodbatlas_federated_settings_identity_provider` [\#1895](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/1895) ([oarbusi](https://github.com/oarbusi))
+- chore: Upgrades `x509authentication_database_user` resource to auto-generated SDK [\#1884](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/1884) ([lantoli](https://github.com/lantoli))
+- chore: Bump marocchino/sticky-pull-request-comment from 2.8.0 to 2.9.0 [\#1916](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/1916) ([dependabot[bot]](https://github.com/apps/dependabot))
+- chore: Bump tj-actions/verify-changed-files [\#1914](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/1914) ([dependabot[bot]](https://github.com/apps/dependabot))
+
+## [v1.15.0](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/v1.15.0) (2024-02-01)
+
+[Full Changelog](https://github.com/mongodb/terraform-provider-mongodbatlas/compare/v1.14.0...v1.15.0)
 
 **Breaking changes:**
 

From fe04219ac8f46ef5c2993f451deb3372b65ac14e Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Thu, 8 Feb 2024 07:03:07 +0100
Subject: [PATCH 08/19] chore: Upgrades serverless_instance resource to
 auto-generated SDK (#1913)

* rename

* migration test

* new sdk in tests

* data source

* plural ds

* read

* import

* resourceRefreshFunc

* fix null reference

* refactor connV2 in plural ds

* create

* delete

* update

* fix PageNum nil reference

* remove unneeded comments

* mig TestStep
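
The new `internal/common/conversion` helpers centralize flatten/expand code
that was previously duplicated per service. A hedged usage sketch, assuming
the serverless instance model exposes the getters shown in this diff
(`flattenInstance` is a hypothetical name):

```go
package example

import (
	"go.mongodb.org/atlas-sdk/v20231115005/admin"

	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
)

// flattenInstance maps a generated SDK model onto schema values using
// the shared helpers introduced in this commit.
func flattenInstance(instance *admin.ServerlessInstanceDescription) map[string]any {
	return map[string]any{
		"links": conversion.FlattenLinks(instance.GetLinks()),
		"tags":  conversion.FlattenTags(instance.GetTags()),
		// CreateDate is *time.Time in the SDK; the helper converts it
		// to the string form the schema stores.
		"create_date": conversion.TimePtrToStringPtr(instance.CreateDate),
	}
}
```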
---
 internal/common/conversion/flatten_expand.go  |  57 +++
 internal/common/conversion/misc.go            |  16 -
 .../organization/data_source_organization.go  |   3 +-
 .../organization/data_source_organizations.go |  17 +-
 .../data_source_serverless_instance.go        | 159 +++++----
 .../data_source_serverless_instances.go       |  79 ++---
 .../resource_serverless_instance.go           | 332 ++++++++----------
 ...urce_serverless_instance_migration_test.go |  37 ++
 .../resource_serverless_instance_test.go      | 118 +++----
 internal/testutil/acc/serverless.go           |  10 +-
 internal/testutil/mig/test_step.go            |  20 ++
 11 files changed, 444 insertions(+), 404 deletions(-)
 create mode 100644 internal/common/conversion/flatten_expand.go
 create mode 100644 internal/service/serverlessinstance/resource_serverless_instance_migration_test.go
 create mode 100644 internal/testutil/mig/test_step.go

diff --git a/internal/common/conversion/flatten_expand.go b/internal/common/conversion/flatten_expand.go
new file mode 100644
index 0000000000..04b539790a
--- /dev/null
+++ b/internal/common/conversion/flatten_expand.go
@@ -0,0 +1,57 @@
+package conversion
+
+import (
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+
+	"go.mongodb.org/atlas-sdk/v20231115005/admin"
+)
+
+func FlattenLinks(links []admin.Link) []map[string]any {
+	ret := make([]map[string]any, len(links))
+	for i, link := range links {
+		ret[i] = map[string]any{
+			"href": link.GetHref(),
+			"rel":  link.GetRel(),
+		}
+	}
+	return ret
+}
+
+func FlattenTags(tags []admin.ResourceTag) []map[string]any {
+	ret := make([]map[string]any, len(tags))
+	for i, tag := range tags {
+		ret[i] = map[string]any{
+			"key":   tag.GetKey(),
+			"value": tag.GetValue(),
+		}
+	}
+	return ret
+}
+
+func ExpandTagsFromSetSchema(d *schema.ResourceData) []admin.ResourceTag {
+	list := d.Get("tags").(*schema.Set)
+	ret := make([]admin.ResourceTag, list.Len())
+	for i, item := range list.List() {
+		tag := item.(map[string]any)
+		ret[i] = admin.ResourceTag{
+			Key:   StringPtr(tag["key"].(string)),
+			Value: StringPtr(tag["value"].(string)),
+		}
+	}
+	return ret
+}
+
+func ExpandStringList(list []any) (res []string) {
+	for _, v := range list {
+		res = append(res, v.(string))
+	}
+	return
+}
+
+func ExpandStringListFromSetSchema(set *schema.Set) []string {
+	res := make([]string, set.Len())
+	for i, v := range set.List() {
+		res[i] = v.(string)
+	}
+	return res
+}
diff --git a/internal/common/conversion/misc.go b/internal/common/conversion/misc.go
index f13ed2afef..0441dc55c6 100644
--- a/internal/common/conversion/misc.go
+++ b/internal/common/conversion/misc.go
@@ -4,7 +4,6 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/spf13/cast"
 )
 
@@ -30,18 +29,3 @@ func ValRegion(reg any, opt ...string) (string, error) {
 
 	return strings.ReplaceAll(region, "-", "_"), nil
 }
-
-func ExpandStringList(list []any) (res []string) {
-	for _, v := range list {
-		res = append(res, v.(string))
-	}
-	return
-}
-
-func ExpandStringListFromSetSchema(set *schema.Set) []string {
-	res := make([]string, set.Len())
-	for i, v := range set.List() {
-		res[i] = v.(string)
-	}
-	return res
-}
diff --git a/internal/service/organization/data_source_organization.go b/internal/service/organization/data_source_organization.go
index 5460f6e037..6f08f3791c 100644
--- a/internal/service/organization/data_source_organization.go
+++ b/internal/service/organization/data_source_organization.go
@@ -7,6 +7,7 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/config"
 )
 
@@ -77,7 +78,7 @@ func dataSourceMongoDBAtlasOrganizationRead(ctx context.Context, d *schema.Resou
 		return diag.FromErr(fmt.Errorf("error setting `is_deleted`: %s", err))
 	}
 
-	if err := d.Set("links", flattenOrganizationLinks(organization.GetLinks())); err != nil {
+	if err := d.Set("links", conversion.FlattenLinks(organization.GetLinks())); err != nil {
 		return diag.FromErr(fmt.Errorf("error setting `is_deleted`: %s", err))
 	}
 
diff --git a/internal/service/organization/data_source_organizations.go b/internal/service/organization/data_source_organizations.go
index 78415aa0fd..1dfea9024a 100644
--- a/internal/service/organization/data_source_organizations.go
+++ b/internal/service/organization/data_source_organizations.go
@@ -13,6 +13,7 @@ import (
 	"github.com/mwielbut/pointy"
 
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/config"
 )
 
@@ -122,20 +123,6 @@ func dataSourceMongoDBAtlasOrganizationsRead(ctx context.Context, d *schema.Reso
 	return nil
 }
 
-func flattenOrganizationLinks(links []admin.Link) []map[string]any {
-	linksList := make([]map[string]any, 0)
-
-	for _, link := range links {
-		mLink := map[string]any{
-			"href": link.Href,
-			"rel":  link.Rel,
-		}
-		linksList = append(linksList, mLink)
-	}
-
-	return linksList
-}
-
 func flattenOrganizations(ctx context.Context, conn *admin.APIClient, organizations []admin.AtlasOrganization) []map[string]any {
 	var results []map[string]any
 
@@ -155,7 +142,7 @@ func flattenOrganizations(ctx context.Context, conn *admin.APIClient, organizati
 			"id":                         organization.Id,
 			"name":                       organization.Name,
 			"is_deleted":                 organization.IsDeleted,
-			"links":                      flattenOrganizationLinks(organization.GetLinks()),
+			"links":                      conversion.FlattenLinks(organization.GetLinks()),
 			"api_access_list_required":   settings.ApiAccessListRequired,
 			"multi_factor_auth_required": settings.MultiFactorAuthRequired,
 			"restrict_employee_access":   settings.RestrictEmployeeAccess,
diff --git a/internal/service/serverlessinstance/data_source_serverless_instance.go b/internal/service/serverlessinstance/data_source_serverless_instance.go
index 4f4eda190a..fe16ea9568 100644
--- a/internal/service/serverlessinstance/data_source_serverless_instance.go
+++ b/internal/service/serverlessinstance/data_source_serverless_instance.go
@@ -12,89 +12,12 @@ import (
 
 func DataSource() *schema.Resource {
 	return &schema.Resource{
-		ReadContext: dataSourceMongoDBAtlasServerlessInstanceRead,
-		Schema:      returnServerlessInstanceDSSchema(),
+		ReadContext: dataSourceRead,
+		Schema:      dataSourceSchema(),
 	}
 }
 
-func dataSourceMongoDBAtlasServerlessInstanceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	// Get client connection.
-	conn := meta.(*config.MongoDBClient).Atlas
-
-	projectID, projectIDOk := d.GetOk("project_id")
-	instanceName, instanceNameOk := d.GetOk("name")
-
-	if !(projectIDOk && instanceNameOk) {
-		return diag.Errorf("project_id and name must be configured")
-	}
-
-	serverlessInstance, _, err := conn.ServerlessInstances.Get(ctx, projectID.(string), instanceName.(string))
-	if err != nil {
-		return diag.Errorf("error getting serverless instance information: %s", err)
-	}
-
-	if err := d.Set("id", serverlessInstance.ID); err != nil {
-		return diag.Errorf("error setting `is` for serverless instance (%s): %s", d.Id(), err)
-	}
-
-	if err := d.Set("provider_settings_backing_provider_name", serverlessInstance.ProviderSettings.BackingProviderName); err != nil {
-		return diag.Errorf(errorServerlessInstanceSetting, "provider_settings_backing_provider_name", d.Id(), err)
-	}
-
-	if err := d.Set("provider_settings_provider_name", serverlessInstance.ProviderSettings.ProviderName); err != nil {
-		return diag.Errorf(errorServerlessInstanceSetting, "provider_settings_provider_name", d.Id(), err)
-	}
-
-	if err := d.Set("provider_settings_region_name", serverlessInstance.ProviderSettings.RegionName); err != nil {
-		return diag.Errorf(errorServerlessInstanceSetting, "provider_settings_region_name", d.Id(), err)
-	}
-
-	if err := d.Set("connection_strings_standard_srv", serverlessInstance.ConnectionStrings.StandardSrv); err != nil {
-		return diag.Errorf(errorServerlessInstanceSetting, "connection_strings_standard_srv", d.Id(), err)
-	}
-
-	if len(serverlessInstance.ConnectionStrings.PrivateEndpoint) > 0 {
-		if err := d.Set("connection_strings_private_endpoint_srv", flattenSRVConnectionString(serverlessInstance.ConnectionStrings.PrivateEndpoint)); err != nil {
-			return diag.Errorf(errorServerlessInstanceSetting, "connection_strings_private_endpoint_srv", d.Id(), err)
-		}
-	}
-
-	if err := d.Set("create_date", serverlessInstance.CreateDate); err != nil {
-		return diag.Errorf(errorServerlessInstanceSetting, "create_date", d.Id(), err)
-	}
-
-	if err := d.Set("mongo_db_version", serverlessInstance.MongoDBVersion); err != nil {
-		return diag.Errorf(errorServerlessInstanceSetting, "mongo_db_version", d.Id(), err)
-	}
-
-	if err := d.Set("links", flattenServerlessInstanceLinks(serverlessInstance.Links)); err != nil {
-		return diag.Errorf(errorServerlessInstanceSetting, "links", d.Id(), err)
-	}
-
-	if err := d.Set("state_name", serverlessInstance.StateName); err != nil {
-		return diag.Errorf(errorServerlessInstanceSetting, "state_name", d.Id(), err)
-	}
-
-	if err := d.Set("termination_protection_enabled", serverlessInstance.TerminationProtectionEnabled); err != nil {
-		return diag.Errorf(errorServerlessInstanceSetting, "termination_protection_enabled", d.Id(), err)
-	}
-
-	if err := d.Set("continuous_backup_enabled", serverlessInstance.ServerlessBackupOptions.ServerlessContinuousBackupEnabled); err != nil {
-		return diag.Errorf(errorServerlessInstanceSetting, "continuous_backup_enabled", d.Id(), err)
-	}
-	if err := d.Set("tags", advancedcluster.FlattenTags(serverlessInstance.Tags)); err != nil {
-		return diag.Errorf(advancedcluster.ErrorClusterAdvancedSetting, "tags", d.Id(), err)
-	}
-
-	d.SetId(conversion.EncodeStateID(map[string]string{
-		"project_id": projectID.(string),
-		"name":       instanceName.(string),
-	}))
-
-	return nil
-}
-
-func returnServerlessInstanceDSSchema() map[string]*schema.Schema {
+func dataSourceSchema() map[string]*schema.Schema {
 	return map[string]*schema.Schema{
 		"id": {
 			Type:     schema.TypeString,
@@ -172,3 +95,79 @@ func returnServerlessInstanceDSSchema() map[string]*schema.Schema {
 		"tags": &advancedcluster.DSTagsSchema,
 	}
 }
+
+func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
+
+	projectID, projectIDOk := d.GetOk("project_id")
+	instanceName, instanceNameOk := d.GetOk("name")
+
+	if !(projectIDOk && instanceNameOk) {
+		return diag.Errorf("project_id and name must be configured")
+	}
+
+	instance, _, err := connV2.ServerlessInstancesApi.GetServerlessInstance(ctx, projectID.(string), instanceName.(string)).Execute()
+	if err != nil {
+		return diag.Errorf("error getting serverless instance information: %s", err)
+	}
+
+	if err := d.Set("id", instance.GetId()); err != nil {
+		return diag.Errorf("error setting `is` for serverless instance (%s): %s", d.Id(), err)
+	}
+
+	if err := d.Set("provider_settings_backing_provider_name", instance.ProviderSettings.GetBackingProviderName()); err != nil {
+		return diag.Errorf(errorServerlessInstanceSetting, "provider_settings_backing_provider_name", d.Id(), err)
+	}
+
+	if err := d.Set("provider_settings_provider_name", instance.ProviderSettings.GetProviderName()); err != nil {
+		return diag.Errorf(errorServerlessInstanceSetting, "provider_settings_provider_name", d.Id(), err)
+	}
+
+	if err := d.Set("provider_settings_region_name", instance.ProviderSettings.GetRegionName()); err != nil {
+		return diag.Errorf(errorServerlessInstanceSetting, "provider_settings_region_name", d.Id(), err)
+	}
+
+	if err := d.Set("connection_strings_standard_srv", instance.ConnectionStrings.GetStandardSrv()); err != nil {
+		return diag.Errorf(errorServerlessInstanceSetting, "connection_strings_standard_srv", d.Id(), err)
+	}
+
+	if len(instance.ConnectionStrings.GetPrivateEndpoint()) > 0 {
+		if err := d.Set("connection_strings_private_endpoint_srv", flattenSRVConnectionString(instance.ConnectionStrings.GetPrivateEndpoint())); err != nil {
+			return diag.Errorf(errorServerlessInstanceSetting, "connection_strings_private_endpoint_srv", d.Id(), err)
+		}
+	}
+
+	if err := d.Set("create_date", conversion.TimePtrToStringPtr(instance.CreateDate)); err != nil {
+		return diag.Errorf(errorServerlessInstanceSetting, "create_date", d.Id(), err)
+	}
+
+	if err := d.Set("mongo_db_version", instance.GetMongoDBVersion()); err != nil {
+		return diag.Errorf(errorServerlessInstanceSetting, "mongo_db_version", d.Id(), err)
+	}
+
+	if err := d.Set("links", conversion.FlattenLinks(instance.GetLinks())); err != nil {
+		return diag.Errorf(errorServerlessInstanceSetting, "links", d.Id(), err)
+	}
+
+	if err := d.Set("state_name", instance.GetStateName()); err != nil {
+		return diag.Errorf(errorServerlessInstanceSetting, "state_name", d.Id(), err)
+	}
+
+	if err := d.Set("termination_protection_enabled", instance.GetTerminationProtectionEnabled()); err != nil {
+		return diag.Errorf(errorServerlessInstanceSetting, "termination_protection_enabled", d.Id(), err)
+	}
+
+	if err := d.Set("continuous_backup_enabled", instance.ServerlessBackupOptions.GetServerlessContinuousBackupEnabled()); err != nil {
+		return diag.Errorf(errorServerlessInstanceSetting, "continuous_backup_enabled", d.Id(), err)
+	}
+	if err := d.Set("tags", conversion.FlattenTags(instance.GetTags())); err != nil {
+		return diag.Errorf(errorServerlessInstanceSetting, "tags", d.Id(), err)
+	}
+
+	d.SetId(conversion.EncodeStateID(map[string]string{
+		"project_id": projectID.(string),
+		"name":       instanceName.(string),
+	}))
+
+	return nil
+}
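
A minimal sketch, not from this patch, of how the composite state ID round-trips, assuming conversion.EncodeStateID and conversion.DecodeStateID are inverse helpers over a flat string map (all values below are hypothetical):

	package main

	import (
		"fmt"

		"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
	)

	func main() {
		// Pack the two identifiers the data source needs into a single state ID ...
		id := conversion.EncodeStateID(map[string]string{
			"project_id": "0123456789abcdef01234567", // hypothetical 24-hex project ID
			"name":       "my-serverless-instance",
		})
		// ... and recover them later, e.g. in Read, Update, or Delete.
		ids := conversion.DecodeStateID(id)
		fmt.Println(ids["project_id"], ids["name"])
	}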
diff --git a/internal/service/serverlessinstance/data_source_serverless_instances.go b/internal/service/serverlessinstance/data_source_serverless_instances.go
index c4b93163b4..13cbfe7b73 100644
--- a/internal/service/serverlessinstance/data_source_serverless_instances.go
+++ b/internal/service/serverlessinstance/data_source_serverless_instances.go
@@ -7,15 +7,14 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/id"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/config"
-	"github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster"
-	"github.com/mongodb/terraform-provider-mongodbatlas/internal/service/ldapverify"
-	matlas "go.mongodb.org/atlas/mongodbatlas"
+	"go.mongodb.org/atlas-sdk/v20231115005/admin"
 )
 
 func PluralDataSource() *schema.Resource {
 	return &schema.Resource{
-		ReadContext: dataSourceMongoDBAtlasServerlessInstancesRead,
+		ReadContext: dataSourcePluralRead,
 		Schema: map[string]*schema.Schema{
 			"project_id": {
 				Type:     schema.TypeString,
@@ -25,69 +24,66 @@ func PluralDataSource() *schema.Resource {
 				Type:     schema.TypeList,
 				Computed: true,
 				Elem: &schema.Resource{
-					Schema: returnServerlessInstanceDSSchema(),
+					Schema: dataSourceSchema(),
 				},
 			},
 		},
 	}
 }
 
-func dataSourceMongoDBAtlasServerlessInstancesRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+func dataSourcePluralRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
 	projectID, projectIDOK := d.GetOk("project_id")
-
 	if !(projectIDOK) {
 		return diag.Errorf("project_id must be configured")
 	}
-
-	options := &matlas.ListOptions{
-		ItemsPerPage: 500,
-		IncludeCount: true,
+	options := &admin.ListServerlessInstancesApiParams{
+		ItemsPerPage: conversion.IntPtr(500),
+		IncludeCount: conversion.Pointer(true),
+		GroupId:      projectID.(string),
 	}
 
-	serverlessInstances, err := getServerlessList(ctx, meta, projectID.(string), options, 0)
+	serverlessInstances, err := getServerlessList(ctx, connV2, options, 0)
 	if err != nil {
 		return diag.Errorf("error getting serverless instances information: %s", err)
 	}
 
 	flatServerlessInstances := flattenServerlessInstances(serverlessInstances)
-
 	if err := d.Set("results", flatServerlessInstances); err != nil {
 		return diag.Errorf("error setting `results` for serverless instances: %s", err)
 	}
 
 	d.SetId(id.UniqueId())
-
 	return nil
 }
 
-func getServerlessList(ctx context.Context, meta any, projectID string, options *matlas.ListOptions, obtainedItemsCount int) ([]*matlas.Cluster, error) {
-	// Get client connection.
-	var list []*matlas.Cluster
-	options.PageNum++
-	conn := meta.(*config.MongoDBClient).Atlas
-
-	serverlessInstances, _, err := conn.ServerlessInstances.List(ctx, projectID, options)
+func getServerlessList(ctx context.Context, connV2 *admin.APIClient, options *admin.ListServerlessInstancesApiParams, obtainedItemsCount int) ([]admin.ServerlessInstanceDescription, error) {
+	if options.PageNum == nil {
+		options.PageNum = conversion.IntPtr(1)
+	} else {
+		*options.PageNum++
+	}
+	var list []admin.ServerlessInstanceDescription
+	serverlessInstances, _, err := connV2.ServerlessInstancesApi.ListServerlessInstancesWithParams(ctx, options).Execute()
 	if err != nil {
 		return list, fmt.Errorf("error getting serverless instances information: %s", err)
 	}
 
-	list = append(list, serverlessInstances.Results...)
-	obtainedItemsCount += len(serverlessInstances.Results)
+	list = append(list, serverlessInstances.GetResults()...)
+	obtainedItemsCount += len(serverlessInstances.GetResults())
 
-	if serverlessInstances.TotalCount > options.ItemsPerPage && obtainedItemsCount < serverlessInstances.TotalCount {
-		instances, err := getServerlessList(ctx, meta, projectID, options, obtainedItemsCount)
+	if serverlessInstances.GetTotalCount() > *options.ItemsPerPage && obtainedItemsCount < serverlessInstances.GetTotalCount() {
+		instances, err := getServerlessList(ctx, connV2, options, obtainedItemsCount)
 		if err != nil {
 			return list, fmt.Errorf("error getting serverless instances information: %s", err)
 		}
 		list = append(list, instances...)
 	}
-
 	return list, nil
 }
 
-func flattenServerlessInstances(serverlessInstances []*matlas.Cluster) []map[string]any {
+func flattenServerlessInstances(serverlessInstances []admin.ServerlessInstanceDescription) []map[string]any {
 	var serverlessInstancesMap []map[string]any
-
 	if len(serverlessInstances) == 0 {
 		return nil
 	}
@@ -95,21 +91,20 @@ func flattenServerlessInstances(serverlessInstances []*matlas.Cluster) []map[str
 
 	for i := range serverlessInstances {
 		serverlessInstancesMap[i] = map[string]any{
-			"connection_strings_standard_srv": serverlessInstances[i].ConnectionStrings.StandardSrv,
-			"create_date":                     serverlessInstances[i].CreateDate,
-			"id":                              serverlessInstances[i].ID,
-			"links":                           ldapverify.FlattenLinks(serverlessInstances[i].Links),
-			"mongo_db_version":                serverlessInstances[i].MongoDBVersion,
-			"name":                            serverlessInstances[i].Name,
-			"provider_settings_backing_provider_name": serverlessInstances[i].ProviderSettings.BackingProviderName,
-			"provider_settings_region_name":           serverlessInstances[i].ProviderSettings.RegionName,
-			"provider_settings_provider_name":         serverlessInstances[i].ProviderSettings.ProviderName,
-			"state_name":                              serverlessInstances[i].StateName,
-			"termination_protection_enabled":          serverlessInstances[i].TerminationProtectionEnabled,
-			"continuous_backup_enabled":               serverlessInstances[i].ServerlessBackupOptions.ServerlessContinuousBackupEnabled,
-			"tags":                                    advancedcluster.FlattenTags(serverlessInstances[i].Tags),
+			"connection_strings_standard_srv": serverlessInstances[i].ConnectionStrings.GetStandardSrv(),
+			"create_date":                     conversion.TimePtrToStringPtr(serverlessInstances[i].CreateDate),
+			"id":                              serverlessInstances[i].GetId(),
+			"links":                           conversion.FlattenLinks(serverlessInstances[i].GetLinks()),
+			"mongo_db_version":                serverlessInstances[i].GetMongoDBVersion(),
+			"name":                            serverlessInstances[i].GetName(),
+			"provider_settings_backing_provider_name": serverlessInstances[i].ProviderSettings.GetBackingProviderName(),
+			"provider_settings_region_name":           serverlessInstances[i].ProviderSettings.GetRegionName(),
+			"provider_settings_provider_name":         serverlessInstances[i].ProviderSettings.GetProviderName(),
+			"state_name":                              serverlessInstances[i].GetStateName(),
+			"termination_protection_enabled":          serverlessInstances[i].GetTerminationProtectionEnabled(),
+			"continuous_backup_enabled":               serverlessInstances[i].ServerlessBackupOptions.GetServerlessContinuousBackupEnabled(),
+			"tags":                                    conversion.FlattenTags(serverlessInstances[i].GetTags()),
 		}
 	}
-
 	return serverlessInstancesMap
 }
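
The recursion in getServerlessList can also be read as a loop. A hypothetical iterative equivalent, not from this patch, under the same v2 SDK types, shown only to make the paging contract explicit (PageNum starts at 1 and advances until every counted result has been fetched):

	package serverlessinstance

	import (
		"context"

		"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
		"go.mongodb.org/atlas-sdk/v20231115005/admin"
	)

	// listAllServerlessInstances is a hypothetical sketch, not part of the patch.
	func listAllServerlessInstances(ctx context.Context, connV2 *admin.APIClient, projectID string) ([]admin.ServerlessInstanceDescription, error) {
		params := &admin.ListServerlessInstancesApiParams{
			GroupId:      projectID,
			ItemsPerPage: conversion.IntPtr(500),
			IncludeCount: conversion.Pointer(true), // needed so GetTotalCount() is populated
			PageNum:      conversion.IntPtr(1),
		}
		var all []admin.ServerlessInstanceDescription
		for {
			page, _, err := connV2.ServerlessInstancesApi.ListServerlessInstancesWithParams(ctx, params).Execute()
			if err != nil {
				return nil, err
			}
			all = append(all, page.GetResults()...)
			if len(all) >= page.GetTotalCount() {
				return all, nil
			}
			*params.PageNum++
		}
	}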
diff --git a/internal/service/serverlessinstance/resource_serverless_instance.go b/internal/service/serverlessinstance/resource_serverless_instance.go
index 7af0bd6896..ca167f7f3b 100644
--- a/internal/service/serverlessinstance/resource_serverless_instance.go
+++ b/internal/service/serverlessinstance/resource_serverless_instance.go
@@ -16,7 +16,7 @@ import (
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/config"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster"
 	"github.com/mwielbut/pointy"
-	matlas "go.mongodb.org/atlas/mongodbatlas"
+	"go.mongodb.org/atlas-sdk/v20231115005/admin"
 )
 
 const (
@@ -25,63 +25,18 @@ const (
 
 func Resource() *schema.Resource {
 	return &schema.Resource{
-		CreateContext: resourceMongoDBAtlasServerlessInstanceCreate,
-		ReadContext:   resourceMongoDBAtlasServerlessInstanceRead,
-		UpdateContext: resourceMongoDBAtlasServerlessInstanceUpdate,
-		DeleteContext: resourceMongoDBAtlasServerlessInstanceDelete,
+		CreateContext: resourceCreate,
+		ReadContext:   resourceRead,
+		UpdateContext: resourceUpdate,
+		DeleteContext: resourceDelete,
 		Importer: &schema.ResourceImporter{
-			StateContext: resourceMongoDBAtlasServerlessInstanceImportState,
+			StateContext: resourceImport,
 		},
-		Schema: returnServerlessInstanceSchema(),
+		Schema: resourceSchema(),
 	}
 }
 
-func resourceMongoDBAtlasServerlessInstanceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	// Get client connection.
-	conn := meta.(*config.MongoDBClient).Atlas
-	ids := conversion.DecodeStateID(d.Id())
-	projectID := ids["project_id"]
-	instanceName := ids["name"]
-
-	if d.HasChange("termination_protection_enabled") || d.HasChange("continuous_backup_enabled") || d.HasChange("tags") {
-		serverlessBackupOptions := &matlas.ServerlessBackupOptions{
-			ServerlessContinuousBackupEnabled: pointy.Bool(d.Get("continuous_backup_enabled").(bool)),
-		}
-
-		ServerlessUpdateRequestParams := &matlas.ServerlessUpdateRequestParams{
-			ServerlessBackupOptions:      serverlessBackupOptions,
-			TerminationProtectionEnabled: pointy.Bool(d.Get("termination_protection_enabled").(bool)),
-		}
-
-		if d.HasChange("tags") {
-			tags := advancedcluster.ExpandTagSliceFromSetSchema(d)
-			ServerlessUpdateRequestParams.Tag = &tags
-		}
-
-		_, _, err := conn.ServerlessInstances.Update(ctx, projectID, instanceName, ServerlessUpdateRequestParams)
-		if err != nil {
-			return diag.Errorf("error updating serverless instance: %s", err)
-		}
-
-		stateConf := &retry.StateChangeConf{
-			Pending:    []string{"CREATING", "UPDATING", "REPAIRING", "REPEATING", "PENDING"},
-			Target:     []string{"IDLE"},
-			Refresh:    resourceServerlessInstanceRefreshFunc(ctx, d.Get("name").(string), projectID, conn),
-			Timeout:    3 * time.Hour,
-			MinTimeout: 1 * time.Minute,
-			Delay:      3 * time.Minute,
-		}
-
-		// Wait, catching any errors
-		_, err = stateConf.WaitForStateContext(ctx)
-		if err != nil {
-			return diag.Errorf("error updating MongoDB Serverless Instance: %s", err)
-		}
-	}
-	return resourceMongoDBAtlasServerlessInstanceRead(ctx, d, meta)
-}
-
-func returnServerlessInstanceSchema() map[string]*schema.Schema {
+func resourceSchema() map[string]*schema.Schema {
 	return map[string]*schema.Schema{
 		"id": {
 			Type:     schema.TypeString,
@@ -165,214 +120,241 @@ func returnServerlessInstanceSchema() map[string]*schema.Schema {
 	}
 }
 
-func resourceMongoDBAtlasServerlessInstanceImportState(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) {
-	conn := meta.(*config.MongoDBClient).Atlas
+func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
+	projectID := d.Get("project_id").(string)
 
-	projectID, name, err := splitServerlessInstanceImportID(d.Id())
-	if err != nil {
-		return nil, err
-	}
+	name := d.Get("name").(string)
 
-	u, _, err := conn.ServerlessInstances.Get(ctx, *projectID, *name)
-	if err != nil {
-		return nil, fmt.Errorf("couldn't import cluster %s in project %s, error: %s", *name, *projectID, err)
+	serverlessProviderSettings := admin.ServerlessProviderSettings{
+		BackingProviderName: d.Get("provider_settings_backing_provider_name").(string),
+		ProviderName:        conversion.StringPtr(d.Get("provider_settings_provider_name").(string)),
+		RegionName:          d.Get("provider_settings_region_name").(string),
 	}
 
-	if err := d.Set("project_id", u.GroupID); err != nil {
-		log.Printf(advancedcluster.ErrorClusterSetting, "project_id", u.ID, err)
+	serverlessBackupOptions := &admin.ClusterServerlessBackupOptions{
+		ServerlessContinuousBackupEnabled: pointy.Bool(d.Get("continuous_backup_enabled").(bool)),
 	}
 
-	if err := d.Set("name", u.Name); err != nil {
-		log.Printf(advancedcluster.ErrorClusterSetting, "name", u.ID, err)
+	params := &admin.ServerlessInstanceDescriptionCreate{
+		Name:                         name,
+		ProviderSettings:             serverlessProviderSettings,
+		ServerlessBackupOptions:      serverlessBackupOptions,
+		TerminationProtectionEnabled: pointy.Bool(d.Get("termination_protection_enabled").(bool)),
 	}
 
-	if err := d.Set("continuous_backup_enabled", u.ServerlessBackupOptions.ServerlessContinuousBackupEnabled); err != nil {
-		log.Printf(advancedcluster.ErrorClusterSetting, "continuous_backup_enabled", u.ID, err)
+	if _, ok := d.GetOk("tags"); ok {
+		tags := conversion.ExpandTagsFromSetSchema(d)
+		params.Tags = &tags
 	}
 
-	d.SetId(conversion.EncodeStateID(map[string]string{
-		"project_id": *projectID,
-		"name":       u.Name,
-	}))
-
-	return []*schema.ResourceData{d}, nil
-}
-
-func resourceMongoDBAtlasServerlessInstanceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	// Get client connection.
-	conn := meta.(*config.MongoDBClient).Atlas
-	ids := conversion.DecodeStateID(d.Id())
-	projectID := ids["project_id"]
-	serverlessName := ids["name"]
-
-	_, err := conn.ServerlessInstances.Delete(ctx, projectID, serverlessName)
-
+	_, _, err := connV2.ServerlessInstancesApi.CreateServerlessInstance(ctx, projectID, params).Execute()
 	if err != nil {
-		return diag.FromErr(fmt.Errorf("error deleting MongoDB Serverless Instance (%s): %s", serverlessName, err))
+		return diag.Errorf("error creating serverless instance: %s", err)
 	}
 
-	log.Println("[INFO] Waiting for MongoDB Serverless Instance to be destroyed")
-
 	stateConf := &retry.StateChangeConf{
-		Pending:    []string{"IDLE", "CREATING", "UPDATING", "REPAIRING", "DELETING"},
-		Target:     []string{"DELETED"},
-		Refresh:    resourceServerlessInstanceRefreshFunc(ctx, serverlessName, projectID, conn),
+		Pending:    []string{"CREATING", "UPDATING", "REPAIRING", "REPEATING", "PENDING"},
+		Target:     []string{"IDLE"},
+		Refresh:    resourceRefreshFunc(ctx, d.Get("name").(string), projectID, connV2),
 		Timeout:    3 * time.Hour,
-		MinTimeout: 30 * time.Second,
-		Delay:      1 * time.Minute, // Wait 30 secs before starting
+		MinTimeout: 1 * time.Minute,
+		Delay:      3 * time.Minute,
 	}
 
-	// Wait, catching any errors
 	_, err = stateConf.WaitForStateContext(ctx)
 	if err != nil {
-		return diag.FromErr(fmt.Errorf("error deleting MongoDB Serverless Instance (%s): %s", serverlessName, err))
+		return diag.Errorf("error creating MongoDB Serverless Instance: %s", err)
 	}
 
-	return nil
+	d.SetId(conversion.EncodeStateID(map[string]string{
+		"project_id": projectID,
+		"name":       name,
+	}))
+
+	return resourceRead(ctx, d, meta)
 }
 
-func resourceMongoDBAtlasServerlessInstanceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	// Get client connection.
-	conn := meta.(*config.MongoDBClient).Atlas
+func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
 	ids := conversion.DecodeStateID(d.Id())
 	projectID := ids["project_id"]
 	instanceName := ids["name"]
 
-	serverlessInstance, _, err := conn.ServerlessInstances.Get(ctx, projectID, instanceName)
+	instance, _, err := connV2.ServerlessInstancesApi.GetServerlessInstance(ctx, projectID, instanceName).Execute()
 	if err != nil {
-		// case 404
-		// deleted in the backend case
-		reset := strings.Contains(err.Error(), "404") && !d.IsNewResource()
-
-		if reset {
+		// A 404 means the instance was deleted in the backend, so remove it from state.
+		if strings.Contains(err.Error(), "404") && !d.IsNewResource() {
 			d.SetId("")
 			return nil
 		}
-
 		return diag.Errorf("error getting serverless instance information: %s", err)
 	}
 
-	if err := d.Set("id", serverlessInstance.ID); err != nil {
+	if err := d.Set("id", instance.GetId()); err != nil {
 		return diag.Errorf(errorServerlessInstanceSetting, "id", d.Id(), err)
 	}
 
-	if err := d.Set("provider_settings_backing_provider_name", serverlessInstance.ProviderSettings.BackingProviderName); err != nil {
+	if err := d.Set("provider_settings_backing_provider_name", instance.ProviderSettings.GetBackingProviderName()); err != nil {
 		return diag.Errorf(errorServerlessInstanceSetting, "provider_settings_backing_provider_name", d.Id(), err)
 	}
 
-	if err := d.Set("provider_settings_provider_name", serverlessInstance.ProviderSettings.ProviderName); err != nil {
+	if err := d.Set("provider_settings_provider_name", instance.ProviderSettings.GetProviderName()); err != nil {
 		return diag.Errorf(errorServerlessInstanceSetting, "provider_settings_provider_name", d.Id(), err)
 	}
 
-	if err := d.Set("provider_settings_region_name", serverlessInstance.ProviderSettings.RegionName); err != nil {
+	if err := d.Set("provider_settings_region_name", instance.ProviderSettings.GetRegionName()); err != nil {
 		return diag.Errorf(errorServerlessInstanceSetting, "provider_settings_region_name", d.Id(), err)
 	}
 
-	if err := d.Set("connection_strings_standard_srv", serverlessInstance.ConnectionStrings.StandardSrv); err != nil {
+	if err := d.Set("connection_strings_standard_srv", instance.ConnectionStrings.GetStandardSrv()); err != nil {
 		return diag.Errorf(errorServerlessInstanceSetting, "connection_strings_standard_srv", d.Id(), err)
 	}
 
-	if err := d.Set("connection_strings_private_endpoint_srv", flattenSRVConnectionString(serverlessInstance.ConnectionStrings.PrivateEndpoint)); err != nil {
+	if err := d.Set("connection_strings_private_endpoint_srv", flattenSRVConnectionString(instance.ConnectionStrings.GetPrivateEndpoint())); err != nil {
 		return diag.Errorf(errorServerlessInstanceSetting, "connection_strings_private_endpoint_srv", d.Id(), err)
 	}
 
-	if err := d.Set("create_date", serverlessInstance.CreateDate); err != nil {
+	if err := d.Set("create_date", conversion.TimePtrToStringPtr(instance.CreateDate)); err != nil {
 		return diag.Errorf(errorServerlessInstanceSetting, "create_date", d.Id(), err)
 	}
 
-	if err := d.Set("mongo_db_version", serverlessInstance.MongoDBVersion); err != nil {
+	if err := d.Set("mongo_db_version", instance.GetMongoDBVersion()); err != nil {
 		return diag.Errorf(errorServerlessInstanceSetting, "mongo_db_version", d.Id(), err)
 	}
 
-	if err := d.Set("links", flattenServerlessInstanceLinks(serverlessInstance.Links)); err != nil {
+	if err := d.Set("links", conversion.FlattenLinks(instance.GetLinks())); err != nil {
 		return diag.Errorf(errorServerlessInstanceSetting, "links", d.Id(), err)
 	}
 
-	if err := d.Set("state_name", serverlessInstance.StateName); err != nil {
+	if err := d.Set("state_name", instance.GetStateName()); err != nil {
 		return diag.Errorf(errorServerlessInstanceSetting, "state_name", d.Id(), err)
 	}
 
-	if err := d.Set("termination_protection_enabled", serverlessInstance.TerminationProtectionEnabled); err != nil {
+	if err := d.Set("termination_protection_enabled", instance.GetTerminationProtectionEnabled()); err != nil {
 		return diag.Errorf(errorServerlessInstanceSetting, "termination_protection_enabled", d.Id(), err)
 	}
 
-	if err := d.Set("continuous_backup_enabled", serverlessInstance.ServerlessBackupOptions.ServerlessContinuousBackupEnabled); err != nil {
+	if err := d.Set("continuous_backup_enabled", instance.ServerlessBackupOptions.GetServerlessContinuousBackupEnabled()); err != nil {
 		return diag.Errorf(errorServerlessInstanceSetting, "continuous_backup_enabled", d.Id(), err)
 	}
 
-	if err := d.Set("tags", advancedcluster.FlattenTags(serverlessInstance.Tags)); err != nil {
+	if err := d.Set("tags", conversion.FlattenTags(instance.GetTags())); err != nil {
 		return diag.Errorf(errorServerlessInstanceSetting, "tags", d.Id(), err)
 	}
 
 	return nil
 }
 
-func resourceMongoDBAtlasServerlessInstanceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	// Get client connection.
-	conn := meta.(*config.MongoDBClient).Atlas
-	projectID := d.Get("project_id").(string)
+func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
+	ids := conversion.DecodeStateID(d.Id())
+	projectID := ids["project_id"]
+	instanceName := ids["name"]
 
-	name := d.Get("name").(string)
+	if d.HasChange("termination_protection_enabled") || d.HasChange("continuous_backup_enabled") || d.HasChange("tags") {
+		serverlessBackupOptions := &admin.ClusterServerlessBackupOptions{
+			ServerlessContinuousBackupEnabled: pointy.Bool(d.Get("continuous_backup_enabled").(bool)),
+		}
 
-	serverlessProviderSettings := &matlas.ServerlessProviderSettings{
-		BackingProviderName: d.Get("provider_settings_backing_provider_name").(string),
-		ProviderName:        d.Get("provider_settings_provider_name").(string),
-		RegionName:          d.Get("provider_settings_region_name").(string),
-	}
+		params := &admin.ServerlessInstanceDescriptionUpdate{
+			ServerlessBackupOptions:      serverlessBackupOptions,
+			TerminationProtectionEnabled: pointy.Bool(d.Get("termination_protection_enabled").(bool)),
+		}
 
-	serverlessBackupOptions := &matlas.ServerlessBackupOptions{
-		ServerlessContinuousBackupEnabled: pointy.Bool(d.Get("continuous_backup_enabled").(bool)),
-	}
+		if d.HasChange("tags") {
+			tags := conversion.ExpandTagsFromSetSchema(d)
+			params.Tags = &tags
+		}
 
-	serverlessInstanceRequest := &matlas.ServerlessCreateRequestParams{
-		Name:                         name,
-		ProviderSettings:             serverlessProviderSettings,
-		ServerlessBackupOptions:      serverlessBackupOptions,
-		TerminationProtectionEnabled: pointy.Bool(d.Get("termination_protection_enabled").(bool)),
-	}
+		_, _, err := connV2.ServerlessInstancesApi.UpdateServerlessInstance(ctx, projectID, instanceName, params).Execute()
+		if err != nil {
+			return diag.Errorf("error updating serverless instance: %s", err)
+		}
 
-	if _, ok := d.GetOk("tags"); ok {
-		tagsSlice := advancedcluster.ExpandTagSliceFromSetSchema(d)
-		serverlessInstanceRequest.Tag = &tagsSlice
+		stateConf := &retry.StateChangeConf{
+			Pending:    []string{"CREATING", "UPDATING", "REPAIRING", "REPEATING", "PENDING"},
+			Target:     []string{"IDLE"},
+			Refresh:    resourceRefreshFunc(ctx, d.Get("name").(string), projectID, connV2),
+			Timeout:    3 * time.Hour,
+			MinTimeout: 1 * time.Minute,
+			Delay:      3 * time.Minute,
+		}
+
+		_, err = stateConf.WaitForStateContext(ctx)
+		if err != nil {
+			return diag.Errorf("error updating MongoDB Serverless Instance: %s", err)
+		}
 	}
+	return resourceRead(ctx, d, meta)
+}
 
-	_, _, err := conn.ServerlessInstances.Create(ctx, projectID, serverlessInstanceRequest)
+func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
+	ids := conversion.DecodeStateID(d.Id())
+	projectID := ids["project_id"]
+	serverlessName := ids["name"]
+
+	_, _, err := connV2.ServerlessInstancesApi.DeleteServerlessInstance(ctx, projectID, serverlessName).Execute()
 	if err != nil {
-		return diag.Errorf("error creating serverless instance: %s", err)
+		return diag.FromErr(fmt.Errorf("error deleting MongoDB Serverless Instance (%s): %s", serverlessName, err))
 	}
 
 	stateConf := &retry.StateChangeConf{
-		Pending:    []string{"CREATING", "UPDATING", "REPAIRING", "REPEATING", "PENDING"},
-		Target:     []string{"IDLE"},
-		Refresh:    resourceServerlessInstanceRefreshFunc(ctx, d.Get("name").(string), projectID, conn),
+		Pending:    []string{"IDLE", "CREATING", "UPDATING", "REPAIRING", "DELETING"},
+		Target:     []string{"DELETED"},
+		Refresh:    resourceRefreshFunc(ctx, serverlessName, projectID, connV2),
 		Timeout:    3 * time.Hour,
-		MinTimeout: 1 * time.Minute,
-		Delay:      3 * time.Minute,
+		MinTimeout: 30 * time.Second,
+		Delay:      1 * time.Minute,
 	}
 
-	// Wait, catching any errors
 	_, err = stateConf.WaitForStateContext(ctx)
 	if err != nil {
-		return diag.Errorf("error creating MongoDB Serverless Instance: %s", err)
+		return diag.FromErr(fmt.Errorf("error deleting MongoDB Serverless Instance (%s): %s", serverlessName, err))
+	}
+	return nil
+}
+
+func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) {
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
+	projectID, name, err := splitImportID(d.Id())
+	if err != nil {
+		return nil, err
+	}
+
+	instance, _, err := connV2.ServerlessInstancesApi.GetServerlessInstance(ctx, *projectID, *name).Execute()
+	if err != nil {
+		return nil, fmt.Errorf("couldn't import serverless instance %s in project %s, error: %s", *name, *projectID, err)
+	}
+
+	if err := d.Set("project_id", instance.GetGroupId()); err != nil {
+		log.Printf(advancedcluster.ErrorClusterSetting, "project_id", instance.GetId(), err)
+	}
+
+	if err := d.Set("name", instance.GetName()); err != nil {
+		log.Printf(advancedcluster.ErrorClusterSetting, "name", instance.GetId(), err)
+	}
+
+	if err := d.Set("continuous_backup_enabled", instance.ServerlessBackupOptions.GetServerlessContinuousBackupEnabled()); err != nil {
+		log.Printf(advancedcluster.ErrorClusterSetting, "continuous_backup_enabled", instance.GetId(), err)
 	}
 
 	d.SetId(conversion.EncodeStateID(map[string]string{
-		"project_id": projectID,
-		"name":       name,
+		"project_id": *projectID,
+		"name":       instance.GetName(),
 	}))
 
-	return resourceMongoDBAtlasServerlessInstanceRead(ctx, d, meta)
+	return []*schema.ResourceData{d}, nil
 }
 
-func resourceServerlessInstanceRefreshFunc(ctx context.Context, name, projectID string, client *matlas.Client) retry.StateRefreshFunc {
+func resourceRefreshFunc(ctx context.Context, name, projectID string, connV2 *admin.APIClient) retry.StateRefreshFunc {
 	return func() (any, string, error) {
-		c, resp, err := client.ServerlessInstances.Get(ctx, projectID, name)
-
+		instance, resp, err := connV2.ServerlessInstancesApi.GetServerlessInstance(ctx, projectID, name).Execute()
 		if err != nil && strings.Contains(err.Error(), "reset by peer") {
 			return nil, "REPEATING", nil
 		}
-
-		if err != nil && c == nil && resp == nil {
+		if err != nil && instance == nil && resp == nil {
 			return nil, "", err
 		} else if err != nil {
 			if resp.StatusCode == 404 {
@@ -383,38 +365,20 @@ func resourceServerlessInstanceRefreshFunc(ctx context.Context, name, projectID
 			}
 			return nil, "", err
 		}
-
-		if c.StateName != "" {
-			log.Printf("[DEBUG] status for MongoDB Serverless Instance: %s: %s", name, c.StateName)
-		}
-
-		return c, c.StateName, nil
-	}
-}
-
-func flattenServerlessInstanceLinks(links []*matlas.Link) []map[string]any {
-	linksList := make([]map[string]any, 0)
-
-	for _, link := range links {
-		mLink := map[string]any{
-			"href": link.Href,
-			"rel":  link.Rel,
-		}
-		linksList = append(linksList, mLink)
+		stateName := instance.GetStateName()
+		return instance, stateName, nil
 	}
-
-	return linksList
 }
 
-func flattenSRVConnectionString(srvConnectionStringArray []matlas.PrivateEndpoint) []any {
-	srvconnections := make([]any, 0)
-	for _, v := range srvConnectionStringArray {
-		srvconnections = append(srvconnections, v.SRVConnectionString)
+func flattenSRVConnectionString(list []admin.ServerlessConnectionStringsPrivateEndpointList) []any {
+	ret := make([]any, len(list))
+	for i, elm := range list {
+		ret[i] = elm.GetSrvConnectionString()
 	}
-	return srvconnections
+	return ret
 }
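
A hypothetical in-package illustration of the flattening above, not from this patch (the endpoint value is made up); each private endpoint maps to its SRV connection string, preserving order:

	func exampleFlattenSRV() []any {
		in := []admin.ServerlessConnectionStringsPrivateEndpointList{
			{SrvConnectionString: conversion.StringPtr("mongodb+srv://pe-0.example.net")}, // hypothetical endpoint
		}
		return flattenSRVConnectionString(in) // []any{"mongodb+srv://pe-0.example.net"}
	}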
 
-func splitServerlessInstanceImportID(id string) (projectID, instanceName *string, err error) {
+func splitImportID(id string) (projectID, instanceName *string, err error) {
 	var re = regexp.MustCompile(`(?s)^([0-9a-fA-F]{24})-(.*)$`)
 	parts := re.FindStringSubmatch(id)
 
diff --git a/internal/service/serverlessinstance/resource_serverless_instance_migration_test.go b/internal/service/serverlessinstance/resource_serverless_instance_migration_test.go
new file mode 100644
index 0000000000..2c134ba6ce
--- /dev/null
+++ b/internal/service/serverlessinstance/resource_serverless_instance_migration_test.go
@@ -0,0 +1,37 @@
+package serverlessinstance_test
+
+import (
+	"os"
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-testing/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/mig"
+)
+
+func TestAccMigrationServerlessInstance_basic(t *testing.T) {
+	var (
+		instanceName = acctest.RandomWithPrefix("test-acc-serverless")
+		orgID        = os.Getenv("MONGODB_ATLAS_ORG_ID")
+		projectName  = acctest.RandomWithPrefix("test-acc-serverless")
+		config       = acc.ConfigServerlessInstanceBasic(orgID, projectName, instanceName, true)
+	)
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { mig.PreCheckBasic(t) },
+		CheckDestroy: checkDestroy,
+		Steps: []resource.TestStep{
+			{
+				ExternalProviders: mig.ExternalProviders(),
+				Config:            config,
+				Check: resource.ComposeTestCheckFunc(
+					checkConnectionStringPrivateEndpointIsPresentWithNoElement(resourceName),
+					checkExists(resourceName),
+					resource.TestCheckResourceAttr(resourceName, "name", instanceName),
+					resource.TestCheckResourceAttr(resourceName, "termination_protection_enabled", "false"),
+				),
+			},
+			mig.TestStep(config),
+		},
+	})
+}
diff --git a/internal/service/serverlessinstance/resource_serverless_instance_test.go b/internal/service/serverlessinstance/resource_serverless_instance_test.go
index a3036df125..846d54819f 100644
--- a/internal/service/serverlessinstance/resource_serverless_instance_test.go
+++ b/internal/service/serverlessinstance/resource_serverless_instance_test.go
@@ -11,45 +11,47 @@ import (
 	"github.com/hashicorp/terraform-plugin-testing/terraform"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc"
-	matlas "go.mongodb.org/atlas/mongodbatlas"
+	"go.mongodb.org/atlas-sdk/v20231115005/admin"
+)
+
+const (
+	resourceName         = "mongodbatlas_serverless_instance.test"
+	dataSourceName       = "data.mongodbatlas_serverless_instance.test"
+	dataSourcePluralName = "data.mongodbatlas_serverless_instances.test"
 )
 
 func TestAccServerlessInstance_basic(t *testing.T) {
 	var (
-		serverlessInstance      matlas.Cluster
-		resourceName            = "mongodbatlas_serverless_instance.test"
-		instanceName            = acctest.RandomWithPrefix("test-acc-serverless")
-		orgID                   = os.Getenv("MONGODB_ATLAS_ORG_ID")
-		projectName             = acctest.RandomWithPrefix("test-acc-serverless")
-		datasourceName          = "data.mongodbatlas_serverless_instance.test"
-		datasourceInstancesName = "data.mongodbatlas_serverless_instances.test"
+		instanceName = acctest.RandomWithPrefix("test-acc-serverless")
+		orgID        = os.Getenv("MONGODB_ATLAS_ORG_ID")
+		projectName  = acctest.RandomWithPrefix("test-acc-serverless")
 	)
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck:                 func() { acc.PreCheckBasic(t) },
 		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
-		CheckDestroy:             testAccCheckMongoDBAtlasServerlessInstanceDestroy,
+		CheckDestroy:             checkDestroy,
 		Steps: []resource.TestStep{
 			{
 				Config: acc.ConfigServerlessInstanceBasic(orgID, projectName, instanceName, true),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckMongoDBAtlasServerlessInstanceExists(resourceName, &serverlessInstance),
+					checkConnectionStringPrivateEndpointIsPresentWithNoElement(resourceName),
+					checkExists(resourceName),
 					resource.TestCheckResourceAttr(resourceName, "name", instanceName),
 					resource.TestCheckResourceAttr(resourceName, "termination_protection_enabled", "false"),
-					resource.TestCheckResourceAttrSet(datasourceName, "name"),
-					resource.TestCheckResourceAttrSet(datasourceName, "project_id"),
-					resource.TestCheckResourceAttrSet(datasourceName, "state_name"),
-					resource.TestCheckResourceAttrSet(datasourceName, "create_date"),
-					resource.TestCheckResourceAttrSet(datasourceName, "mongo_db_version"),
-					resource.TestCheckResourceAttrSet(datasourceName, "continuous_backup_enabled"),
-					resource.TestCheckResourceAttrSet(datasourceName, "termination_protection_enabled"),
-					resource.TestCheckResourceAttrSet(datasourceInstancesName, "project_id"),
-					resource.TestCheckResourceAttrSet(datasourceInstancesName, "results.#"),
-					resource.TestCheckResourceAttrSet(datasourceInstancesName, "results.0.id"),
-					resource.TestCheckResourceAttrSet(datasourceInstancesName, "results.0.name"),
-					resource.TestCheckResourceAttrSet(datasourceInstancesName, "results.0.state_name"),
-					resource.TestCheckResourceAttrSet(datasourceInstancesName, "results.0.continuous_backup_enabled"),
-					resource.TestCheckResourceAttrSet(datasourceInstancesName, "results.0.termination_protection_enabled"),
-					testAccCheckConnectionStringPrivateEndpointIsPresentWithNoElement(resourceName),
+					resource.TestCheckResourceAttrSet(dataSourceName, "name"),
+					resource.TestCheckResourceAttrSet(dataSourceName, "project_id"),
+					resource.TestCheckResourceAttrSet(dataSourceName, "state_name"),
+					resource.TestCheckResourceAttrSet(dataSourceName, "create_date"),
+					resource.TestCheckResourceAttrSet(dataSourceName, "mongo_db_version"),
+					resource.TestCheckResourceAttrSet(dataSourceName, "continuous_backup_enabled"),
+					resource.TestCheckResourceAttrSet(dataSourceName, "termination_protection_enabled"),
+					resource.TestCheckResourceAttrSet(dataSourcePluralName, "project_id"),
+					resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.#"),
+					resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.id"),
+					resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.name"),
+					resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.state_name"),
+					resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.continuous_backup_enabled"),
+					resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.termination_protection_enabled"),
 				),
 			},
 		},
@@ -58,43 +60,39 @@ func TestAccServerlessInstance_basic(t *testing.T) {
 
 func TestAccServerlessInstance_WithTags(t *testing.T) {
 	var (
-		serverlessInstance      matlas.Cluster
-		resourceName            = "mongodbatlas_serverless_instance.test"
-		instanceName            = acctest.RandomWithPrefix("test-acc-serverless")
-		orgID                   = os.Getenv("MONGODB_ATLAS_ORG_ID")
-		projectName             = acctest.RandomWithPrefix("test-acc-serverless")
-		dataSourceName          = "data.mongodbatlas_serverless_instance.test"
-		dataSourceInstancesName = "data.mongodbatlas_serverless_instances.test"
+		instanceName = acctest.RandomWithPrefix("test-acc-serverless")
+		orgID        = os.Getenv("MONGODB_ATLAS_ORG_ID")
+		projectName  = acctest.RandomWithPrefix("test-acc-serverless")
 	)
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck:                 func() { acc.PreCheckBasic(t) },
 		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
-		CheckDestroy:             testAccCheckMongoDBAtlasServerlessInstanceDestroy,
+		CheckDestroy:             checkDestroy,
 		Steps: []resource.TestStep{
 			{
-				Config: acc.ConfigServerlessInstanceWithTags(orgID, projectName, instanceName, []matlas.Tag{}),
+				Config: acc.ConfigServerlessInstanceWithTags(orgID, projectName, instanceName, []admin.ResourceTag{}),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckMongoDBAtlasServerlessInstanceExists(resourceName, &serverlessInstance),
+					checkExists(resourceName),
 					resource.TestCheckResourceAttr(resourceName, "name", instanceName),
 					resource.TestCheckResourceAttr(resourceName, "tags.#", "0"),
 					resource.TestCheckResourceAttr(dataSourceName, "tags.#", "0"),
-					resource.TestCheckResourceAttr(dataSourceInstancesName, "results.0.tags.#", "0"),
+					resource.TestCheckResourceAttr(dataSourcePluralName, "results.0.tags.#", "0"),
 				),
 			},
 			{
-				Config: acc.ConfigServerlessInstanceWithTags(orgID, projectName, instanceName, []matlas.Tag{
+				Config: acc.ConfigServerlessInstanceWithTags(orgID, projectName, instanceName, []admin.ResourceTag{
 					{
-						Key:   "key 1",
-						Value: "value 1",
+						Key:   conversion.StringPtr("key 1"),
+						Value: conversion.StringPtr("value 1"),
 					},
 					{
-						Key:   "key 2",
-						Value: "value 2",
+						Key:   conversion.StringPtr("key 2"),
+						Value: conversion.StringPtr("value 2"),
 					},
 				},
 				),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckMongoDBAtlasServerlessInstanceExists(resourceName, &serverlessInstance),
+					checkExists(resourceName),
 					resource.TestCheckResourceAttr(resourceName, "name", instanceName),
 					resource.TestCheckResourceAttr(resourceName, "tags.#", "2"),
 					resource.TestCheckTypeSetElemNestedAttrs(resourceName, "tags.*", acc.ClusterTagsMap1),
@@ -102,28 +100,28 @@ func TestAccServerlessInstance_WithTags(t *testing.T) {
 					resource.TestCheckResourceAttr(dataSourceName, "tags.#", "2"),
 					resource.TestCheckTypeSetElemNestedAttrs(dataSourceName, "tags.*", acc.ClusterTagsMap1),
 					resource.TestCheckTypeSetElemNestedAttrs(dataSourceName, "tags.*", acc.ClusterTagsMap2),
-					resource.TestCheckResourceAttr(dataSourceInstancesName, "results.0.tags.#", "2"),
-					resource.TestCheckTypeSetElemNestedAttrs(dataSourceInstancesName, "results.0.tags.*", acc.ClusterTagsMap1),
-					resource.TestCheckTypeSetElemNestedAttrs(dataSourceInstancesName, "results.0.tags.*", acc.ClusterTagsMap2),
+					resource.TestCheckResourceAttr(dataSourcePluralName, "results.0.tags.#", "2"),
+					resource.TestCheckTypeSetElemNestedAttrs(dataSourcePluralName, "results.0.tags.*", acc.ClusterTagsMap1),
+					resource.TestCheckTypeSetElemNestedAttrs(dataSourcePluralName, "results.0.tags.*", acc.ClusterTagsMap2),
 				),
 			},
 			{
-				Config: acc.ConfigServerlessInstanceWithTags(orgID, projectName, instanceName, []matlas.Tag{
+				Config: acc.ConfigServerlessInstanceWithTags(orgID, projectName, instanceName, []admin.ResourceTag{
 					{
-						Key:   "key 3",
-						Value: "value 3",
+						Key:   conversion.StringPtr("key 3"),
+						Value: conversion.StringPtr("value 3"),
 					},
 				},
 				),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckMongoDBAtlasServerlessInstanceExists(resourceName, &serverlessInstance),
+					checkExists(resourceName),
 					resource.TestCheckResourceAttr(resourceName, "name", instanceName),
 					resource.TestCheckResourceAttr(resourceName, "tags.#", "1"),
 					resource.TestCheckTypeSetElemNestedAttrs(resourceName, "tags.*", acc.ClusterTagsMap3),
 					resource.TestCheckResourceAttr(dataSourceName, "tags.#", "1"),
 					resource.TestCheckTypeSetElemNestedAttrs(dataSourceName, "tags.*", acc.ClusterTagsMap3),
-					resource.TestCheckResourceAttr(dataSourceInstancesName, "results.0.tags.#", "1"),
-					resource.TestCheckTypeSetElemNestedAttrs(dataSourceInstancesName, "results.0.tags.*", acc.ClusterTagsMap3),
+					resource.TestCheckResourceAttr(dataSourcePluralName, "results.0.tags.#", "1"),
+					resource.TestCheckTypeSetElemNestedAttrs(dataSourcePluralName, "results.0.tags.*", acc.ClusterTagsMap3),
 				),
 			},
 		},
@@ -132,7 +130,6 @@ func TestAccServerlessInstance_WithTags(t *testing.T) {
 
 func TestAccServerlessInstance_importBasic(t *testing.T) {
 	var (
-		resourceName = "mongodbatlas_serverless_instance.test"
 		instanceName = acctest.RandomWithPrefix("test-acc-serverless")
 		orgID        = os.Getenv("MONGODB_ATLAS_ORG_ID")
 		projectName  = acctest.RandomWithPrefix("test-acc-serverless")
@@ -141,14 +138,14 @@ func TestAccServerlessInstance_importBasic(t *testing.T) {
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck:                 func() { acc.PreCheckBasic(t) },
 		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
-		CheckDestroy:             testAccCheckMongoDBAtlasServerlessInstanceDestroy,
+		CheckDestroy:             checkDestroy,
 		Steps: []resource.TestStep{
 			{
 				Config: acc.ConfigServerlessInstanceBasic(orgID, projectName, instanceName, true),
 			},
 			{
 				ResourceName:      resourceName,
-				ImportStateIdFunc: testAccCheckMongoDBAtlasServerlessInstanceImportStateIDFunc(resourceName),
+				ImportStateIdFunc: importStateIDFunc(resourceName),
 				ImportState:       true,
 				ImportStateVerify: true,
 			},
@@ -156,7 +153,7 @@ func TestAccServerlessInstance_importBasic(t *testing.T) {
 	})
 }
 
-func testAccCheckMongoDBAtlasServerlessInstanceExists(resourceName string, serverlessInstance *matlas.Cluster) resource.TestCheckFunc {
+func checkExists(resourceName string) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
 		rs, ok := s.RootModule().Resources[resourceName]
 		if !ok {
@@ -166,22 +163,21 @@ func testAccCheckMongoDBAtlasServerlessInstanceExists(resourceName string, serve
 			return fmt.Errorf("no ID is set")
 		}
 		ids := conversion.DecodeStateID(rs.Primary.ID)
-		serverlessResponse, _, err := acc.Conn().ServerlessInstances.Get(context.Background(), ids["project_id"], ids["name"])
+		_, _, err := acc.ConnV2().ServerlessInstancesApi.GetServerlessInstance(context.Background(), ids["project_id"], ids["name"]).Execute()
 		if err == nil {
-			*serverlessInstance = *serverlessResponse
 			return nil
 		}
 		return fmt.Errorf("serverless instance (%s) does not exist", ids["name"])
 	}
 }
 
-func testAccCheckMongoDBAtlasServerlessInstanceDestroy(state *terraform.State) error {
+func checkDestroy(state *terraform.State) error {
 	for _, rs := range state.RootModule().Resources {
 		if rs.Type != "mongodbatlas_serverless_instance" {
 			continue
 		}
 		ids := conversion.DecodeStateID(rs.Primary.ID)
-		serverlessInstance, _, err := acc.Conn().ServerlessInstances.Get(context.Background(), ids["project_id"], ids["name"])
+		serverlessInstance, _, err := acc.ConnV2().ServerlessInstancesApi.GetServerlessInstance(context.Background(), ids["project_id"], ids["name"]).Execute()
 		if err == nil && serverlessInstance != nil {
 			return fmt.Errorf("serverless instance (%s) still exists", ids["name"])
 		}
@@ -189,7 +185,7 @@ func testAccCheckMongoDBAtlasServerlessInstanceDestroy(state *terraform.State) e
 	return nil
 }
 
-func testAccCheckMongoDBAtlasServerlessInstanceImportStateIDFunc(resourceName string) resource.ImportStateIdFunc {
+func importStateIDFunc(resourceName string) resource.ImportStateIdFunc {
 	return func(s *terraform.State) (string, error) {
 		rs, ok := s.RootModule().Resources[resourceName]
 		if !ok {
@@ -201,7 +197,7 @@ func testAccCheckMongoDBAtlasServerlessInstanceImportStateIDFunc(resourceName st
 	}
 }
 
-func testAccCheckConnectionStringPrivateEndpointIsPresentWithNoElement(resourceName string) resource.TestCheckFunc {
+func checkConnectionStringPrivateEndpointIsPresentWithNoElement(resourceName string) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
 		rs, ok := s.RootModule().Resources[resourceName]
 		if !ok {
diff --git a/internal/testutil/acc/serverless.go b/internal/testutil/acc/serverless.go
index b182e5e688..dba0ff75db 100644
--- a/internal/testutil/acc/serverless.go
+++ b/internal/testutil/acc/serverless.go
@@ -3,7 +3,7 @@ package acc
 import (
 	"fmt"
 
-	matlas "go.mongodb.org/atlas/mongodbatlas"
+	"go.mongodb.org/atlas-sdk/v20231115005/admin"
 )
 
 func ConfigServerlessInstanceBasic(orgID, projectName, name string, ignoreConnectionStrings bool) string {
@@ -21,15 +21,15 @@ func ConfigServerlessInstanceBasic(orgID, projectName, name string, ignoreConnec
 	return fmt.Sprintf(serverlessConfig, orgID, projectName, name, lifecycle)
 }
 
-func ConfigServerlessInstanceWithTags(orgID, projectName, name string, tags []matlas.Tag) string {
+func ConfigServerlessInstanceWithTags(orgID, projectName, name string, tags []admin.ResourceTag) string {
 	var tagsConf string
 	for _, label := range tags {
 		tagsConf += fmt.Sprintf(`
 			tags {
-				key   = "%s"
-				value = "%s"
+				key   = %q
+				value = %q
 			}
-		`, label.Key, label.Value)
+		`, label.GetKey(), label.GetValue())
 	}
 	return fmt.Sprintf(serverlessConfig, orgID, projectName, name, tagsConf)
 }
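
A hedged usage sketch of the helper above, not from this patch; the %q verbs mean keys and values containing spaces render safely into the generated HCL:

	// In a *_test.go file; the tag values are hypothetical.
	cfg := acc.ConfigServerlessInstanceWithTags(orgID, projectName, instanceName, []admin.ResourceTag{
		{Key: conversion.StringPtr("environment"), Value: conversion.StringPtr("qa")},
	})
	_ = cfg // pass as resource.TestStep{Config: cfg}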
diff --git a/internal/testutil/mig/test_step.go b/internal/testutil/mig/test_step.go
new file mode 100644
index 0000000000..806f349fbb
--- /dev/null
+++ b/internal/testutil/mig/test_step.go
@@ -0,0 +1,20 @@
+package mig
+
+import (
+	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
+	"github.com/hashicorp/terraform-plugin-testing/plancheck"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc"
+)
+
+func TestStep(config string) resource.TestStep {
+	return resource.TestStep{
+		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
+		Config:                   config,
+		ConfigPlanChecks: resource.ConfigPlanChecks{
+			PreApply: []plancheck.PlanCheck{
+				acc.DebugPlan(),
+				plancheck.ExpectEmptyPlan(),
+			},
+		},
+	}
+}
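
mig.TestStep is designed as the closing step of a migration test: provision with the last released provider via ExternalProviders, then re-apply the identical config against the local build and require an empty plan, which proves the upgrade happens in place. A minimal sketch of that composition, assuming config holds any valid resource configuration:

	Steps: []resource.TestStep{
		{ExternalProviders: mig.ExternalProviders(), Config: config}, // last released provider
		mig.TestStep(config),                                         // local build; expects an empty plan
	},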

From 7fd8e7875d6701c2f2831d3258d10ac440a75592 Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Tue, 6 Feb 2024 12:11:36 +0100
Subject: [PATCH 09/19] rename

---
 .../data_source_data_lake_pipeline.go         | 22 ++---
 .../data_source_data_lake_pipeline_run.go     |  4 +-
 ...data_source_data_lake_pipeline_run_test.go |  4 +-
 .../data_source_data_lake_pipeline_runs.go    |  8 +-
 ...ata_source_data_lake_pipeline_runs_test.go |  4 +-
 .../data_source_data_lake_pipeline_test.go    | 27 +-----
 .../data_source_data_lake_pipelines.go        | 12 +--
 .../data_source_data_lake_pipelines_test.go   |  8 +-
 .../resource_data_lake_pipeline.go            | 88 +++++++++----------
 .../resource_data_lake_pipeline_test.go       | 30 +++++--
 10 files changed, 102 insertions(+), 105 deletions(-)

diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipeline.go b/internal/service/datalakepipeline/data_source_data_lake_pipeline.go
index bb154925ec..94b51fe426 100644
--- a/internal/service/datalakepipeline/data_source_data_lake_pipeline.go
+++ b/internal/service/datalakepipeline/data_source_data_lake_pipeline.go
@@ -13,7 +13,7 @@ import (
 
 func DataSource() *schema.Resource {
 	return &schema.Resource{
-		ReadContext: dataSourceMongoDBAtlasDataLakePipelineRead,
+		ReadContext: dataSourceRead,
 		Schema: map[string]*schema.Schema{
 			"project_id": {
 				Type:     schema.TypeString,
@@ -119,13 +119,13 @@ func DataSource() *schema.Resource {
 					},
 				},
 			},
-			"snapshots":           dataSourceSchemaDataLakePipelineSnapshots(),
-			"ingestion_schedules": dataSourceSchemaDataLakePipelineIngestionSchedules(),
+			"snapshots":           dataSourceSchemaSnapshots(),
+			"ingestion_schedules": dataSourceSchemaIngestionSchedules(),
 		},
 	}
 }
 
-func dataSourceSchemaDataLakePipelineIngestionSchedules() *schema.Schema {
+func dataSourceSchemaIngestionSchedules() *schema.Schema {
 	return &schema.Schema{
 		Type:     schema.TypeSet,
 		Computed: true,
@@ -156,7 +156,7 @@ func dataSourceSchemaDataLakePipelineIngestionSchedules() *schema.Schema {
 	}
 }
 
-func dataSourceSchemaDataLakePipelineSnapshots() *schema.Schema {
+func dataSourceSchemaSnapshots() *schema.Schema {
 	return &schema.Schema{
 		Type:     schema.TypeSet,
 		Computed: true,
@@ -222,7 +222,7 @@ func dataSourceSchemaDataLakePipelineSnapshots() *schema.Schema {
 	}
 }
 
-func dataSourceMongoDBAtlasDataLakePipelineRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
 	conn := meta.(*config.MongoDBClient).Atlas
 	projectID := d.Get("project_id").(string)
 	name := d.Get("name").(string)
@@ -266,23 +266,23 @@ func setDataLakeResourceData(
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "last_updated_date", pipeline.Name, err))
 	}
 
-	if err := d.Set("sink", flattenDataLakePipelineSink(pipeline.Sink)); err != nil {
+	if err := d.Set("sink", flattenSink(pipeline.Sink)); err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "sink", pipeline.Name, err))
 	}
 
-	if err := d.Set("source", flattenDataLakePipelineSource(pipeline.Source)); err != nil {
+	if err := d.Set("source", flattenSource(pipeline.Source)); err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "source", pipeline.Name, err))
 	}
 
-	if err := d.Set("transformations", flattenDataLakePipelineTransformations(pipeline.Transformations)); err != nil {
+	if err := d.Set("transformations", flattenTransformations(pipeline.Transformations)); err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "transformations", pipeline.Name, err))
 	}
 
-	if err := d.Set("snapshots", flattenDataLakePipelineSnapshots(snapshots.Results)); err != nil {
+	if err := d.Set("snapshots", flattenSnapshots(snapshots.Results)); err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "snapshots", pipeline.Name, err))
 	}
 
-	if err := d.Set("ingestion_schedules", flattenDataLakePipelineIngestionSchedules(ingestionSchedules)); err != nil {
+	if err := d.Set("ingestion_schedules", flattenIngestionSchedules(ingestionSchedules)); err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "ingestion_schedules", pipeline.Name, err))
 	}
 
diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipeline_run.go b/internal/service/datalakepipeline/data_source_data_lake_pipeline_run.go
index 05eb2abab4..f288db6fa4 100644
--- a/internal/service/datalakepipeline/data_source_data_lake_pipeline_run.go
+++ b/internal/service/datalakepipeline/data_source_data_lake_pipeline_run.go
@@ -16,7 +16,7 @@ const errorDataLakePipelineRunRead = "error reading MongoDB Atlas DataLake Run (
 
 func DataSourceRun() *schema.Resource {
 	return &schema.Resource{
-		ReadContext: dataSourceMongoDBAtlasDataLakeRunRead,
+		ReadContext: dataSourceRunRead,
 		Schema: map[string]*schema.Schema{
 			"project_id": {
 				Type:     schema.TypeString,
@@ -87,7 +87,7 @@ func DataSourceRun() *schema.Resource {
 	}
 }
 
-func dataSourceMongoDBAtlasDataLakeRunRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+func dataSourceRunRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
 	conn := meta.(*config.MongoDBClient).Atlas
 	projectID := d.Get("project_id").(string)
 	name := d.Get("pipeline_name").(string)
diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipeline_run_test.go b/internal/service/datalakepipeline/data_source_data_lake_pipeline_run_test.go
index a38c942395..5d819eb878 100644
--- a/internal/service/datalakepipeline/data_source_data_lake_pipeline_run_test.go
+++ b/internal/service/datalakepipeline/data_source_data_lake_pipeline_run_test.go
@@ -23,7 +23,7 @@ func TestAccDataLakeRunDS_basic(t *testing.T) {
 		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
 		Steps: []resource.TestStep{
 			{
-				Config: testAccMongoDBAtlasDataLakeDataSourcePipelineRunConfig(projectID, pipelineName, runID),
+				Config: configRunDS(projectID, pipelineName, runID),
 				Check: resource.ComposeTestCheckFunc(
 					resource.TestCheckResourceAttrSet(dataSourceName, "project_id"),
 					resource.TestCheckResourceAttr(dataSourceName, "pipeline_name", pipelineName),
@@ -38,7 +38,7 @@ func TestAccDataLakeRunDS_basic(t *testing.T) {
 	})
 }
 
-func testAccMongoDBAtlasDataLakeDataSourcePipelineRunConfig(projectID, pipelineName, runID string) string {
+func configRunDS(projectID, pipelineName, runID string) string {
 	return fmt.Sprintf(`
 
 data "mongodbatlas_data_lake_pipeline_run" "test" {
diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs.go b/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs.go
index 57f74615c2..da892bacd9 100644
--- a/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs.go
+++ b/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs.go
@@ -15,7 +15,7 @@ const errorDataLakePipelineRunList = "error reading MongoDB Atlas DataLake Runs
 
 func PluralDataSourceRun() *schema.Resource {
 	return &schema.Resource{
-		ReadContext: dataSourceMongoDBAtlasDataLakeRunsRead,
+		ReadContext: dataSourcePluralRunRead,
 		Schema: map[string]*schema.Schema{
 			"project_id": {
 				Type:     schema.TypeString,
@@ -90,7 +90,7 @@ func PluralDataSourceRun() *schema.Resource {
 	}
 }
 
-func dataSourceMongoDBAtlasDataLakeRunsRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+func dataSourcePluralRunRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
 	conn := meta.(*config.MongoDBClient).Atlas
 	projectID := d.Get("project_id").(string)
 	name := d.Get("pipeline_name").(string)
@@ -100,7 +100,7 @@ func dataSourceMongoDBAtlasDataLakeRunsRead(ctx context.Context, d *schema.Resou
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineRunList, projectID, err))
 	}
 
-	if err := d.Set("results", flattenDataLakePipelineRunResult(dataLakeRuns.Results)); err != nil {
+	if err := d.Set("results", flattenRunResult(dataLakeRuns.Results)); err != nil {
 		return diag.FromErr(fmt.Errorf(ErrorDataLakeSetting, "results", projectID, err))
 	}
 
@@ -109,7 +109,7 @@ func dataSourceMongoDBAtlasDataLakeRunsRead(ctx context.Context, d *schema.Resou
 	return nil
 }
 
-func flattenDataLakePipelineRunResult(datalakePipelineRuns []*matlas.DataLakePipelineRun) []map[string]any {
+func flattenRunResult(datalakePipelineRuns []*matlas.DataLakePipelineRun) []map[string]any {
 	var results []map[string]any
 
 	if len(datalakePipelineRuns) == 0 {
diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs_test.go b/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs_test.go
index b261f0b439..cbf9a44826 100644
--- a/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs_test.go
+++ b/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs_test.go
@@ -22,7 +22,7 @@ func TestAccDataLakeRunDSPlural_basic(t *testing.T) {
 		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
 		Steps: []resource.TestStep{
 			{
-				Config: testAccMongoDBAtlasDataLakeDataSourcePipelineRunsConfig(projectID, pipelineName),
+				Config: configRunDSPlural(projectID, pipelineName),
 				Check: resource.ComposeTestCheckFunc(
 					resource.TestCheckResourceAttrSet(dataSourceName, "project_id"),
 					resource.TestCheckResourceAttr(dataSourceName, "pipeline_name", pipelineName),
@@ -33,7 +33,7 @@ func TestAccDataLakeRunDSPlural_basic(t *testing.T) {
 	})
 }
 
-func testAccMongoDBAtlasDataLakeDataSourcePipelineRunsConfig(projectID, pipelineName string) string {
+func configRunDSPlural(projectID, pipelineName string) string {
 	return fmt.Sprintf(`
 
 data "mongodbatlas_data_lake_pipeline_runs" "test" {
diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipeline_test.go b/internal/service/datalakepipeline/data_source_data_lake_pipeline_test.go
index e79e0d2ddf..69dfad0ac4 100644
--- a/internal/service/datalakepipeline/data_source_data_lake_pipeline_test.go
+++ b/internal/service/datalakepipeline/data_source_data_lake_pipeline_test.go
@@ -1,15 +1,12 @@
 package datalakepipeline_test
 
 import (
-	"context"
 	"fmt"
 	"os"
 	"testing"
 
 	"github.com/hashicorp/terraform-plugin-testing/helper/acctest"
 	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
-	"github.com/hashicorp/terraform-plugin-testing/terraform"
-	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc"
 	matlas "go.mongodb.org/atlas/mongodbatlas"
 )
@@ -26,12 +23,12 @@ func TestAccDataLakeDS_basic(t *testing.T) {
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck:                 func() { acc.PreCheckBasic(t) },
 		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
-		CheckDestroy:             testAccCheckMongoDBAtlasDataLakePipelineDestroy,
+		CheckDestroy:             checkDestroy,
 		Steps: []resource.TestStep{
 			{
-				Config: testAccDataSourceMongoDBAtlasDataLakePipelineConfig(orgID, projectName, clusterName, name),
+				Config: configDS(orgID, projectName, clusterName, name),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckMongoDBAtlasDataLakePipelineExists(resourceName, &pipeline),
+					checkExists(resourceName, &pipeline),
 					resource.TestCheckResourceAttrSet(resourceName, "project_id"),
 					resource.TestCheckResourceAttr(resourceName, "name", name),
 					resource.TestCheckResourceAttr(resourceName, "state", "ACTIVE"),
@@ -41,23 +38,7 @@ func TestAccDataLakeDS_basic(t *testing.T) {
 	})
 }
 
-func testAccCheckMongoDBAtlasDataLakePipelineDestroy(s *terraform.State) error {
-	for _, rs := range s.RootModule().Resources {
-		if rs.Type != "mongodbatlas_data_lake_pipeline" {
-			continue
-		}
-
-		ids := conversion.DecodeStateID(rs.Primary.ID)
-		// Try to find the data lake pipeline
-		_, _, err := acc.Conn().DataLakePipeline.Get(context.Background(), ids["project_id"], ids["name"])
-		if err == nil {
-			return fmt.Errorf("datalake (%s) still exists", ids["project_id"])
-		}
-	}
-	return nil
-}
-
-func testAccDataSourceMongoDBAtlasDataLakePipelineConfig(orgID, projectName, clusterName, pipelineName string) string {
+func configDS(orgID, projectName, clusterName, pipelineName string) string {
 	return fmt.Sprintf(`
 		resource "mongodbatlas_project" "project" {
 			org_id = %[1]q
diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipelines.go b/internal/service/datalakepipeline/data_source_data_lake_pipelines.go
index 03999b6b92..a4ee56e6cd 100644
--- a/internal/service/datalakepipeline/data_source_data_lake_pipelines.go
+++ b/internal/service/datalakepipeline/data_source_data_lake_pipelines.go
@@ -15,7 +15,7 @@ const errorDataLakePipelineList = "error creating MongoDB Atlas DataLake Pipelin
 
 func PluralDataSource() *schema.Resource {
 	return &schema.Resource{
-		ReadContext: dataSourceMongoDBAtlasDataLakePipelinesRead,
+		ReadContext: dataSourcePluralRead,
 		Schema: map[string]*schema.Schema{
 			"project_id": {
 				Type:     schema.TypeString,
@@ -25,7 +25,7 @@ func PluralDataSource() *schema.Resource {
 				Type:     schema.TypeList,
 				Computed: true,
 				Elem: &schema.Resource{
-					ReadContext: dataSourceMongoDBAtlasDataLakePipelineRead,
+					ReadContext: dataSourceRead,
 					Schema: map[string]*schema.Schema{
 						"project_id": {
 							Type:     schema.TypeString,
@@ -138,7 +138,7 @@ func PluralDataSource() *schema.Resource {
 	}
 }
 
-func dataSourceMongoDBAtlasDataLakePipelinesRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+func dataSourcePluralRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
 	conn := meta.(*config.MongoDBClient).Atlas
 	projectID := d.Get("project_id").(string)
 
@@ -170,9 +170,9 @@ func flattenDataLakePipelines(peers []*matlas.DataLakePipeline) []map[string]any
 			"created_date":      peers[i].CreatedDate,
 			"last_updated_date": peers[i].LastUpdatedDate,
 			"state":             peers[i].State,
-			"sink":              flattenDataLakePipelineSink(peers[i].Sink),
-			"source":            flattenDataLakePipelineSource(peers[i].Source),
-			"transformations":   flattenDataLakePipelineTransformations(peers[i].Transformations),
+			"sink":              flattenSink(peers[i].Sink),
+			"source":            flattenSource(peers[i].Source),
+			"transformations":   flattenTransformations(peers[i].Transformations),
 		}
 	}
 
diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipelines_test.go b/internal/service/datalakepipeline/data_source_data_lake_pipelines_test.go
index 26063917d1..1ad2b9cd60 100644
--- a/internal/service/datalakepipeline/data_source_data_lake_pipelines_test.go
+++ b/internal/service/datalakepipeline/data_source_data_lake_pipelines_test.go
@@ -26,12 +26,12 @@ func TestAccDataLakeDSPlural_basic(t *testing.T) {
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck:                 func() { acc.PreCheckBasic(t) },
 		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
-		CheckDestroy:             testAccCheckMongoDBAtlasDataLakePipelineDestroy,
+		CheckDestroy:             checkDestroy,
 		Steps: []resource.TestStep{
 			{
-				Config: testAccDataSourceMongoDBAtlasDataLakePipelinesConfig(orgID, projectName, firstClusterName, secondClusterName, firstPipelineName, secondPipelineName),
+				Config: configDSPlural(orgID, projectName, firstClusterName, secondClusterName, firstPipelineName, secondPipelineName),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckMongoDBAtlasDataLakePipelineExists(resourceName, &pipeline),
+					checkExists(resourceName, &pipeline),
 					resource.TestCheckResourceAttrSet(dataSourceName, "results.#"),
 					resource.TestCheckResourceAttrSet(dataSourceName, "results.0.name"),
 					resource.TestCheckResourceAttrSet(dataSourceName, "results.0.state"),
@@ -45,7 +45,7 @@ func TestAccDataLakeDSPlural_basic(t *testing.T) {
 	})
 }
 
-func testAccDataSourceMongoDBAtlasDataLakePipelinesConfig(orgID, projectName, firstClusterName, secondClusterName, firstPipelineName, secondPipelineName string) string {
+func configDSPlural(orgID, projectName, firstClusterName, secondClusterName, firstPipelineName, secondPipelineName string) string {
 	return fmt.Sprintf(`
 
 		resource "mongodbatlas_project" "project" {
diff --git a/internal/service/datalakepipeline/resource_data_lake_pipeline.go b/internal/service/datalakepipeline/resource_data_lake_pipeline.go
index 845994a69f..7b549cd0e4 100644
--- a/internal/service/datalakepipeline/resource_data_lake_pipeline.go
+++ b/internal/service/datalakepipeline/resource_data_lake_pipeline.go
@@ -27,12 +27,12 @@ const (
 
 func Resource() *schema.Resource {
 	return &schema.Resource{
-		CreateContext: resourceMongoDBAtlasDataLakePipelineCreate,
-		ReadContext:   resourceMongoDBAtlasDataLakePipelineRead,
-		UpdateContext: resourceMongoDBAtlasDataLakePipelineUpdate,
-		DeleteContext: resourceMongoDBAtlasDataLakePipelineDelete,
+		CreateContext: resourceCreate,
+		ReadContext:   resourceRead,
+		UpdateContext: resourceUpdate,
+		DeleteContext: resourceDelete,
 		Importer: &schema.ResourceImporter{
-			StateContext: resourceMongoDBAtlasDataLakePipelineImportState,
+			StateContext: resourceImport,
 		},
 		Schema: map[string]*schema.Schema{
 			"project_id": {
@@ -148,13 +148,13 @@ func Resource() *schema.Resource {
 					},
 				},
 			},
-			"snapshots":           schemaDataLakePipelineSnapshots(),
-			"ingestion_schedules": schemaDataLakePipelineIngestionSchedules(),
+			"snapshots":           schemaSnapshots(),
+			"ingestion_schedules": schemaSchedules(),
 		},
 	}
 }
 
-func schemaDataLakePipelineIngestionSchedules() *schema.Schema {
+func schemaSchedules() *schema.Schema {
 	return &schema.Schema{
 		Type:     schema.TypeSet,
 		Computed: true,
@@ -185,7 +185,7 @@ func schemaDataLakePipelineIngestionSchedules() *schema.Schema {
 	}
 }
 
-func schemaDataLakePipelineSnapshots() *schema.Schema {
+func schemaSnapshots() *schema.Schema {
 	return &schema.Schema{
 		Type:     schema.TypeSet,
 		Computed: true,
@@ -255,7 +255,7 @@ func schemaDataLakePipelineSnapshots() *schema.Schema {
 	}
 }
 
-func resourceMongoDBAtlasDataLakePipelineCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
 	conn := meta.(*config.MongoDBClient).Atlas
 	projectID := d.Get("project_id").(string)
 	name := d.Get("name").(string)
@@ -263,9 +263,9 @@ func resourceMongoDBAtlasDataLakePipelineCreate(ctx context.Context, d *schema.R
 	dataLakePipelineReqBody := &matlas.DataLakePipeline{
 		GroupID:         projectID,
 		Name:            name,
-		Sink:            newDataLakePipelineSink(d),
-		Source:          newDataLakePipelineSource(d),
-		Transformations: newDataLakePipelineTransformation(d),
+		Sink:            newSink(d),
+		Source:          newSource(d),
+		Transformations: newTransformation(d),
 	}
 
 	dataLakePipeline, _, err := conn.DataLakePipeline.Create(ctx, projectID, dataLakePipelineReqBody)
@@ -278,10 +278,10 @@ func resourceMongoDBAtlasDataLakePipelineCreate(ctx context.Context, d *schema.R
 		"name":       dataLakePipeline.Name,
 	}))
 
-	return resourceMongoDBAtlasDataLakePipelineRead(ctx, d, meta)
+	return resourceRead(ctx, d, meta)
 }
 
-func resourceMongoDBAtlasDataLakePipelineRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
 	conn := meta.(*config.MongoDBClient).Atlas
 	ids := conversion.DecodeStateID(d.Id())
 	projectID := ids["project_id"]
@@ -313,15 +313,15 @@ func resourceMongoDBAtlasDataLakePipelineRead(ctx context.Context, d *schema.Res
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "last_updated_date", name, err))
 	}
 
-	if err := d.Set("sink", flattenDataLakePipelineSink(dataLakePipeline.Sink)); err != nil {
+	if err := d.Set("sink", flattenSink(dataLakePipeline.Sink)); err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "sink", name, err))
 	}
 
-	if err := d.Set("source", flattenDataLakePipelineSource(dataLakePipeline.Source)); err != nil {
+	if err := d.Set("source", flattenSource(dataLakePipeline.Source)); err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "source", name, err))
 	}
 
-	if err := d.Set("transformations", flattenDataLakePipelineTransformations(dataLakePipeline.Transformations)); err != nil {
+	if err := d.Set("transformations", flattenTransformations(dataLakePipeline.Transformations)); err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "transformations", name, err))
 	}
 
@@ -330,7 +330,7 @@ func resourceMongoDBAtlasDataLakePipelineRead(ctx context.Context, d *schema.Res
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineRead, name, err))
 	}
 
-	if err := d.Set("snapshots", flattenDataLakePipelineSnapshots(snapshots.Results)); err != nil {
+	if err := d.Set("snapshots", flattenSnapshots(snapshots.Results)); err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "snapshots", name, err))
 	}
 
@@ -339,7 +339,7 @@ func resourceMongoDBAtlasDataLakePipelineRead(ctx context.Context, d *schema.Res
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineRead, name, err))
 	}
 
-	if err := d.Set("ingestion_schedules", flattenDataLakePipelineIngestionSchedules(ingestionSchedules)); err != nil {
+	if err := d.Set("ingestion_schedules", flattenIngestionSchedules(ingestionSchedules)); err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "ingestion_schedules", name, err))
 	}
 
@@ -351,7 +351,7 @@ func resourceMongoDBAtlasDataLakePipelineRead(ctx context.Context, d *schema.Res
 	return nil
 }
 
-func resourceMongoDBAtlasDataLakePipelineUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
 	conn := meta.(*config.MongoDBClient).Atlas
 	projectID := d.Get("project_id").(string)
 	name := d.Get("name").(string)
@@ -359,9 +359,9 @@ func resourceMongoDBAtlasDataLakePipelineUpdate(ctx context.Context, d *schema.R
 	dataLakePipelineReqBody := &matlas.DataLakePipeline{
 		GroupID:         projectID,
 		Name:            name,
-		Sink:            newDataLakePipelineSink(d),
-		Source:          newDataLakePipelineSource(d),
-		Transformations: newDataLakePipelineTransformation(d),
+		Sink:            newSink(d),
+		Source:          newSource(d),
+		Transformations: newTransformation(d),
 	}
 
 	_, _, err := conn.DataLakePipeline.Update(ctx, projectID, name, dataLakePipelineReqBody)
@@ -369,10 +369,10 @@ func resourceMongoDBAtlasDataLakePipelineUpdate(ctx context.Context, d *schema.R
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineUpdate, err))
 	}
 
-	return resourceMongoDBAtlasDataLakePipelineRead(ctx, d, meta)
+	return resourceRead(ctx, d, meta)
 }
 
-func resourceMongoDBAtlasDataLakePipelineDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
 	conn := meta.(*config.MongoDBClient).Atlas
 	ids := conversion.DecodeStateID(d.Id())
 	projectID := ids["project_id"]
@@ -386,7 +386,7 @@ func resourceMongoDBAtlasDataLakePipelineDelete(ctx context.Context, d *schema.R
 	return nil
 }
 
-func resourceMongoDBAtlasDataLakePipelineImportState(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) {
+func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) {
 	conn := meta.(*config.MongoDBClient).Atlas
 
 	projectID, name, err := splitDataLakePipelineImportID(d.Id())
@@ -423,15 +423,15 @@ func resourceMongoDBAtlasDataLakePipelineImportState(ctx context.Context, d *sch
 		return nil, fmt.Errorf(errorDataLakePipelineImportField, "last_updated_date", name, err)
 	}
 
-	if err := d.Set("sink", flattenDataLakePipelineSink(dataLakePipeline.Sink)); err != nil {
+	if err := d.Set("sink", flattenSink(dataLakePipeline.Sink)); err != nil {
 		return nil, fmt.Errorf(errorDataLakePipelineImportField, "sink", name, err)
 	}
 
-	if err := d.Set("source", flattenDataLakePipelineSource(dataLakePipeline.Source)); err != nil {
+	if err := d.Set("source", flattenSource(dataLakePipeline.Source)); err != nil {
 		return nil, fmt.Errorf(errorDataLakePipelineImportField, "source", name, err)
 	}
 
-	if err := d.Set("transformations", flattenDataLakePipelineTransformations(dataLakePipeline.Transformations)); err != nil {
+	if err := d.Set("transformations", flattenTransformations(dataLakePipeline.Transformations)); err != nil {
 		return nil, fmt.Errorf(errorDataLakePipelineImportField, "transformations", name, err)
 	}
 
@@ -445,7 +445,7 @@ func resourceMongoDBAtlasDataLakePipelineImportState(ctx context.Context, d *sch
 		return nil, fmt.Errorf(errorDataLakePipelineImport, name, err)
 	}
 
-	if err := d.Set("snapshots", flattenDataLakePipelineSnapshots(snapshots.Results)); err != nil {
+	if err := d.Set("snapshots", flattenSnapshots(snapshots.Results)); err != nil {
 		return nil, fmt.Errorf(errorDataLakePipelineImportField, "snapshots", name, err)
 	}
 
@@ -454,7 +454,7 @@ func resourceMongoDBAtlasDataLakePipelineImportState(ctx context.Context, d *sch
 		return nil, fmt.Errorf(errorDataLakePipelineImport, name, err)
 	}
 
-	if err := d.Set("ingestion_schedules", flattenDataLakePipelineIngestionSchedules(ingestionSchedules)); err != nil {
+	if err := d.Set("ingestion_schedules", flattenIngestionSchedules(ingestionSchedules)); err != nil {
 		return nil, fmt.Errorf(errorDataLakePipelineImportField, "ingestion_schedules", name, err)
 	}
 
@@ -475,7 +475,7 @@ func splitDataLakePipelineImportID(id string) (projectID, name string, err error
 	return
 }
 
-func newDataLakePipelineSink(d *schema.ResourceData) *matlas.DataLakePipelineSink {
+func newSink(d *schema.ResourceData) *matlas.DataLakePipelineSink {
 	if sink, ok := d.Get("sink").([]any); ok && len(sink) == 1 {
 		sinkMap := sink[0].(map[string]any)
 		dataLakePipelineSink := &matlas.DataLakePipelineSink{}
@@ -492,14 +492,14 @@ func newDataLakePipelineSink(d *schema.ResourceData) *matlas.DataLakePipelineSin
 			dataLakePipelineSink.MetadataRegion = region
 		}
 
-		dataLakePipelineSink.PartitionFields = newDataLakePipelinePartitionField(sinkMap)
+		dataLakePipelineSink.PartitionFields = newPartitionField(sinkMap)
 		return dataLakePipelineSink
 	}
 
 	return nil
 }
 
-func newDataLakePipelinePartitionField(sinkMap map[string]any) []*matlas.DataLakePipelinePartitionField {
+func newPartitionField(sinkMap map[string]any) []*matlas.DataLakePipelinePartitionField {
 	partitionFields, ok := sinkMap["partition_fields"].([]any)
 	if !ok || len(partitionFields) == 0 {
 		return nil
@@ -517,7 +517,7 @@ func newDataLakePipelinePartitionField(sinkMap map[string]any) []*matlas.DataLak
 	return fields
 }
 
-func newDataLakePipelineSource(d *schema.ResourceData) *matlas.DataLakePipelineSource {
+func newSource(d *schema.ResourceData) *matlas.DataLakePipelineSource {
 	source, ok := d.Get("source").([]any)
 	if !ok || len(source) == 0 {
 		return nil
@@ -549,7 +549,7 @@ func newDataLakePipelineSource(d *schema.ResourceData) *matlas.DataLakePipelineS
 	return dataLakePipelineSource
 }
 
-func newDataLakePipelineTransformation(d *schema.ResourceData) []*matlas.DataLakePipelineTransformation {
+func newTransformation(d *schema.ResourceData) []*matlas.DataLakePipelineTransformation {
 	trasformations, ok := d.Get("transformations").([]any)
 	if !ok || len(trasformations) == 0 {
 		return nil
@@ -576,7 +576,7 @@ func newDataLakePipelineTransformation(d *schema.ResourceData) []*matlas.DataLak
 	return dataLakePipelineTransformations
 }
 
-func flattenDataLakePipelineSource(atlasPipelineSource *matlas.DataLakePipelineSource) []map[string]any {
+func flattenSource(atlasPipelineSource *matlas.DataLakePipelineSource) []map[string]any {
 	if atlasPipelineSource == nil {
 		return nil
 	}
@@ -592,7 +592,7 @@ func flattenDataLakePipelineSource(atlasPipelineSource *matlas.DataLakePipelineS
 	}
 }
 
-func flattenDataLakePipelineSink(atlasPipelineSink *matlas.DataLakePipelineSink) []map[string]any {
+func flattenSink(atlasPipelineSink *matlas.DataLakePipelineSink) []map[string]any {
 	if atlasPipelineSink == nil {
 		return nil
 	}
@@ -602,12 +602,12 @@ func flattenDataLakePipelineSink(atlasPipelineSink *matlas.DataLakePipelineSink)
 			"type":             atlasPipelineSink.Type,
 			"provider":         atlasPipelineSink.MetadataProvider,
 			"region":           atlasPipelineSink.MetadataRegion,
-			"partition_fields": flattenDataLakePipelinePartitionFields(atlasPipelineSink.PartitionFields),
+			"partition_fields": flattenPartitionFields(atlasPipelineSink.PartitionFields),
 		},
 	}
 }
 
-func flattenDataLakePipelineIngestionSchedules(atlasPipelineIngestionSchedules []*matlas.DataLakePipelineIngestionSchedule) []map[string]any {
+func flattenIngestionSchedules(atlasPipelineIngestionSchedules []*matlas.DataLakePipelineIngestionSchedule) []map[string]any {
 	if len(atlasPipelineIngestionSchedules) == 0 {
 		return nil
 	}
@@ -626,7 +626,7 @@ func flattenDataLakePipelineIngestionSchedules(atlasPipelineIngestionSchedules [
 	return out
 }
 
-func flattenDataLakePipelineSnapshots(snapshots []*matlas.DataLakePipelineSnapshot) []map[string]any {
+func flattenSnapshots(snapshots []*matlas.DataLakePipelineSnapshot) []map[string]any {
 	if len(snapshots) == 0 {
 		return nil
 	}
@@ -652,7 +652,7 @@ func flattenDataLakePipelineSnapshots(snapshots []*matlas.DataLakePipelineSnapsh
 	return out
 }
 
-func flattenDataLakePipelineTransformations(atlasPipelineTransformation []*matlas.DataLakePipelineTransformation) []map[string]any {
+func flattenTransformations(atlasPipelineTransformation []*matlas.DataLakePipelineTransformation) []map[string]any {
 	if len(atlasPipelineTransformation) == 0 {
 		return nil
 	}
@@ -667,7 +667,7 @@ func flattenDataLakePipelineTransformations(atlasPipelineTransformation []*matla
 	return out
 }
 
-func flattenDataLakePipelinePartitionFields(atlasDataLakePipelinePartitionFields []*matlas.DataLakePipelinePartitionField) []map[string]any {
+func flattenPartitionFields(atlasDataLakePipelinePartitionFields []*matlas.DataLakePipelinePartitionField) []map[string]any {
 	if len(atlasDataLakePipelinePartitionFields) == 0 {
 		return nil
 	}
diff --git a/internal/service/datalakepipeline/resource_data_lake_pipeline_test.go b/internal/service/datalakepipeline/resource_data_lake_pipeline_test.go
index d26152ccbb..6aa6e17ff5 100644
--- a/internal/service/datalakepipeline/resource_data_lake_pipeline_test.go
+++ b/internal/service/datalakepipeline/resource_data_lake_pipeline_test.go
@@ -26,12 +26,12 @@ func TestAccDataLakePipeline_basic(t *testing.T) {
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck:                 func() { acc.PreCheckBasic(t) },
 		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
-		CheckDestroy:             acc.CheckDestroySearchIndex,
+		CheckDestroy:             checkDestroy,
 		Steps: []resource.TestStep{
 			{
-				Config: testAccMongoDBAtlasDataLakePipelineConfig(orgID, projectName, clusterName, name),
+				Config: configBasic(orgID, projectName, clusterName, name),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckMongoDBAtlasDataLakePipelineExists(resourceName, &pipeline),
+					checkExists(resourceName, &pipeline),
 					resource.TestCheckResourceAttrSet(resourceName, "project_id"),
 					resource.TestCheckResourceAttr(resourceName, "name", name),
 					resource.TestCheckResourceAttr(resourceName, "state", "ACTIVE"),
@@ -39,7 +39,7 @@ func TestAccDataLakePipeline_basic(t *testing.T) {
 			},
 			{
 				ResourceName:      resourceName,
-				ImportStateIdFunc: testAccCheckMongoDBAtlasDataLakePipelineImportStateIDFunc(resourceName),
+				ImportStateIdFunc: importStateIDFunc(resourceName),
 				ImportState:       true,
 				ImportStateVerify: true,
 			},
@@ -47,7 +47,7 @@ func TestAccDataLakePipeline_basic(t *testing.T) {
 	})
 }
 
-func testAccCheckMongoDBAtlasDataLakePipelineImportStateIDFunc(resourceName string) resource.ImportStateIdFunc {
+func importStateIDFunc(resourceName string) resource.ImportStateIdFunc {
 	return func(s *terraform.State) (string, error) {
 		rs, ok := s.RootModule().Resources[resourceName]
 		if !ok {
@@ -60,7 +60,23 @@ func testAccCheckMongoDBAtlasDataLakePipelineImportStateIDFunc(resourceName stri
 	}
 }
 
-func testAccCheckMongoDBAtlasDataLakePipelineExists(resourceName string, pipeline *matlas.DataLakePipeline) resource.TestCheckFunc {
+func checkDestroy(s *terraform.State) error {
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "mongodbatlas_data_lake_pipeline" {
+			continue
+		}
+
+		ids := conversion.DecodeStateID(rs.Primary.ID)
+		// Try to find the data lake pipeline
+		_, _, err := acc.Conn().DataLakePipeline.Get(context.Background(), ids["project_id"], ids["name"])
+		if err == nil {
+			return fmt.Errorf("datalake (%s) still exists", ids["project_id"])
+		}
+	}
+	return nil
+}
+
+func checkExists(resourceName string, pipeline *matlas.DataLakePipeline) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
 		rs, ok := s.RootModule().Resources[resourceName]
 		if !ok {
@@ -79,7 +95,7 @@ func testAccCheckMongoDBAtlasDataLakePipelineExists(resourceName string, pipelin
 	}
 }
 
-func testAccMongoDBAtlasDataLakePipelineConfig(orgID, projectName, clusterName, pipelineName string) string {
+func configBasic(orgID, projectName, clusterName, pipelineName string) string {
 	return fmt.Sprintf(`
 
 		resource "mongodbatlas_project" "project" {

From d3f3dcdd8333f262302463fbe5fabdc28b13bc5a Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Tue, 6 Feb 2024 12:39:04 +0100
Subject: [PATCH 10/19] change region to avoid out of capacity errors
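
The acceptance-test cluster configs hardcode the AWS region, so when
US_EAST_1 runs short of capacity the tests fail before exercising any
provider logic; this commit moves them to EU_WEST_1. A minimal sketch of an
alternative, assuming a hypothetical MONGODB_ATLAS_TEST_REGION variable
(not part of this change), would read the region from the environment so a
future capacity problem needs no code change:

```go
package acc // hypothetical placement, for illustration only

import "os"

// testRegion returns the AWS region for acceptance tests, preferring an
// environment override and falling back to a region assumed to have capacity.
func testRegion() string {
	if r := os.Getenv("MONGODB_ATLAS_TEST_REGION"); r != "" {
		return r
	}
	return "EU_WEST_1"
}
```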

---
 .../datalakepipeline/data_source_data_lake_pipeline_test.go   | 2 +-
 .../datalakepipeline/data_source_data_lake_pipelines_test.go  | 4 ++--
 .../datalakepipeline/resource_data_lake_pipeline_test.go      | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipeline_test.go b/internal/service/datalakepipeline/data_source_data_lake_pipeline_test.go
index 69dfad0ac4..fb163e8c34 100644
--- a/internal/service/datalakepipeline/data_source_data_lake_pipeline_test.go
+++ b/internal/service/datalakepipeline/data_source_data_lake_pipeline_test.go
@@ -58,7 +58,7 @@ func configDS(orgID, projectName, clusterName, pipelineName string) string {
 				}
 				provider_name = "AWS"
 				priority      = 7
-				region_name   = "US_EAST_1"
+				region_name   = "EU_WEST_1"
 			}
 			}
 			backup_enabled               = true
diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipelines_test.go b/internal/service/datalakepipeline/data_source_data_lake_pipelines_test.go
index 1ad2b9cd60..875e91d6f9 100644
--- a/internal/service/datalakepipeline/data_source_data_lake_pipelines_test.go
+++ b/internal/service/datalakepipeline/data_source_data_lake_pipelines_test.go
@@ -67,7 +67,7 @@ func configDSPlural(orgID, projectName, firstClusterName, secondClusterName, fir
 				}
 				provider_name = "AWS"
 				priority      = 7
-				region_name   = "US_EAST_1"
+				region_name   = "EU_WEST_1"
 			}
 			}
 			backup_enabled               = true
@@ -86,7 +86,7 @@ func configDSPlural(orgID, projectName, firstClusterName, secondClusterName, fir
 				}
 				provider_name = "AWS"
 				priority      = 7
-				region_name   = "US_EAST_1"
+				region_name   = "EU_WEST_1"
 			}
 			}
 			backup_enabled               = true
diff --git a/internal/service/datalakepipeline/resource_data_lake_pipeline_test.go b/internal/service/datalakepipeline/resource_data_lake_pipeline_test.go
index 6aa6e17ff5..f7b559c74d 100644
--- a/internal/service/datalakepipeline/resource_data_lake_pipeline_test.go
+++ b/internal/service/datalakepipeline/resource_data_lake_pipeline_test.go
@@ -116,7 +116,7 @@ func configBasic(orgID, projectName, clusterName, pipelineName string) string {
 				}
 				provider_name = "AWS"
 				priority      = 7
-				region_name   = "US_EAST_1"
+				region_name   = "EU_WEST_1"
 			  }
 			}
 			backup_enabled               = true

From fe59660c2e07b93d494ef4c82a46e82b40b3967f Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Tue, 6 Feb 2024 15:48:10 +0100
Subject: [PATCH 11/19] plural data source doesn't depend on the resources, so
 we can't ensure both resources are created when the data source is executed
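
The plural data source only references the project, not the two
mongodbatlas_data_lake_pipeline resources, so Terraform may read it before
both pipelines exist; the per-element assertions on results.0.* and
results.1.* were therefore flaky and are dropped, keeping only the check
that results.# is set. A hedged sketch of a looser assertion that tolerates
creation order (an assumption, not part of this patch), using
TestCheckResourceAttrWith from terraform-plugin-testing:

```go
// Sketch: require at least one pipeline in results without assuming how
// many already exist or in which order the API returns them.
// Assumes "strconv" and "fmt" are imported in the test file.
resource.TestCheckResourceAttrWith(dataSourceName, "results.#", func(v string) error {
	n, err := strconv.Atoi(v)
	if err != nil {
		return err
	}
	if n < 1 {
		return fmt.Errorf("expected at least one pipeline, got %d", n)
	}
	return nil
}),
```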

---
 .../data_source_data_lake_pipelines_test.go                 | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipelines_test.go b/internal/service/datalakepipeline/data_source_data_lake_pipelines_test.go
index 875e91d6f9..2ec923a000 100644
--- a/internal/service/datalakepipeline/data_source_data_lake_pipelines_test.go
+++ b/internal/service/datalakepipeline/data_source_data_lake_pipelines_test.go
@@ -33,12 +33,6 @@ func TestAccDataLakeDSPlural_basic(t *testing.T) {
 				Check: resource.ComposeTestCheckFunc(
 					checkExists(resourceName, &pipeline),
 					resource.TestCheckResourceAttrSet(dataSourceName, "results.#"),
-					resource.TestCheckResourceAttrSet(dataSourceName, "results.0.name"),
-					resource.TestCheckResourceAttrSet(dataSourceName, "results.0.state"),
-					resource.TestCheckResourceAttrSet(dataSourceName, "results.0.project_id"),
-					resource.TestCheckResourceAttrSet(dataSourceName, "results.1.name"),
-					resource.TestCheckResourceAttrSet(dataSourceName, "results.1.state"),
-					resource.TestCheckResourceAttrSet(dataSourceName, "results.1.project_id"),
 				),
 			},
 		},

From 72d3ae7ca7a92973ba4b95800a718877bcf078b0 Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Tue, 6 Feb 2024 17:10:08 +0100
Subject: [PATCH 12/19] Revert "change region to avoid out of capacity errors"

This reverts commit 7c7882306d8f62ddd7f2ea4c97cc8b8a2523aac7.
---
 .../datalakepipeline/data_source_data_lake_pipeline_test.go   | 2 +-
 .../datalakepipeline/data_source_data_lake_pipelines_test.go  | 4 ++--
 .../datalakepipeline/resource_data_lake_pipeline_test.go      | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipeline_test.go b/internal/service/datalakepipeline/data_source_data_lake_pipeline_test.go
index fb163e8c34..69dfad0ac4 100644
--- a/internal/service/datalakepipeline/data_source_data_lake_pipeline_test.go
+++ b/internal/service/datalakepipeline/data_source_data_lake_pipeline_test.go
@@ -58,7 +58,7 @@ func configDS(orgID, projectName, clusterName, pipelineName string) string {
 				}
 				provider_name = "AWS"
 				priority      = 7
-				region_name   = "EU_WEST_1"
+				region_name   = "US_EAST_1"
 			}
 			}
 			backup_enabled               = true
diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipelines_test.go b/internal/service/datalakepipeline/data_source_data_lake_pipelines_test.go
index 2ec923a000..61e70a2894 100644
--- a/internal/service/datalakepipeline/data_source_data_lake_pipelines_test.go
+++ b/internal/service/datalakepipeline/data_source_data_lake_pipelines_test.go
@@ -61,7 +61,7 @@ func configDSPlural(orgID, projectName, firstClusterName, secondClusterName, fir
 				}
 				provider_name = "AWS"
 				priority      = 7
-				region_name   = "EU_WEST_1"
+				region_name   = "US_EAST_1"
 			}
 			}
 			backup_enabled               = true
@@ -80,7 +80,7 @@ func configDSPlural(orgID, projectName, firstClusterName, secondClusterName, fir
 				}
 				provider_name = "AWS"
 				priority      = 7
-				region_name   = "EU_WEST_1"
+				region_name   = "US_EAST_1"
 			}
 			}
 			backup_enabled               = true
diff --git a/internal/service/datalakepipeline/resource_data_lake_pipeline_test.go b/internal/service/datalakepipeline/resource_data_lake_pipeline_test.go
index f7b559c74d..6aa6e17ff5 100644
--- a/internal/service/datalakepipeline/resource_data_lake_pipeline_test.go
+++ b/internal/service/datalakepipeline/resource_data_lake_pipeline_test.go
@@ -116,7 +116,7 @@ func configBasic(orgID, projectName, clusterName, pipelineName string) string {
 				}
 				provider_name = "AWS"
 				priority      = 7
-				region_name   = "EU_WEST_1"
+				region_name   = "US_EAST_1"
 			  }
 			}
 			backup_enabled               = true

From 9c6f7356db377181f8276143e39050ed1b9f52d3 Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Tue, 6 Feb 2024 17:18:04 +0100
Subject: [PATCH 13/19] use connV2 in tests
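
Test helpers move from the legacy go.mongodb.org/atlas client (acc.Conn())
to the new Atlas Go SDK admin client (acc.ConnV2()), whose operations are
request builders finished with Execute(). Since checkExists and checkDestroy
only care whether the GET succeeds, the matlas.DataLakePipeline capture
variable is dropped from checkExists. The call shape used by both checks:

```go
// v2 SDK call shape: builder + Execute(), returning (model, *http.Response, error).
// Here the model is discarded; only the error matters to the checks.
_, _, err := acc.ConnV2().DataLakePipelinesApi.
	GetPipeline(context.Background(), ids["project_id"], ids["name"]).Execute()
```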

---
 .../data_source_data_lake_pipeline_test.go           |  4 +---
 .../data_source_data_lake_pipelines_test.go          |  4 +---
 .../resource_data_lake_pipeline_test.go              | 12 ++++--------
 3 files changed, 6 insertions(+), 14 deletions(-)

diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipeline_test.go b/internal/service/datalakepipeline/data_source_data_lake_pipeline_test.go
index 69dfad0ac4..e216352de2 100644
--- a/internal/service/datalakepipeline/data_source_data_lake_pipeline_test.go
+++ b/internal/service/datalakepipeline/data_source_data_lake_pipeline_test.go
@@ -8,12 +8,10 @@ import (
 	"github.com/hashicorp/terraform-plugin-testing/helper/acctest"
 	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc"
-	matlas "go.mongodb.org/atlas/mongodbatlas"
 )
 
 func TestAccDataLakeDS_basic(t *testing.T) {
 	var (
-		pipeline     matlas.DataLakePipeline
 		resourceName = "mongodbatlas_data_lake_pipeline.test"
 		clusterName  = acctest.RandomWithPrefix("test-acc-index")
 		name         = acctest.RandomWithPrefix("test-acc-index")
@@ -28,7 +26,7 @@ func TestAccDataLakeDS_basic(t *testing.T) {
 			{
 				Config: configDS(orgID, projectName, clusterName, name),
 				Check: resource.ComposeTestCheckFunc(
-					checkExists(resourceName, &pipeline),
+					checkExists(resourceName),
 					resource.TestCheckResourceAttrSet(resourceName, "project_id"),
 					resource.TestCheckResourceAttr(resourceName, "name", name),
 					resource.TestCheckResourceAttr(resourceName, "state", "ACTIVE"),
diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipelines_test.go b/internal/service/datalakepipeline/data_source_data_lake_pipelines_test.go
index 61e70a2894..1558523789 100644
--- a/internal/service/datalakepipeline/data_source_data_lake_pipelines_test.go
+++ b/internal/service/datalakepipeline/data_source_data_lake_pipelines_test.go
@@ -8,12 +8,10 @@ import (
 	"github.com/hashicorp/terraform-plugin-testing/helper/acctest"
 	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc"
-	matlas "go.mongodb.org/atlas/mongodbatlas"
 )
 
 func TestAccDataLakeDSPlural_basic(t *testing.T) {
 	var (
-		pipeline           matlas.DataLakePipeline
 		resourceName       = "mongodbatlas_data_lake_pipeline.test"
 		dataSourceName     = "data.mongodbatlas_data_lake_pipelines.testDataSource"
 		firstClusterName   = acctest.RandomWithPrefix("test-acc-index")
@@ -31,7 +29,7 @@ func TestAccDataLakeDSPlural_basic(t *testing.T) {
 			{
 				Config: configDSPlural(orgID, projectName, firstClusterName, secondClusterName, firstPipelineName, secondPipelineName),
 				Check: resource.ComposeTestCheckFunc(
-					checkExists(resourceName, &pipeline),
+					checkExists(resourceName),
 					resource.TestCheckResourceAttrSet(dataSourceName, "results.#"),
 				),
 			},
diff --git a/internal/service/datalakepipeline/resource_data_lake_pipeline_test.go b/internal/service/datalakepipeline/resource_data_lake_pipeline_test.go
index 6aa6e17ff5..82584490ed 100644
--- a/internal/service/datalakepipeline/resource_data_lake_pipeline_test.go
+++ b/internal/service/datalakepipeline/resource_data_lake_pipeline_test.go
@@ -11,12 +11,10 @@ import (
 	"github.com/hashicorp/terraform-plugin-testing/terraform"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc"
-	matlas "go.mongodb.org/atlas/mongodbatlas"
 )
 
 func TestAccDataLakePipeline_basic(t *testing.T) {
 	var (
-		pipeline     matlas.DataLakePipeline
 		resourceName = "mongodbatlas_data_lake_pipeline.test"
 		clusterName  = acctest.RandomWithPrefix("test-acc-index")
 		orgID        = os.Getenv("MONGODB_ATLAS_ORG_ID")
@@ -31,7 +29,7 @@ func TestAccDataLakePipeline_basic(t *testing.T) {
 			{
 				Config: configBasic(orgID, projectName, clusterName, name),
 				Check: resource.ComposeTestCheckFunc(
-					checkExists(resourceName, &pipeline),
+					checkExists(resourceName),
 					resource.TestCheckResourceAttrSet(resourceName, "project_id"),
 					resource.TestCheckResourceAttr(resourceName, "name", name),
 					resource.TestCheckResourceAttr(resourceName, "state", "ACTIVE"),
@@ -67,8 +65,7 @@ func checkDestroy(s *terraform.State) error {
 		}
 
 		ids := conversion.DecodeStateID(rs.Primary.ID)
-		// Try to find the data lake pipeline
-		_, _, err := acc.Conn().DataLakePipeline.Get(context.Background(), ids["project_id"], ids["name"])
+		_, _, err := acc.ConnV2().DataLakePipelinesApi.GetPipeline(context.Background(), ids["project_id"], ids["name"]).Execute()
 		if err == nil {
 			return fmt.Errorf("datalake (%s) still exists", ids["project_id"])
 		}
@@ -76,7 +73,7 @@ func checkDestroy(s *terraform.State) error {
 	return nil
 }
 
-func checkExists(resourceName string, pipeline *matlas.DataLakePipeline) resource.TestCheckFunc {
+func checkExists(resourceName string) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
 		rs, ok := s.RootModule().Resources[resourceName]
 		if !ok {
@@ -86,9 +83,8 @@ func checkExists(resourceName string, pipeline *matlas.DataLakePipeline) resourc
 			return fmt.Errorf("no ID is set")
 		}
 		ids := conversion.DecodeStateID(rs.Primary.ID)
-		response, _, err := acc.Conn().DataLakePipeline.Get(context.Background(), ids["project_id"], ids["name"])
+		_, _, err := acc.ConnV2().DataLakePipelinesApi.GetPipeline(context.Background(), ids["project_id"], ids["name"]).Execute()
 		if err == nil {
-			*pipeline = *response
 			return nil
 		}
 		return fmt.Errorf("DataLake pipeline (%s) does not exist", ids["name"])

From 9eecfd98b5ca89606e20e7a9cf6292856dc2f067 Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Tue, 6 Feb 2024 18:04:40 +0100
Subject: [PATCH 14/19] migrate data sources, resource read, and import to connV2
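
The singular and plural data sources plus the resource read and import paths
now use the v2 admin SDK. Its generated models back fields with pointers, so
the code switches to the nil-safe getters (GetId, GetName, GetResults,
GetTransformations, ...) and converts *time.Time timestamps to strings with
conversion.TimePtrToStringPtr; the setDataLakeResourceData helper is inlined
into dataSourceRead. A small sketch of the pattern (getter semantics are the
usual generated-SDK behavior, assumed here):

```go
// Pointer-backed fields are read through getters that return the zero value
// when the field is nil, so no explicit nil checks are needed.
name := pipeline.GetName()                                     // "" if unset
created := conversion.TimePtrToStringPtr(pipeline.CreatedDate) // nil-safe *string
```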

---
 .../data_source_data_lake_pipeline.go         |  51 +++---
 .../data_source_data_lake_pipelines.go        |  31 ++--
 .../resource_data_lake_pipeline.go            | 159 +++++++++---------
 3 files changed, 112 insertions(+), 129 deletions(-)

diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipeline.go b/internal/service/datalakepipeline/data_source_data_lake_pipeline.go
index 94b51fe426..ff7a6f4cc4 100644
--- a/internal/service/datalakepipeline/data_source_data_lake_pipeline.go
+++ b/internal/service/datalakepipeline/data_source_data_lake_pipeline.go
@@ -8,7 +8,6 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/config"
-	matlas "go.mongodb.org/atlas/mongodbatlas"
 )
 
 func DataSource() *schema.Resource {
@@ -223,72 +222,66 @@ func dataSourceSchemaSnapshots() *schema.Schema {
 }
 
 func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	conn := meta.(*config.MongoDBClient).Atlas
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
 	projectID := d.Get("project_id").(string)
 	name := d.Get("name").(string)
 
-	dataLakePipeline, _, err := conn.DataLakePipeline.Get(ctx, projectID, name)
+	pipeline, _, err := connV2.DataLakePipelinesApi.GetPipeline(ctx, projectID, name).Execute()
 	if err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineRead, name, err))
 	}
 
-	snapshots, _, err := conn.DataLakePipeline.ListSnapshots(ctx, projectID, name, nil)
+	snapshots, _, err := connV2.DataLakePipelinesApi.ListPipelineSnapshots(ctx, projectID, name).Execute()
 	if err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineRead, name, err))
 	}
 
-	ingestionSchedules, _, err := conn.DataLakePipeline.ListIngestionSchedules(ctx, projectID, name)
+	ingestionSchedules, _, err := connV2.DataLakePipelinesApi.ListPipelineSchedules(ctx, projectID, name).Execute()
 	if err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineRead, name, err))
 	}
 
-	return setDataLakeResourceData(d, dataLakePipeline, snapshots, ingestionSchedules)
-}
+	pipelineName := pipeline.GetName()
 
-func setDataLakeResourceData(
-	d *schema.ResourceData,
-	pipeline *matlas.DataLakePipeline,
-	snapshots *matlas.DataLakePipelineSnapshotsResponse,
-	ingestionSchedules []*matlas.DataLakePipelineIngestionSchedule) diag.Diagnostics {
-	if err := d.Set("id", pipeline.ID); err != nil {
-		return diag.FromErr(fmt.Errorf(ErrorDataLakeSetting, "id", pipeline.Name, err))
+	if err := d.Set("id", pipeline.GetId()); err != nil {
+		return diag.FromErr(fmt.Errorf(ErrorDataLakeSetting, "id", pipelineName, err))
 	}
 
-	if err := d.Set("state", pipeline.State); err != nil {
-		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "state", pipeline.Name, err))
+	if err := d.Set("state", pipeline.GetState()); err != nil {
+		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "state", pipelineName, err))
 	}
 
-	if err := d.Set("created_date", pipeline.CreatedDate); err != nil {
-		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "created_date", pipeline.Name, err))
+	if err := d.Set("created_date", conversion.TimePtrToStringPtr(pipeline.CreatedDate)); err != nil {
+		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "created_date", pipelineName, err))
 	}
 
-	if err := d.Set("last_updated_date", pipeline.LastUpdatedDate); err != nil {
-		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "last_updated_date", pipeline.Name, err))
+	if err := d.Set("last_updated_date", conversion.TimePtrToStringPtr(pipeline.LastUpdatedDate)); err != nil {
+		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "last_updated_date", pipelineName, err))
 	}
 
 	if err := d.Set("sink", flattenSink(pipeline.Sink)); err != nil {
-		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "sink", pipeline.Name, err))
+		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "sink", pipelineName, err))
 	}
 
 	if err := d.Set("source", flattenSource(pipeline.Source)); err != nil {
-		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "source", pipeline.Name, err))
+		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "source", pipelineName, err))
 	}
 
-	if err := d.Set("transformations", flattenTransformations(pipeline.Transformations)); err != nil {
-		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "transformations", pipeline.Name, err))
+	if err := d.Set("transformations", flattenTransformations(pipeline.GetTransformations())); err != nil {
+		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "transformations", pipelineName, err))
 	}
 
-	if err := d.Set("snapshots", flattenSnapshots(snapshots.Results)); err != nil {
-		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "snapshots", pipeline.Name, err))
+	if err := d.Set("snapshots", flattenSnapshots(snapshots.GetResults())); err != nil {
+		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "snapshots", pipelineName, err))
 	}
 
 	if err := d.Set("ingestion_schedules", flattenIngestionSchedules(ingestionSchedules)); err != nil {
-		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "ingestion_schedules", pipeline.Name, err))
+		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "ingestion_schedules", pipelineName, err))
 	}
 
 	d.SetId(conversion.EncodeStateID(map[string]string{
-		"project_id": pipeline.GroupID,
-		"name":       pipeline.Name,
+		"project_id": pipeline.GetGroupId(),
+		"name":       pipelineName,
 	}))
 
 	return nil
diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipelines.go b/internal/service/datalakepipeline/data_source_data_lake_pipelines.go
index a4ee56e6cd..d8bd9fa118 100644
--- a/internal/service/datalakepipeline/data_source_data_lake_pipelines.go
+++ b/internal/service/datalakepipeline/data_source_data_lake_pipelines.go
@@ -7,8 +7,9 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/id"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/config"
-	matlas "go.mongodb.org/atlas/mongodbatlas"
+	"go.mongodb.org/atlas-sdk/v20231115005/admin"
 )
 
 const errorDataLakePipelineList = "error creating MongoDB Atlas DataLake Pipelines: %s"
@@ -139,42 +140,36 @@ func PluralDataSource() *schema.Resource {
 }
 
 func dataSourcePluralRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	conn := meta.(*config.MongoDBClient).Atlas
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
 	projectID := d.Get("project_id").(string)
 
-	dataLakePipelines, _, err := conn.DataLakePipeline.List(ctx, projectID)
+	pipelines, _, err := connV2.DataLakePipelinesApi.ListPipelines(ctx, projectID).Execute()
 	if err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineList, err))
 	}
 
-	if err := d.Set("results", flattenDataLakePipelines(dataLakePipelines)); err != nil {
+	if err := d.Set("results", flattenDataLakePipelines(pipelines)); err != nil {
 		return diag.FromErr(fmt.Errorf("error setting `result` for DataLake Pipelines: %s", err))
 	}
 
 	d.SetId(id.UniqueId())
-
 	return nil
 }
 
-func flattenDataLakePipelines(peers []*matlas.DataLakePipeline) []map[string]any {
-	if len(peers) == 0 {
-		return nil
-	}
-
+func flattenDataLakePipelines(peers []admin.DataLakeIngestionPipeline) []map[string]any {
 	pipelines := make([]map[string]any, len(peers))
 	for i := range peers {
 		pipelines[i] = map[string]any{
-			"project_id":        peers[i].GroupID,
-			"name":              peers[i].Name,
-			"id":                peers[i].ID,
-			"created_date":      peers[i].CreatedDate,
-			"last_updated_date": peers[i].LastUpdatedDate,
-			"state":             peers[i].State,
+			"project_id":        peers[i].GetGroupId(),
+			"name":              peers[i].GetName(),
+			"id":                peers[i].GetId(),
+			"created_date":      conversion.TimePtrToStringPtr(peers[i].CreatedDate),
+			"last_updated_date": conversion.TimePtrToStringPtr(peers[i].LastUpdatedDate),
+			"state":             peers[i].GetState(),
 			"sink":              flattenSink(peers[i].Sink),
 			"source":            flattenSource(peers[i].Source),
-			"transformations":   flattenTransformations(peers[i].Transformations),
+			"transformations":   flattenTransformations(peers[i].GetTransformations()),
 		}
 	}
-
 	return pipelines
 }
diff --git a/internal/service/datalakepipeline/resource_data_lake_pipeline.go b/internal/service/datalakepipeline/resource_data_lake_pipeline.go
index 7b549cd0e4..be60ec042d 100644
--- a/internal/service/datalakepipeline/resource_data_lake_pipeline.go
+++ b/internal/service/datalakepipeline/resource_data_lake_pipeline.go
@@ -11,6 +11,7 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/config"
+	"go.mongodb.org/atlas-sdk/v20231115005/admin"
 	matlas "go.mongodb.org/atlas/mongodbatlas"
 )
 
@@ -282,12 +283,12 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.
 }
 
 func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	conn := meta.(*config.MongoDBClient).Atlas
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
 	ids := conversion.DecodeStateID(d.Id())
 	projectID := ids["project_id"]
 	name := ids["name"]
 
-	dataLakePipeline, resp, err := conn.DataLakePipeline.Get(ctx, projectID, name)
+	pipeline, resp, err := connV2.DataLakePipelinesApi.GetPipeline(ctx, projectID, name).Execute()
 	if resp != nil && resp.StatusCode == http.StatusNotFound {
 		d.SetId("")
 		return nil
@@ -297,44 +298,44 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineRead, name, err))
 	}
 
-	if err := d.Set("id", dataLakePipeline.ID); err != nil {
+	if err := d.Set("id", pipeline.GetId()); err != nil {
 		return diag.FromErr(fmt.Errorf(ErrorDataLakeSetting, "id", name, err))
 	}
 
-	if err := d.Set("state", dataLakePipeline.State); err != nil {
+	if err := d.Set("state", pipeline.GetState()); err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "state", name, err))
 	}
 
-	if err := d.Set("created_date", dataLakePipeline.CreatedDate); err != nil {
+	if err := d.Set("created_date", conversion.TimePtrToStringPtr(pipeline.CreatedDate)); err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "created_date", name, err))
 	}
 
-	if err := d.Set("last_updated_date", dataLakePipeline.LastUpdatedDate); err != nil {
+	if err := d.Set("last_updated_date", conversion.TimePtrToStringPtr(pipeline.LastUpdatedDate)); err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "last_updated_date", name, err))
 	}
 
-	if err := d.Set("sink", flattenSink(dataLakePipeline.Sink)); err != nil {
+	if err := d.Set("sink", flattenSink(pipeline.Sink)); err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "sink", name, err))
 	}
 
-	if err := d.Set("source", flattenSource(dataLakePipeline.Source)); err != nil {
+	if err := d.Set("source", flattenSource(pipeline.Source)); err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "source", name, err))
 	}
 
-	if err := d.Set("transformations", flattenTransformations(dataLakePipeline.Transformations)); err != nil {
+	if err := d.Set("transformations", flattenTransformations(pipeline.GetTransformations())); err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "transformations", name, err))
 	}
 
-	snapshots, _, err := conn.DataLakePipeline.ListSnapshots(ctx, projectID, name, nil)
+	snapshots, _, err := connV2.DataLakePipelinesApi.ListPipelineSnapshots(ctx, projectID, name).Execute()
 	if err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineRead, name, err))
 	}
 
-	if err := d.Set("snapshots", flattenSnapshots(snapshots.Results)); err != nil {
+	if err := d.Set("snapshots", flattenSnapshots(snapshots.GetResults())); err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineSetting, "snapshots", name, err))
 	}
 
-	ingestionSchedules, _, err := conn.DataLakePipeline.ListIngestionSchedules(ctx, projectID, name)
+	ingestionSchedules, _, err := connV2.DataLakePipelinesApi.ListPipelineSchedules(ctx, projectID, name).Execute()
 	if err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineRead, name, err))
 	}
@@ -345,7 +346,7 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di
 
 	d.SetId(conversion.EncodeStateID(map[string]string{
 		"project_id": projectID,
-		"name":       dataLakePipeline.Name,
+		"name":       pipeline.GetName(),
 	}))
 
 	return nil
@@ -387,14 +388,14 @@ func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.
 }
 
 func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) {
-	conn := meta.(*config.MongoDBClient).Atlas
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
 
 	projectID, name, err := splitDataLakePipelineImportID(d.Id())
 	if err != nil {
 		return nil, err
 	}
 
-	dataLakePipeline, _, err := conn.DataLakePipeline.Get(ctx, projectID, name)
+	pipeline, _, err := connV2.DataLakePipelinesApi.GetPipeline(ctx, projectID, name).Execute()
 	if err != nil {
 		return nil, fmt.Errorf(errorDataLakePipelineImport, name, err)
 	}
@@ -407,49 +408,49 @@ func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*s
 		return nil, fmt.Errorf(errorDataLakePipelineImportField, "project_id", name, err)
 	}
 
-	if err := d.Set("id", dataLakePipeline.ID); err != nil {
+	if err := d.Set("id", pipeline.GetId()); err != nil {
 		return nil, fmt.Errorf(errorDataLakePipelineImportField, "id", name, err)
 	}
 
-	if err := d.Set("state", dataLakePipeline.State); err != nil {
+	if err := d.Set("state", pipeline.GetState()); err != nil {
 		return nil, fmt.Errorf(errorDataLakePipelineImportField, "state", name, err)
 	}
 
-	if err := d.Set("created_date", dataLakePipeline.CreatedDate); err != nil {
+	if err := d.Set("created_date", conversion.TimePtrToStringPtr(pipeline.CreatedDate)); err != nil {
 		return nil, fmt.Errorf(errorDataLakePipelineImportField, "created_date", name, err)
 	}
 
-	if err := d.Set("last_updated_date", dataLakePipeline.LastUpdatedDate); err != nil {
+	if err := d.Set("last_updated_date", conversion.TimePtrToStringPtr(pipeline.LastUpdatedDate)); err != nil {
 		return nil, fmt.Errorf(errorDataLakePipelineImportField, "last_updated_date", name, err)
 	}
 
-	if err := d.Set("sink", flattenSink(dataLakePipeline.Sink)); err != nil {
+	if err := d.Set("sink", flattenSink(pipeline.Sink)); err != nil {
 		return nil, fmt.Errorf(errorDataLakePipelineImportField, "sink", name, err)
 	}
 
-	if err := d.Set("source", flattenSource(dataLakePipeline.Source)); err != nil {
+	if err := d.Set("source", flattenSource(pipeline.Source)); err != nil {
 		return nil, fmt.Errorf(errorDataLakePipelineImportField, "source", name, err)
 	}
 
-	if err := d.Set("transformations", flattenTransformations(dataLakePipeline.Transformations)); err != nil {
+	if err := d.Set("transformations", flattenTransformations(pipeline.GetTransformations())); err != nil {
 		return nil, fmt.Errorf(errorDataLakePipelineImportField, "transformations", name, err)
 	}
 
 	d.SetId(conversion.EncodeStateID(map[string]string{
 		"project_id": projectID,
-		"name":       dataLakePipeline.Name,
+		"name":       pipeline.GetName(),
 	}))
 
-	snapshots, _, err := conn.DataLakePipeline.ListSnapshots(ctx, projectID, name, nil)
+	snapshots, _, err := connV2.DataLakePipelinesApi.ListPipelineSnapshots(ctx, projectID, name).Execute()
 	if err != nil {
 		return nil, fmt.Errorf(errorDataLakePipelineImport, name, err)
 	}
 
-	if err := d.Set("snapshots", flattenSnapshots(snapshots.Results)); err != nil {
+	if err := d.Set("snapshots", flattenSnapshots(snapshots.GetResults())); err != nil {
 		return nil, fmt.Errorf(errorDataLakePipelineImportField, "snapshots", name, err)
 	}
 
-	ingestionSchedules, _, err := conn.DataLakePipeline.ListIngestionSchedules(ctx, projectID, name)
+	ingestionSchedules, _, err := connV2.DataLakePipelinesApi.ListPipelineSchedules(ctx, projectID, name).Execute()
 	if err != nil {
 		return nil, fmt.Errorf(errorDataLakePipelineImport, name, err)
 	}
@@ -576,107 +577,101 @@ func newTransformation(d *schema.ResourceData) []*matlas.DataLakePipelineTransfo
 	return dataLakePipelineTransformations
 }
 
-func flattenSource(atlasPipelineSource *matlas.DataLakePipelineSource) []map[string]any {
-	if atlasPipelineSource == nil {
+func flattenSource(source *admin.IngestionSource) []map[string]any {
+	if source == nil {
 		return nil
 	}
-
 	return []map[string]any{
 		{
-			"type":            atlasPipelineSource.Type,
-			"cluster_name":    atlasPipelineSource.ClusterName,
-			"collection_name": atlasPipelineSource.CollectionName,
-			"database_name":   atlasPipelineSource.DatabaseName,
-			"project_id":      atlasPipelineSource.GroupID,
+			"type":            source.GetType(),
+			"cluster_name":    source.GetClusterName(),
+			"collection_name": source.GetCollectionName(),
+			"database_name":   source.GetDatabaseName(),
+			"project_id":      source.GetGroupId(),
 		},
 	}
 }
 
-func flattenSink(atlasPipelineSink *matlas.DataLakePipelineSink) []map[string]any {
-	if atlasPipelineSink == nil {
+func flattenSink(sink *admin.IngestionSink) []map[string]any {
+	if sink == nil {
 		return nil
 	}
-
 	return []map[string]any{
 		{
-			"type":             atlasPipelineSink.Type,
-			"provider":         atlasPipelineSink.MetadataProvider,
-			"region":           atlasPipelineSink.MetadataRegion,
-			"partition_fields": flattenPartitionFields(atlasPipelineSink.PartitionFields),
+			"type":             sink.GetType(),
+			"provider":         sink.GetMetadataProvider(),
+			"region":           sink.GetMetadataRegion(),
+			"partition_fields": flattenPartitionFields(sink.GetPartitionFields()),
 		},
 	}
 }
 
-func flattenIngestionSchedules(atlasPipelineIngestionSchedules []*matlas.DataLakePipelineIngestionSchedule) []map[string]any {
-	if len(atlasPipelineIngestionSchedules) == 0 {
+func flattenIngestionSchedules(schedules []admin.DiskBackupApiPolicyItem) []map[string]any {
+	if len(schedules) == 0 {
 		return nil
 	}
-
-	out := make([]map[string]any, len(atlasPipelineIngestionSchedules))
-	for i, schedule := range atlasPipelineIngestionSchedules {
+	out := make([]map[string]any, len(schedules))
+	for i, schedule := range schedules {
 		out[i] = map[string]any{
-			"id":                 schedule.ID,
-			"frequency_type":     schedule.FrequencyType,
-			"frequency_interval": schedule.FrequencyInterval,
-			"retention_unit":     schedule.RetentionUnit,
-			"retention_value":    schedule.RetentionValue,
+			"id":                 schedule.GetId(),
+			"frequency_type":     schedule.GetFrequencyType(),
+			"frequency_interval": schedule.GetFrequencyInterval(),
+			"retention_unit":     schedule.GetRetentionUnit(),
+			"retention_value":    schedule.GetRetentionValue(),
 		}
 	}
-
 	return out
 }
 
-func flattenSnapshots(snapshots []*matlas.DataLakePipelineSnapshot) []map[string]any {
+func flattenSnapshots(snapshots []admin.DiskBackupSnapshot) []map[string]any {
 	if len(snapshots) == 0 {
 		return nil
 	}
-
 	out := make([]map[string]any, len(snapshots))
-	for i, snapshot := range snapshots {
+	for i := range snapshots {
+		snapshot := &snapshots[i]
 		out[i] = map[string]any{
-			"id":               snapshot.ID,
-			"provider":         snapshot.CloudProvider,
-			"created_at":       snapshot.CreatedAt,
-			"expires_at":       snapshot.ExpiresAt,
-			"frequency_yype":   snapshot.FrequencyType,
-			"master_key":       snapshot.MasterKeyUUID,
-			"mongod_version":   snapshot.MongodVersion,
-			"replica_set_name": snapshot.ReplicaSetName,
-			"type":             snapshot.Type,
-			"snapshot_type":    snapshot.SnapshotType,
-			"status":           snapshot.Status,
-			"size":             snapshot.StorageSizeBytes,
-			"policies":         snapshot.PolicyItems,
+			"id":               snapshot.GetId(),
+			"provider":         snapshot.GetCloudProvider(),
+			"created_at":       conversion.TimePtrToStringPtr(snapshot.CreatedAt),
+			"expires_at":       conversion.TimePtrToStringPtr(snapshot.ExpiresAt),
+			"frequency_yype":   snapshot.GetFrequencyType(),
+			"master_key":       snapshot.GetMasterKeyUUID(),
+			"mongod_version":   snapshot.GetMongodVersion(),
+			"replica_set_name": snapshot.GetReplicaSetName(),
+			"type":             snapshot.GetType(),
+			"snapshot_type":    snapshot.GetSnapshotType(),
+			"status":           snapshot.GetStatus(),
+			"size":             snapshot.GetStorageSizeBytes(),
+			"policies":         snapshot.GetPolicyItems(),
 		}
 	}
 	return out
 }
 
-func flattenTransformations(atlasPipelineTransformation []*matlas.DataLakePipelineTransformation) []map[string]any {
-	if len(atlasPipelineTransformation) == 0 {
+func flattenTransformations(transformations []admin.FieldTransformation) []map[string]any {
+	if len(transformations) == 0 {
 		return nil
 	}
-
-	out := make([]map[string]any, len(atlasPipelineTransformation))
-	for i, atlasPipelineTransformation := range atlasPipelineTransformation {
+	out := make([]map[string]any, len(transformations))
+	for i, transformation := range transformations {
 		out[i] = map[string]any{
-			"type":  atlasPipelineTransformation.Type,
-			"field": atlasPipelineTransformation.Field,
+			"type":  transformation.GetType(),
+			"field": transformation.GetField(),
 		}
 	}
 	return out
 }
 
-func flattenPartitionFields(atlasDataLakePipelinePartitionFields []*matlas.DataLakePipelinePartitionField) []map[string]any {
-	if len(atlasDataLakePipelinePartitionFields) == 0 {
+func flattenPartitionFields(fields []admin.DataLakePipelinesPartitionField) []map[string]any {
+	if len(fields) == 0 {
 		return nil
 	}
-
-	out := make([]map[string]any, len(atlasDataLakePipelinePartitionFields))
-	for i, atlasDataLakePipelinePartitionField := range atlasDataLakePipelinePartitionFields {
+	out := make([]map[string]any, len(fields))
+	for i, field := range fields {
 		out[i] = map[string]any{
-			"field_name": atlasDataLakePipelinePartitionField.FieldName,
-			"order":      atlasDataLakePipelinePartitionField.Order,
+			"field_name": field.GetFieldName(),
+			"order":      field.GetOrder(),
 		}
 	}
 	return out

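The flatten helpers above show the core pattern of this migration: the v20231115005 `admin` models expose optional fields as pointers with generated nil-safe accessors, so direct field reads become `GetX()` calls and the explicit nil guards can go. A minimal, self-contained sketch of that accessor pattern (the struct below is an illustrative stand-in, not the generated model):

```go
package main

import "fmt"

// Illustrative stand-in for a generated SDK model such as admin.IngestionSource:
// optional fields are pointers, and each field gets a nil-safe accessor.
type ingestionSource struct {
	Type *string
}

// GetType mirrors the generated accessors: it returns the zero value when the
// receiver or the field is nil, so callers can drop explicit nil checks.
func (s *ingestionSource) GetType() string {
	if s == nil || s.Type == nil {
		return ""
	}
	return *s.Type
}

func main() {
	var src *ingestionSource          // nil pointer
	fmt.Printf("%q\n", src.GetType()) // prints "" instead of panicking
}
```
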
From a66472ca7dee65704f84b3efa92f87db78020006 Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Tue, 6 Feb 2024 20:16:15 +0100
Subject: [PATCH 15/19] data source runs

---
 .../data_source_data_lake_pipeline_run.go     | 37 +++++++++---------
 .../data_source_data_lake_pipeline_runs.go    | 39 ++++++++-----------
 2 files changed, 34 insertions(+), 42 deletions(-)

diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipeline_run.go b/internal/service/datalakepipeline/data_source_data_lake_pipeline_run.go
index f288db6fa4..8f225ecdda 100644
--- a/internal/service/datalakepipeline/data_source_data_lake_pipeline_run.go
+++ b/internal/service/datalakepipeline/data_source_data_lake_pipeline_run.go
@@ -9,7 +9,7 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/config"
-	matlas "go.mongodb.org/atlas/mongodbatlas"
+	"go.mongodb.org/atlas-sdk/v20231115005/admin"
 )
 
 const errorDataLakePipelineRunRead = "error reading MongoDB Atlas DataLake Run (%s): %s"
@@ -88,12 +88,12 @@ func DataSourceRun() *schema.Resource {
 }
 
 func dataSourceRunRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	conn := meta.(*config.MongoDBClient).Atlas
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
 	projectID := d.Get("project_id").(string)
 	name := d.Get("pipeline_name").(string)
 	pipelineRunID := d.Get("pipeline_run_id").(string)
 
-	dataLakeRun, resp, err := conn.DataLakePipeline.GetRun(ctx, projectID, name, pipelineRunID)
+	run, resp, err := connV2.DataLakePipelinesApi.GetPipelineRun(ctx, projectID, name, pipelineRunID).Execute()
 	if err != nil {
 		if resp != nil && resp.StatusCode == http.StatusNotFound {
 			d.SetId("")
@@ -103,47 +103,47 @@ func dataSourceRunRead(ctx context.Context, d *schema.ResourceData, meta any) di
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineRunRead, name, err))
 	}
 
-	if err := d.Set("id", dataLakeRun.ID); err != nil {
+	if err := d.Set("id", run.GetId()); err != nil {
 		return diag.FromErr(fmt.Errorf(ErrorDataLakeSetting, "hostnames", name, err))
 	}
 
-	if err := d.Set("project_id", dataLakeRun.GroupID); err != nil {
+	if err := d.Set("project_id", run.GetGroupId()); err != nil {
 		return diag.FromErr(fmt.Errorf(ErrorDataLakeSetting, "state", name, err))
 	}
 
-	if err := d.Set("created_date", dataLakeRun.CreatedDate); err != nil {
+	if err := d.Set("created_date", conversion.TimePtrToStringPtr(run.CreatedDate)); err != nil {
 		return diag.FromErr(fmt.Errorf(ErrorDataLakeSetting, "storage_databases", name, err))
 	}
 
-	if err := d.Set("last_updated_date", dataLakeRun.LastUpdatedDate); err != nil {
+	if err := d.Set("last_updated_date", conversion.TimePtrToStringPtr(run.LastUpdatedDate)); err != nil {
 		return diag.FromErr(fmt.Errorf(ErrorDataLakeSetting, "storage_databases", name, err))
 	}
 
-	if err := d.Set("state", dataLakeRun.State); err != nil {
+	if err := d.Set("state", run.GetState()); err != nil {
 		return diag.FromErr(fmt.Errorf(ErrorDataLakeSetting, "storage_databases", name, err))
 	}
 
-	if err := d.Set("phase", dataLakeRun.Phase); err != nil {
+	if err := d.Set("phase", run.GetPhase()); err != nil {
 		return diag.FromErr(fmt.Errorf(ErrorDataLakeSetting, "storage_databases", name, err))
 	}
 
-	if err := d.Set("pipeline_id", dataLakeRun.PipelineID); err != nil {
+	if err := d.Set("pipeline_id", run.GetPipelineId()); err != nil {
 		return diag.FromErr(fmt.Errorf(ErrorDataLakeSetting, "storage_stores", name, err))
 	}
 
-	if err := d.Set("dataset_name", dataLakeRun.DatasetName); err != nil {
+	if err := d.Set("dataset_name", run.GetDatasetName()); err != nil {
 		return diag.FromErr(fmt.Errorf(ErrorDataLakeSetting, "storage_stores", name, err))
 	}
 
-	if err := d.Set("snapshot_id", dataLakeRun.SnapshotID); err != nil {
+	if err := d.Set("snapshot_id", run.GetSnapshotId()); err != nil {
 		return diag.FromErr(fmt.Errorf(ErrorDataLakeSetting, "storage_stores", name, err))
 	}
 
-	if err := d.Set("backup_frequency_type", dataLakeRun.BackupFrequencyType); err != nil {
+	if err := d.Set("backup_frequency_type", run.GetBackupFrequencyType()); err != nil {
 		return diag.FromErr(fmt.Errorf(ErrorDataLakeSetting, "storage_stores", name, err))
 	}
 
-	if err := d.Set("stats", flattenDataLakePipelineRunStats(dataLakeRun.Stats)); err != nil {
+	if err := d.Set("stats", flattenRunStats(run.Stats)); err != nil {
 		return diag.FromErr(fmt.Errorf(ErrorDataLakeSetting, "storage_stores", name, err))
 	}
 
@@ -156,15 +156,14 @@ func dataSourceRunRead(ctx context.Context, d *schema.ResourceData, meta any) di
 	return nil
 }
 
-func flattenDataLakePipelineRunStats(datalakeRunStats *matlas.DataLakePipelineRunStats) []map[string]any {
-	if datalakeRunStats == nil {
+func flattenRunStats(stats *admin.PipelineRunStats) []map[string]any {
+	if stats == nil {
 		return nil
 	}
-
 	maps := make([]map[string]any, 1)
 	maps[0] = map[string]any{
-		"bytes_exported": datalakeRunStats.BytesExported,
-		"num_docs":       datalakeRunStats.NumDocs,
+		"bytes_exported": stats.GetBytesExported(),
+		"num_docs":       stats.GetNumDocs(),
 	}
 	return maps
 }
diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs.go b/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs.go
index da892bacd9..c5b33cd284 100644
--- a/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs.go
+++ b/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs.go
@@ -7,8 +7,9 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/id"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/config"
-	matlas "go.mongodb.org/atlas/mongodbatlas"
+	"go.mongodb.org/atlas-sdk/v20231115005/admin"
 )
 
 const errorDataLakePipelineRunList = "error reading MongoDB Atlas DataLake Runs (%s): %s"
@@ -91,45 +92,37 @@ func PluralDataSourceRun() *schema.Resource {
 }
 
 func dataSourcePluralRunRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	conn := meta.(*config.MongoDBClient).Atlas
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
 	projectID := d.Get("project_id").(string)
 	name := d.Get("pipeline_name").(string)
-
-	dataLakeRuns, _, err := conn.DataLakePipeline.ListRuns(ctx, projectID, name)
+	runs, _, err := connV2.DataLakePipelinesApi.ListPipelineRuns(ctx, projectID, name).Execute()
 	if err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineRunList, projectID, err))
 	}
-
-	if err := d.Set("results", flattenRunResult(dataLakeRuns.Results)); err != nil {
+	if err := d.Set("results", flattenRunResults(runs.GetResults())); err != nil {
 		return diag.FromErr(fmt.Errorf(ErrorDataLakeSetting, "results", projectID, err))
 	}
-
 	d.SetId(id.UniqueId())
-
 	return nil
 }
 
-func flattenRunResult(datalakePipelineRuns []*matlas.DataLakePipelineRun) []map[string]any {
-	var results []map[string]any
-
+func flattenRunResults(datalakePipelineRuns []admin.IngestionPipelineRun) []map[string]any {
 	if len(datalakePipelineRuns) == 0 {
-		return results
+		return nil
 	}
-
-	results = make([]map[string]any, len(datalakePipelineRuns))
+	results := make([]map[string]any, len(datalakePipelineRuns))
 
 	for k, run := range datalakePipelineRuns {
 		results[k] = map[string]any{
-			"id":                    run.ID,
-			"created_date":          run.CreatedDate,
-			"last_updated_date":     run.LastUpdatedDate,
-			"state":                 run.State,
-			"pipeline_id":           run.PipelineID,
-			"snapshot_id":           run.SnapshotID,
-			"backup_frequency_type": run.BackupFrequencyType,
-			"stats":                 flattenDataLakePipelineRunStats(run.Stats),
+			"id":                    run.GetId(),
+			"created_date":          conversion.TimePtrToStringPtr(run.CreatedDate),
+			"last_updated_date":     conversion.TimePtrToStringPtr(run.LastUpdatedDate),
+			"state":                 run.GetState(),
+			"pipeline_id":           run.GetPipelineId(),
+			"snapshot_id":           run.GetSnapshotId(),
+			"backup_frequency_type": run.GetBackupFrequencyType(),
+			"stats":                 flattenRunStats(run.Stats),
 		}
 	}
-
 	return results
 }

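A recurring detail in this patch: the v2 SDK returns timestamps as `*time.Time`, while the Terraform state stores strings, hence the `conversion.TimePtrToStringPtr` calls around `CreatedDate` and `LastUpdatedDate`. A plausible sketch of that helper, assuming it renders RFC 3339 and passes nil through (the real implementation lives in `internal/common/conversion` and is not shown here):

```go
package conversion

import "time"

// TimePtrToStringPtr converts an optional SDK timestamp to an optional string.
// Assumed behavior: nil stays nil; otherwise the time is formatted as RFC 3339.
func TimePtrToStringPtr(t *time.Time) *string {
	if t == nil {
		return nil
	}
	s := t.Format(time.RFC3339)
	return &s
}
```
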
From a353a587dfdeaf3d5ac05026d55252759651ae8b Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Tue, 6 Feb 2024 20:20:52 +0100
Subject: [PATCH 16/19] delete

---
 .../service/datalakepipeline/resource_data_lake_pipeline.go   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/internal/service/datalakepipeline/resource_data_lake_pipeline.go b/internal/service/datalakepipeline/resource_data_lake_pipeline.go
index be60ec042d..e928c2a600 100644
--- a/internal/service/datalakepipeline/resource_data_lake_pipeline.go
+++ b/internal/service/datalakepipeline/resource_data_lake_pipeline.go
@@ -374,12 +374,12 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.
 }
 
 func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	conn := meta.(*config.MongoDBClient).Atlas
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
 	ids := conversion.DecodeStateID(d.Id())
 	projectID := ids["project_id"]
 	name := ids["name"]
 
-	_, err := conn.DataLakePipeline.Delete(ctx, projectID, name)
+	_, _, err := connV2.DataLakePipelinesApi.DeletePipeline(ctx, projectID, name).Execute()
 	if err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineDelete, name, err))
 	}

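Every v2 call in this series follows the same fluent shape: pick the typed API group, build the request, finish with `.Execute()`, and receive a body, an `*http.Response`, and an error. A sketch of how the delete above could be wrapped if a missing pipeline should be tolerated; the wrapper, its 404 handling, and the `*admin.APIClient` type for `connV2` are assumptions for illustration, not part of this patch:

```go
package datalakepipeline

import (
	"context"
	"fmt"
	"net/http"

	"go.mongodb.org/atlas-sdk/v20231115005/admin"
)

// deletePipelineIfExists deletes a pipeline and treats "already gone" as success.
func deletePipelineIfExists(ctx context.Context, connV2 *admin.APIClient, projectID, name string) error {
	// DeletePipeline returns (body, *http.Response, error); the body is unused here.
	_, httpResp, err := connV2.DataLakePipelinesApi.DeletePipeline(ctx, projectID, name).Execute()
	if err != nil {
		if httpResp != nil && httpResp.StatusCode == http.StatusNotFound {
			return nil // illustrative choice: a 404 means there is nothing to delete
		}
		return fmt.Errorf("error deleting data lake pipeline %s: %w", name, err)
	}
	return nil
}
```
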
From c0bdd6b14895c5ef3d2e34a4e3ae8ac2e5a3c95b Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Tue, 6 Feb 2024 20:58:24 +0100
Subject: [PATCH 17/19] create

---
 .../resource_data_lake_pipeline.go            | 83 +++++++++----------
 1 file changed, 37 insertions(+), 46 deletions(-)

diff --git a/internal/service/datalakepipeline/resource_data_lake_pipeline.go b/internal/service/datalakepipeline/resource_data_lake_pipeline.go
index e928c2a600..f5c183084d 100644
--- a/internal/service/datalakepipeline/resource_data_lake_pipeline.go
+++ b/internal/service/datalakepipeline/resource_data_lake_pipeline.go
@@ -12,7 +12,6 @@ import (
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/config"
 	"go.mongodb.org/atlas-sdk/v20231115005/admin"
-	matlas "go.mongodb.org/atlas/mongodbatlas"
 )
 
 const (
@@ -257,26 +256,26 @@ func schemaSnapshots() *schema.Schema {
 }
 
 func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	conn := meta.(*config.MongoDBClient).Atlas
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
 	projectID := d.Get("project_id").(string)
 	name := d.Get("name").(string)
 
-	dataLakePipelineReqBody := &matlas.DataLakePipeline{
-		GroupID:         projectID,
-		Name:            name,
+	params := &admin.DataLakeIngestionPipeline{
+		GroupId:         conversion.StringPtr(projectID),
+		Name:            conversion.StringPtr(name),
 		Sink:            newSink(d),
 		Source:          newSource(d),
 		Transformations: newTransformation(d),
 	}
 
-	dataLakePipeline, _, err := conn.DataLakePipeline.Create(ctx, projectID, dataLakePipelineReqBody)
+	pipeline, _, err := connV2.DataLakePipelinesApi.CreatePipeline(ctx, projectID, params).Execute()
 	if err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineCreate, err))
 	}
 
 	d.SetId(conversion.EncodeStateID(map[string]string{
 		"project_id": projectID,
-		"name":       dataLakePipeline.Name,
+		"name":       pipeline.GetName(),
 	}))
 
 	return resourceRead(ctx, d, meta)
@@ -353,23 +352,22 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di
 }
 
 func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	conn := meta.(*config.MongoDBClient).Atlas
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
 	projectID := d.Get("project_id").(string)
 	name := d.Get("name").(string)
 
-	dataLakePipelineReqBody := &matlas.DataLakePipeline{
-		GroupID:         projectID,
-		Name:            name,
+	params := &admin.DataLakeIngestionPipeline{
+		GroupId:         conversion.StringPtr(projectID),
+		Name:            conversion.StringPtr(name),
 		Sink:            newSink(d),
 		Source:          newSource(d),
 		Transformations: newTransformation(d),
 	}
 
-	_, _, err := conn.DataLakePipeline.Update(ctx, projectID, name, dataLakePipelineReqBody)
+	_, _, err := connV2.DataLakePipelinesApi.UpdatePipeline(ctx, projectID, name, params).Execute()
 	if err != nil {
 		return diag.FromErr(fmt.Errorf(errorDataLakePipelineUpdate, err))
 	}
-
 	return resourceRead(ctx, d, meta)
 }
 
@@ -476,105 +474,98 @@ func splitDataLakePipelineImportID(id string) (projectID, name string, err error
 	return
 }
 
-func newSink(d *schema.ResourceData) *matlas.DataLakePipelineSink {
+func newSink(d *schema.ResourceData) *admin.IngestionSink {
 	if sink, ok := d.Get("sink").([]any); ok && len(sink) == 1 {
 		sinkMap := sink[0].(map[string]any)
-		dataLakePipelineSink := &matlas.DataLakePipelineSink{}
+		dataLakePipelineSink := &admin.IngestionSink{}
 
 		if sinkType, ok := sinkMap["type"].(string); ok {
-			dataLakePipelineSink.Type = sinkType
+			dataLakePipelineSink.Type = conversion.StringPtr(sinkType)
 		}
-
 		if provider, ok := sinkMap["provider"].(string); ok {
-			dataLakePipelineSink.MetadataProvider = provider
+			dataLakePipelineSink.MetadataProvider = conversion.StringPtr(provider)
 		}
-
 		if region, ok := sinkMap["region"].(string); ok {
-			dataLakePipelineSink.MetadataRegion = region
+			dataLakePipelineSink.MetadataRegion = conversion.StringPtr(region)
 		}
-
 		dataLakePipelineSink.PartitionFields = newPartitionField(sinkMap)
 		return dataLakePipelineSink
 	}
-
 	return nil
 }
 
-func newPartitionField(sinkMap map[string]any) []*matlas.DataLakePipelinePartitionField {
+func newPartitionField(sinkMap map[string]any) *[]admin.DataLakePipelinesPartitionField {
 	partitionFields, ok := sinkMap["partition_fields"].([]any)
 	if !ok || len(partitionFields) == 0 {
 		return nil
 	}
-
-	fields := make([]*matlas.DataLakePipelinePartitionField, len(partitionFields))
+	fields := make([]admin.DataLakePipelinesPartitionField, len(partitionFields))
 	for i, partitionField := range partitionFields {
 		fieldMap := partitionField.(map[string]any)
-		fields[i] = &matlas.DataLakePipelinePartitionField{
+		fields[i] = admin.DataLakePipelinesPartitionField{
 			FieldName: fieldMap["field_name"].(string),
-			Order:     int32(fieldMap["order"].(int)),
+			Order:     fieldMap["order"].(int),
 		}
 	}
-
-	return fields
+	return &fields
 }
 
-func newSource(d *schema.ResourceData) *matlas.DataLakePipelineSource {
+func newSource(d *schema.ResourceData) *admin.IngestionSource {
 	source, ok := d.Get("source").([]any)
 	if !ok || len(source) == 0 {
 		return nil
 	}
 
 	sourceMap := source[0].(map[string]any)
-	dataLakePipelineSource := &matlas.DataLakePipelineSource{}
+	dataLakePipelineSource := new(admin.IngestionSource)
 
 	if sourceType, ok := sourceMap["type"].(string); ok {
-		dataLakePipelineSource.Type = sourceType
+		dataLakePipelineSource.Type = conversion.StringPtr(sourceType)
 	}
 
 	if clusterName, ok := sourceMap["cluster_name"].(string); ok {
-		dataLakePipelineSource.ClusterName = clusterName
+		dataLakePipelineSource.ClusterName = conversion.StringPtr(clusterName)
 	}
 
 	if collectionName, ok := sourceMap["collection_name"].(string); ok {
-		dataLakePipelineSource.CollectionName = collectionName
+		dataLakePipelineSource.CollectionName = conversion.StringPtr(collectionName)
 	}
 
 	if databaseName, ok := sourceMap["database_name"].(string); ok {
-		dataLakePipelineSource.DatabaseName = databaseName
+		dataLakePipelineSource.DatabaseName = conversion.StringPtr(databaseName)
 	}
 
 	if policyID, ok := sourceMap["policy_item_id"].(string); ok {
-		dataLakePipelineSource.PolicyItemID = policyID
+		dataLakePipelineSource.PolicyItemId = conversion.StringPtr(policyID)
 	}
 
 	return dataLakePipelineSource
 }
 
-func newTransformation(d *schema.ResourceData) []*matlas.DataLakePipelineTransformation {
+func newTransformation(d *schema.ResourceData) *[]admin.FieldTransformation {
 	transformations, ok := d.Get("transformations").([]any)
 	if !ok || len(transformations) == 0 {
 		return nil
 	}
 
-	dataLakePipelineTransformations := make([]*matlas.DataLakePipelineTransformation, len(transformations))
-	for i, transformation := range transformations {
+	dataLakePipelineTransformations := make([]admin.FieldTransformation, 0)
+	for _, transformation := range transformations {
 		transformationMap := transformation.(map[string]any)
-		dataLakeTransformation := &matlas.DataLakePipelineTransformation{}
+		dataLakeTransformation := admin.FieldTransformation{}
 
 		if transformationType, ok := transformationMap["type"].(string); ok {
-			dataLakeTransformation.Type = transformationType
+			dataLakeTransformation.Type = conversion.StringPtr(transformationType)
 		}
 
 		if transformationField, ok := transformationMap["field"].(string); ok {
-			dataLakeTransformation.Field = transformationField
+			dataLakeTransformation.Field = conversion.StringPtr(transformationField)
 		}
 
-		if dataLakeTransformation.Field != "" || dataLakeTransformation.Type != "" {
-			dataLakePipelineTransformations[i] = dataLakeTransformation
+		if conversion.SafeString(dataLakeTransformation.Field) != "" || conversion.SafeString(dataLakeTransformation.Type) != "" {
+			dataLakePipelineTransformations = append(dataLakePipelineTransformations, dataLakeTransformation)
 		}
 	}
-
-	return dataLakePipelineTransformations
+	return &dataLakePipelineTransformations
 }
 
 func flattenSource(source *admin.IngestionSource) []map[string]any {

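The builders above are the write-side mirror of the flatten changes: v2 request models also use pointer fields, so every plain string is wrapped with `conversion.StringPtr` and optional values are read back through `conversion.SafeString`. Minimal sketches of both helpers, with signatures inferred from the call sites in this patch:

```go
package conversion

// StringPtr returns a pointer to v, for populating optional *string fields
// on v2 SDK request models.
func StringPtr(v string) *string {
	return &v
}

// SafeString dereferences p, falling back to the empty string when p is nil.
func SafeString(p *string) string {
	if p == nil {
		return ""
	}
	return *p
}
```
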
From 5422b240d9c38cf6dfd9ef917d2e4dfde556883f Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Tue, 6 Feb 2024 21:43:31 +0100
Subject: [PATCH 18/19] migration test

---
 .github/workflows/migration-tests.yml         | 28 ++++++++++-
 ...ource_data_lake_pipeline_migration_test.go | 48 +++++++++++++++++++
 2 files changed, 75 insertions(+), 1 deletion(-)
 create mode 100644 internal/service/datalakepipeline/resource_data_lake_pipeline_migration_test.go

diff --git a/.github/workflows/migration-tests.yml b/.github/workflows/migration-tests.yml
index 663ff934f5..189b9bdb14 100644
--- a/.github/workflows/migration-tests.yml
+++ b/.github/workflows/migration-tests.yml
@@ -65,6 +65,7 @@ jobs:
       network: ${{ steps.filter.outputs.network == 'true' || env.mustTrigger == 'true' }}
       encryption: ${{ steps.filter.outputs.encryption == 'true' || env.mustTrigger == 'true' }}
       serverless: ${{ steps.filter.outputs.serverless == 'true' || env.mustTrigger == 'true' }}
+      data_lake: ${{ steps.filter.outputs.data_lake == 'true' || env.mustTrigger == 'true' }}
     steps:
     - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
     - uses: dorny/paths-filter@0bc4621a3135347011ad047f9ecf449bf72ce2bd
@@ -129,6 +130,8 @@ jobs:
             - 'internal/service/serverlessinstance/*.go'
             - 'internal/service/privatelinkendpointserverless/*.go'
             - 'internal/service/privatelinkendpointserviceserverless/*.go'
+          data_lake:
+            - 'internal/service/datalakepipeline/*.go'
   
   project: 
     needs: [ change-detection, get-provider-version ]
@@ -435,4 +438,27 @@ jobs:
           MONGODB_ATLAS_LAST_VERSION: ${{ needs.get-provider-version.outputs.provider_version }}
           TEST_REGEX: "^TestAccMigrationServerless"
         run: make testacc
-      
\ No newline at end of file
+  data_lake:
+    needs: [ change-detection, get-provider-version ]
+    if: ${{ needs.change-detection.outputs.data_lake == 'true' || inputs.test_group == 'data_lake' }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version-file: 'go.mod'
+      - uses: hashicorp/setup-terraform@v3
+        with:
+          terraform_version: ${{ env.terraform_version }}
+          terraform_wrapper: false
+      - name: Migration Tests
+        env:
+          MONGODB_ATLAS_PUBLIC_KEY: ${{ secrets.MONGODB_ATLAS_PUBLIC_KEY_CLOUD_DEV }}
+          MONGODB_ATLAS_PRIVATE_KEY: ${{ secrets.MONGODB_ATLAS_PRIVATE_KEY_CLOUD_DEV }}
+          MONGODB_ATLAS_ORG_ID: ${{ vars.MONGODB_ATLAS_ORG_ID_CLOUD_DEV }}
+          MONGODB_ATLAS_BASE_URL: ${{ vars.MONGODB_ATLAS_BASE_URL }}
+          MONGODB_ATLAS_LAST_VERSION: ${{ needs.get-provider-version.outputs.provider_version }}
+          TEST_REGEX: "^TestAccMigrationcDataLake"
+        run: make testacc
diff --git a/internal/service/datalakepipeline/resource_data_lake_pipeline_migration_test.go b/internal/service/datalakepipeline/resource_data_lake_pipeline_migration_test.go
new file mode 100644
index 0000000000..b55bfdc1dd
--- /dev/null
+++ b/internal/service/datalakepipeline/resource_data_lake_pipeline_migration_test.go
@@ -0,0 +1,48 @@
+package datalakepipeline_test
+
+import (
+	"os"
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-testing/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
+	"github.com/hashicorp/terraform-plugin-testing/plancheck"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/mig"
+)
+
+func TestAccMigrationDataLakePipeline_basic(t *testing.T) {
+	var (
+		resourceName = "mongodbatlas_data_lake_pipeline.test"
+		clusterName  = acctest.RandomWithPrefix("test-acc-index")
+		orgID        = os.Getenv("MONGODB_ATLAS_ORG_ID")
+		projectName  = acctest.RandomWithPrefix("test-acc")
+		name         = acctest.RandomWithPrefix("test-acc-index")
+	)
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { mig.PreCheckBasic(t) },
+		CheckDestroy: checkDestroy,
+		Steps: []resource.TestStep{
+			{
+				ExternalProviders: mig.ExternalProviders(),
+				Config:            configBasic(orgID, projectName, clusterName, name),
+				Check: resource.ComposeTestCheckFunc(
+					checkExists(resourceName),
+					resource.TestCheckResourceAttrSet(resourceName, "project_id"),
+					resource.TestCheckResourceAttr(resourceName, "name", name),
+					resource.TestCheckResourceAttr(resourceName, "state", "ACTIVE"),
+				),
+			},
+			{
+				ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
+				Config:                   configBasic(orgID, projectName, clusterName, name),
+				ConfigPlanChecks: resource.ConfigPlanChecks{
+					PreApply: []plancheck.PlanCheck{
+						acc.DebugPlan(),
+						plancheck.ExpectEmptyPlan(),
+					},
+				},
+			},
+		},
+	})
+}

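The first step of the new migration test pins the last released provider through `mig.ExternalProviders()`, which is why the workflow exports `MONGODB_ATLAS_LAST_VERSION`. A hedged sketch of what that helper plausibly returns; the actual implementation lives in `internal/testutil/mig` and is not shown in this patch:

```go
package mig

import (
	"os"

	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
)

// ExternalProviders: assumed shape, pinning the mongodbatlas provider to the
// version exported by the workflow so the first step applies with the old release.
func ExternalProviders() map[string]resource.ExternalProvider {
	return map[string]resource.ExternalProvider{
		"mongodbatlas": {
			VersionConstraint: os.Getenv("MONGODB_ATLAS_LAST_VERSION"),
			Source:            "mongodb/mongodbatlas",
		},
	}
}
```
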
From df9d305968389a00f35b1c815e5f154ff08d1dba Mon Sep 17 00:00:00 2001
From: Leo Antoli <430982+lantoli@users.noreply.github.com>
Date: Thu, 8 Feb 2024 07:11:24 +0100
Subject: [PATCH 19/19] mig test step

---
 ...resource_data_lake_pipeline_migration_test.go | 16 +++-------------
 1 file changed, 3 insertions(+), 13 deletions(-)

diff --git a/internal/service/datalakepipeline/resource_data_lake_pipeline_migration_test.go b/internal/service/datalakepipeline/resource_data_lake_pipeline_migration_test.go
index b55bfdc1dd..c0c489796f 100644
--- a/internal/service/datalakepipeline/resource_data_lake_pipeline_migration_test.go
+++ b/internal/service/datalakepipeline/resource_data_lake_pipeline_migration_test.go
@@ -6,8 +6,6 @@ import (
 
 	"github.com/hashicorp/terraform-plugin-testing/helper/acctest"
 	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
-	"github.com/hashicorp/terraform-plugin-testing/plancheck"
-	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/mig"
 )
 
@@ -18,6 +16,7 @@ func TestAccMigrationDataLakePipeline_basic(t *testing.T) {
 		orgID        = os.Getenv("MONGODB_ATLAS_ORG_ID")
 		projectName  = acctest.RandomWithPrefix("test-acc")
 		name         = acctest.RandomWithPrefix("test-acc-index")
+		config       = configBasic(orgID, projectName, clusterName, name)
 	)
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck:     func() { mig.PreCheckBasic(t) },
@@ -25,7 +24,7 @@ func TestAccMigrationcDataLakePipeline_basic(t *testing.T) {
 		Steps: []resource.TestStep{
 			{
 				ExternalProviders: mig.ExternalProviders(),
-				Config:            configBasic(orgID, projectName, clusterName, name),
+				Config:            config,
 				Check: resource.ComposeTestCheckFunc(
 					checkExists(resourceName),
 					resource.TestCheckResourceAttrSet(resourceName, "project_id"),
@@ -33,16 +32,7 @@ func TestAccMigrationcDataLakePipeline_basic(t *testing.T) {
 					resource.TestCheckResourceAttr(resourceName, "state", "ACTIVE"),
 				),
 			},
-			{
-				ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
-				Config:                   configBasic(orgID, projectName, clusterName, name),
-				ConfigPlanChecks: resource.ConfigPlanChecks{
-					PreApply: []plancheck.PlanCheck{
-						acc.DebugPlan(),
-						plancheck.ExpectEmptyPlan(),
-					},
-				},
-			},
+			mig.TestStep(config),
 		},
 	})
 }
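
The block removed above shows exactly what `mig.TestStep(config)` has to produce: a second step that re-applies the same config with the in-development provider and asserts an empty plan. A sketch of the helper inferred field-for-field from that boilerplate (the shared implementation may differ in detail):

```go
package mig

import (
	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
	"github.com/hashicorp/terraform-plugin-testing/plancheck"

	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc"
)

// TestStep re-applies config with the current provider factories and requires
// that the upgrade produces an empty plan.
func TestStep(config string) resource.TestStep {
	return resource.TestStep{
		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
		Config:                   config,
		ConfigPlanChecks: resource.ConfigPlanChecks{
			PreApply: []plancheck.PlanCheck{
				acc.DebugPlan(),
				plancheck.ExpectEmptyPlan(),
			},
		},
	}
}
```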