Added overloads that do not overwrite by default #6774

Merged 7 commits on Dec 12, 2019
Changes from 2 commits
1 change: 1 addition & 0 deletions sdk/storage/azure-storage-file-datalake/CHANGELOG.md
@@ -2,6 +2,7 @@

## Version XXXX-X-X-beta.X (XXXX-XX-XX)
- Added support for exists method on FileClients and DirectoryClients
- Added no-overwrite-by-default behavior to the minimal create method on FileClients and DirectoryClients and to the minimal flush method on FileClients, with boolean overloads to opt in to overwriting

## Version 12.0.0-beta.7 (2019-12-04)
This package's
DataLakeFileAsyncClient.java
@@ -12,6 +12,7 @@
import com.azure.core.util.logging.ClientLogger;
import com.azure.storage.blob.BlobContainerAsyncClient;
import com.azure.storage.blob.specialized.BlockBlobAsyncClient;
import com.azure.storage.common.implementation.Constants;
import com.azure.storage.file.datalake.implementation.models.LeaseAccessConditions;
import com.azure.storage.file.datalake.implementation.models.ModifiedAccessConditions;
import com.azure.storage.file.datalake.implementation.models.PathResourceType;
@@ -216,6 +217,7 @@ Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset,
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
* By default this method will not overwrite existing data.
*
* <p><strong>Code Samples</strong></p>
*
@@ -231,7 +233,37 @@ Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset,
*/
public Mono<PathInfo> flush(long position) {
try {
- return flushWithResponse(position, false, false, null, null).flatMap(FluxUtil::toMono);
+ return flush(position, false);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}

/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.flush#long-boolean}
*
* <p>For more information, see the
* <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/update">Azure
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param overwrite Whether to overwrite if data already exists on the file.
*
* @return A reactive response containing the information of the created resource.
*/
public Mono<PathInfo> flush(long position, boolean overwrite) {
try {
DataLakeRequestConditions requestConditions = null;
if (!overwrite) {
requestConditions = new DataLakeRequestConditions()
.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
}
return flushWithResponse(position, false, false, null, requestConditions).flatMap(FluxUtil::toMono);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
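Since the minimal `flush(long)` now routes through `flush(long, false)`, callers that re-flush a file holding committed data will start seeing conditional-request failures. A minimal sketch of the new contract from the caller's side — the endpoint is a placeholder, and the retry-with-overwrite fallback is just one possible policy, not something this PR prescribes:

```java
import com.azure.storage.file.datalake.DataLakeFileAsyncClient;
import com.azure.storage.file.datalake.DataLakePathClientBuilder;
import com.azure.storage.file.datalake.models.PathInfo;
import reactor.core.publisher.Mono;

public class FlushNoOverwriteSketch {
    public static void main(String[] args) {
        // Placeholder endpoint; substitute a real account, file system, path, and SAS.
        DataLakeFileAsyncClient client = new DataLakePathClientBuilder()
            .endpoint("https://<account>.dfs.core.windows.net/<filesystem>/<file>?<sas>")
            .buildFileAsyncClient();

        long position = 1024L; // assumes 1024 bytes were appended beforehand

        // flush(position) now delegates to flush(position, false), which sends
        // If-None-Match: "*"; the service rejects the request (HTTP 412) once the
        // file already has committed data.
        Mono<PathInfo> guarded = client.flush(position)
            .onErrorResume(error -> {
                System.out.println("Conditional flush failed: " + error.getMessage());
                return client.flush(position, true); // explicit opt-in to overwrite
            });

        guarded.block();
    }
}
```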
DataLakeFileClient.java
@@ -11,6 +11,7 @@
import com.azure.storage.blob.models.BlobDownloadResponse;
import com.azure.storage.blob.specialized.BlockBlobClient;
import com.azure.storage.common.Utility;
import com.azure.storage.common.implementation.Constants;
import com.azure.storage.common.implementation.StorageImplUtils;
import com.azure.storage.file.datalake.models.DataLakeRequestConditions;
import com.azure.storage.file.datalake.models.DownloadRetryOptions;
@@ -187,6 +188,7 @@ public Response<Void> appendWithResponse(InputStream data, long fileOffset, long
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
* By default this method will not overwrite existing data.
*
* <p><strong>Code Samples</strong></p>
*
@@ -201,7 +203,32 @@ public Response<Void> appendWithResponse(InputStream data, long fileOffset, long
* @return Information about the created resource.
*/
public PathInfo flush(long position) {
- return flushWithResponse(position, false, false, null, null, null, Context.NONE).getValue();
+ return flush(position, false);
}

/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.flush#long-boolean}
*
* <p>For more information, see the
* <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/update">Azure
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param overwrite Whether to overwrite if data already exists on the file.
*
* @return Information about the created resource.
*/
public PathInfo flush(long position, boolean overwrite) {
DataLakeRequestConditions requestConditions = null;
if (!overwrite) {
requestConditions = new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
}
return flushWithResponse(position, false, false, null, requestConditions, null, Context.NONE).getValue();
}

/**
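The synchronous client mirrors the change. A sketch of how a caller might preserve the old always-overwrite semantics; catching `RuntimeException` here is an assumption to keep the sketch decoupled from the implementation-package `StorageErrorException` the new tests reference:

```java
import com.azure.storage.file.datalake.DataLakeFileClient;
import com.azure.storage.file.datalake.DataLakePathClientBuilder;
import com.azure.storage.file.datalake.models.PathInfo;

public class SyncFlushSketch {
    public static void main(String[] args) {
        // Placeholder endpoint; substitute real values.
        DataLakeFileClient client = new DataLakePathClientBuilder()
            .endpoint("https://<account>.dfs.core.windows.net/<filesystem>/<file>?<sas>")
            .buildFileClient();

        long position = 1024L;
        PathInfo info;
        try {
            info = client.flush(position); // equivalent to flush(position, false)
        } catch (RuntimeException e) {
            // HTTP 412: data is already committed and overwrite was not requested.
            info = client.flush(position, true);
        }
        System.out.println("Flushed, ETag: " + info.getETag());
    }
}
```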
DataLakePathAsyncClient.java
@@ -17,6 +17,7 @@
import com.azure.storage.blob.specialized.SpecializedBlobClientBuilder;
import com.azure.storage.common.StorageSharedKeyCredential;
import com.azure.storage.common.Utility;
import com.azure.storage.common.implementation.Constants;
import com.azure.storage.file.datalake.implementation.DataLakeStorageClientBuilder;
import com.azure.storage.file.datalake.implementation.DataLakeStorageClientImpl;
import com.azure.storage.file.datalake.implementation.models.LeaseAccessConditions;
@@ -193,7 +194,7 @@ public DataLakeServiceVersion getServiceVersion() {
}

/**
- * Creates a resource.
+ * Creates a resource. By default this method will not overwrite an existing path.
*
* <p><strong>Code Samples</strong></p>
*
@@ -207,7 +208,34 @@ public DataLakeServiceVersion getServiceVersion() {
*/
public Mono<PathInfo> create() {
try {
- return createWithResponse(null, null, null, null, null).flatMap(FluxUtil::toMono);
+ return create(false);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}

/**
* Creates a resource.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.create#boolean}
*
* <p>For more information see the
* <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create">Azure
* Docs</a></p>
*
* @param overwrite Whether to overwrite if the path already exists.
*
* @return A reactive response containing information about the created resource.
*/
public Mono<PathInfo> create(boolean overwrite) {
try {
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
if (!overwrite) {
requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
}
return createWithResponse(null, null, null, null, requestConditions).flatMap(FluxUtil::toMono);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
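The new overload is a thin wrapper over the existing `createWithResponse` overload, and `Constants.HeaderConstants.ETAG_WILDCARD` is the literal `"*"`. For callers pinned to an older release, the same guard can be hand-rolled — a sketch assuming an already-built client:

```java
import com.azure.storage.file.datalake.DataLakePathAsyncClient;
import com.azure.storage.file.datalake.models.DataLakeRequestConditions;

class CreateIfAbsentSketch {
    static void createIfAbsent(DataLakePathAsyncClient client) {
        // The same condition create(false) builds internally.
        DataLakeRequestConditions noClobber = new DataLakeRequestConditions()
            .setIfNoneMatch("*"); // the value behind Constants.HeaderConstants.ETAG_WILDCARD
        client.createWithResponse(null, null, null, null, noClobber)
            .subscribe(
                response -> System.out.println("Created, ETag: " + response.getValue().getETag()),
                error -> System.out.println("Create skipped: " + error.getMessage()));
    }
}
```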
DataLakePathClient.java
@@ -11,6 +11,7 @@
import com.azure.storage.blob.models.BlobProperties;
import com.azure.storage.blob.specialized.BlockBlobClient;
import com.azure.storage.common.StorageSharedKeyCredential;
import com.azure.storage.common.implementation.Constants;
import com.azure.storage.common.implementation.StorageImplUtils;
import com.azure.storage.file.datalake.implementation.models.LeaseAccessConditions;
import com.azure.storage.file.datalake.implementation.models.ModifiedAccessConditions;
@@ -110,7 +111,7 @@ public DataLakeServiceVersion getServiceVersion() {
}

/**
- * Creates a resource.
+ * Creates a resource. By default this method will not overwrite an existing path.
*
* <p><strong>Code Samples</strong></p>
*
@@ -123,7 +124,30 @@ public DataLakeServiceVersion getServiceVersion() {
* @return Information about the created resource.
*/
public PathInfo create() {
- return createWithResponse(null, null, null, null, null, null, Context.NONE).getValue();
+ return create(false);
}

/**
* Creates a resource.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.create#boolean}
*
* <p>For more information see the
* <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create">Azure
* Docs</a></p>
*
* @param overwrite Whether to overwrite if the path already exists.
*
* @return Information about the created resource.
*/
public PathInfo create(boolean overwrite) {
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
if (!overwrite) {
requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
}
return createWithResponse(null, null, null, null, requestConditions, null, Context.NONE).getValue();
}

/**
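Together with the `exists()` method added in the same beta (see the CHANGELOG entry above), the sync client supports a race-tolerant create-if-absent pattern; a sketch, with builder values as placeholders:

```java
import com.azure.storage.file.datalake.DataLakeDirectoryClient;
import com.azure.storage.file.datalake.DataLakePathClientBuilder;

public class CreateIfMissingSketch {
    public static void main(String[] args) {
        DataLakeDirectoryClient dir = new DataLakePathClientBuilder()
            .endpoint("https://<account>.dfs.core.windows.net/<filesystem>/<directory>?<sas>")
            .buildDirectoryClient();

        if (!dir.exists()) {
            try {
                // Still conditional (If-None-Match: "*"), so a racing creator
                // cannot be silently clobbered.
                dir.create();
            } catch (RuntimeException e) {
                // A concurrent creator won the race; the directory now exists.
            }
        }
    }
}
```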
DataLakeFileAsyncClient JavaDoc code samples
@@ -136,6 +136,12 @@ public void flushCodeSnippets() {
System.out.println("Flush data completed"));
// END: com.azure.storage.file.datalake.DataLakeFileAsyncClient.flush#long

// BEGIN: com.azure.storage.file.datalake.DataLakeFileAsyncClient.flush#long-boolean
boolean overwrite = true;
client.flush(position, overwrite).subscribe(response ->
System.out.println("Flush data completed"));
// END: com.azure.storage.file.datalake.DataLakeFileAsyncClient.flush#long-boolean

// BEGIN: com.azure.storage.file.datalake.DataLakeFileAsyncClient.flushWithResponse#long-boolean-boolean-PathHttpHeaders-DataLakeRequestConditions
FileRange range = new FileRange(1024, 2048L);
DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
DataLakeFileClient JavaDoc code samples
@@ -128,6 +128,12 @@ public void flushCodeSnippets() {
System.out.println("Flush data completed");
// END: com.azure.storage.file.datalake.DataLakeFileClient.flush#long

// BEGIN: com.azure.storage.file.datalake.DataLakeFileClient.flush#long-boolean
boolean overwrite = true;
client.flush(position, overwrite);
System.out.println("Flush data completed");
// END: com.azure.storage.file.datalake.DataLakeFileClient.flush#long-boolean

// BEGIN: com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse#long-boolean-boolean-PathHttpHeaders-DataLakeRequestConditions-Duration-Context
FileRange range = new FileRange(1024, 2048L);
DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
DataLakePathAsyncClient JavaDoc code samples
@@ -38,6 +38,12 @@ public void createCodeSnippets() {
System.out.printf("Last Modified Time:%s", response.getLastModified()));
// END: com.azure.storage.file.datalake.DataLakePathAsyncClient.create

// BEGIN: com.azure.storage.file.datalake.DataLakePathAsyncClient.create#boolean
boolean overwrite = true;
client.create(overwrite).subscribe(response ->
System.out.printf("Last Modified Time:%s", response.getLastModified()));
// END: com.azure.storage.file.datalake.DataLakePathAsyncClient.create#boolean

// BEGIN: com.azure.storage.file.datalake.DataLakePathAsyncClient.createWithResponse#String-String-PathHttpHeaders-Map-DataLakeRequestConditions
PathHttpHeaders httpHeaders = new PathHttpHeaders()
.setContentLanguage("en-US")
DataLakePathClient JavaDoc code samples
@@ -49,6 +49,11 @@ public void createCodeSnippets() {
System.out.printf("Last Modified Time:%s", client.create().getLastModified());
// END: com.azure.storage.file.datalake.DataLakePathClient.create

// BEGIN: com.azure.storage.file.datalake.DataLakePathClient.create#boolean
boolean overwrite = true;
System.out.printf("Last Modified Time:%s", client.create(true).getLastModified());
// END: com.azure.storage.file.datalake.DataLakePathClient.create#boolean

// BEGIN: com.azure.storage.file.datalake.DataLakePathClient.createWithResponse#String-String-PathHttpHeaders-Map-DataLakeRequestConditions-Duration-Context
PathHttpHeaders httpHeaders = new PathHttpHeaders()
.setContentLanguage("en-US")
APISpec.groovy
@@ -90,7 +90,7 @@ class APISpec extends Specification {

static final String garbageLeaseID = UUID.randomUUID().toString()

public static final String defaultEndpointTemplate = "https://%s.dfs.core.windows.net/"
public static final String defaultEndpointTemplate = "http://%s.dfs.core.windows.net/"

static def AZURE_TEST_MODE = "AZURE_TEST_MODE"
static def DATA_LAKE_STORAGE = "STORAGE_DATA_LAKE_"
DirectoryAPITest.groovy
@@ -64,6 +64,18 @@ class DirectoryAPITest extends APISpec {
thrown(Exception)
}

def "Create overwrite"() {
when:
dc = fsc.getDirectoryClient(generatePathName())
dc.create()

// Try to create the resource again
dc.create(false)

then:
thrown(StorageErrorException)
}

def "Exists"() {
when:
dc = fsc.getDirectoryClient(generatePathName())
FileAPITest.groovy
@@ -68,6 +68,18 @@ class FileAPITest extends APISpec {
thrown(StorageErrorException)
}

def "Create overwrite"() {
when:
fc = fsc.getFileClient(generatePathName())
fc.create()

// Try to create the resource again
fc.create(false)

then:
thrown(StorageErrorException)
}

def "Exists"() {
when:
fc = fsc.getFileClient(generatePathName())
@@ -1402,6 +1414,18 @@
thrown(StorageErrorException)
}

def "Flush data overwrite"() {
when:
fc.append(new ByteArrayInputStream(defaultData.array()), 0, defaultDataSize)
fc.flush(defaultDataSize)
fc.append(new ByteArrayInputStream(defaultData.array()), 0, defaultDataSize)
// Attempt to write data without overwrite enabled
fc.flush(defaultDataSize, false)

then:
thrown(StorageErrorException)
}

def "Get File Name and Build Client"() {
when:
DataLakeFileClient client = fsc.getFileClient(originalFileName)