Skip to content

Commit

Permalink
Merge pull request #1476 from dhermes/bigtable-update-generated
Browse files Browse the repository at this point in the history
Updating Bigtable generated files.
  • Loading branch information
dhermes committed Feb 17, 2016
2 parents 09c3c98 + 3252643 commit 4ca596c
Show file tree
Hide file tree
Showing 8 changed files with 357 additions and 72 deletions.
9 changes: 4 additions & 5 deletions gcloud/bigtable/_generated/_bigtable_cluster_data.proto
Original file line number Diff line number Diff line change
Expand Up @@ -62,11 +62,6 @@ message Cluster {
// projects/<project>/zones/<zone>/clusters/[a-z][-a-z0-9]*
string name = 1;

// If this cluster has been deleted, the time at which its backup will
// be irrevocably destroyed. Omitted otherwise.
// This cannot be set directly, only through DeleteCluster.
google.protobuf.Timestamp delete_time = 2;

// The operation currently running on the cluster, if any.
// This cannot be set directly, only through CreateCluster, UpdateCluster,
// or UndeleteCluster. Calls to these methods will be rejected if
Expand All @@ -91,4 +86,8 @@ enum StorageType {

// Data will be stored in SSD, providing low and consistent latencies.
STORAGE_SSD = 1;

// Data will be stored in HDD, providing high and less predictable
// latencies.
STORAGE_HDD = 2;
}
15 changes: 15 additions & 0 deletions gcloud/bigtable/_generated/_bigtable_data.proto
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,15 @@ message RowRange {
bytes end_key = 3;
}

// Specifies a non-contiguous set of rows. The effective set is the union
// of the explicitly listed row keys and the row ranges below.
message RowSet {
  // Single rows included in the set, identified by their row key.
  repeated bytes row_keys = 1;

  // Contiguous row ranges included in the set.
  repeated RowRange row_ranges = 2;
}

// Specifies a contiguous range of columns within a single column family.
// The range spans from <column_family>:<start_qualifier> to
// <column_family>:<end_qualifier>, where both bounds can be either inclusive or
Expand Down Expand Up @@ -374,15 +383,21 @@ message RowFilter {
ValueRange value_range_filter = 15;

// Skips the first N cells of each row, matching all subsequent cells.
// If duplicate cells are present, as is possible when using an Interleave,
// each copy of the cell is counted separately.
int32 cells_per_row_offset_filter = 10;

// Matches only the first N cells of each row.
// If duplicate cells are present, as is possible when using an Interleave,
// each copy of the cell is counted separately.
int32 cells_per_row_limit_filter = 11;

// Matches only the most recent N cells within each column. For example,
// if N=2, this filter would match column "foo:bar" at timestamps 10 and 9,
// skip all earlier cells in "foo:bar", and then begin matching again in
// column "foo:bar2".
// If duplicate cells are present, as is possible when using an Interleave,
// each copy of the cell is counted separately.
int32 cells_per_column_limit_filter = 12;

// Replaces each cell's value with the empty string.
Expand Down
7 changes: 7 additions & 0 deletions gcloud/bigtable/_generated/_bigtable_service.proto
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,13 @@ service BigtableService {
option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:mutate" body: "*" };
}

// Mutates multiple rows in a batch. Each individual row is mutated
// atomically as in MutateRow, but the entire batch is not executed
// atomically: some entries may succeed while others fail, with the
// per-entry outcome reported in MutateRowsResponse.
rpc MutateRows(MutateRowsRequest) returns (MutateRowsResponse) {
option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:mutateRows" body: "*" };
}

// Mutates a row atomically based on the output of a predicate Reader filter.
rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) {
option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:checkAndMutate" body: "*" };
Expand Down
37 changes: 37 additions & 0 deletions gcloud/bigtable/_generated/_bigtable_service_messages.proto
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ syntax = "proto3";
package google.bigtable.v1;

import "google/bigtable/v1/bigtable_data.proto";
import "google/rpc/status.proto";

option java_multiple_files = true;
option java_outer_classname = "BigtableServiceMessagesProto";
Expand All @@ -35,6 +36,11 @@ message ReadRowsRequest {

// A range of rows from which to read.
RowRange row_range = 3;

// A set of rows from which to read. Entries need not be in order, and will
// be deduplicated before reading.
// The total serialized size of the set must not exceed 1MB.
RowSet row_set = 8;
}

// The filter to apply to the contents of the specified row(s). If unset,
Expand Down Expand Up @@ -124,6 +130,37 @@ message MutateRowRequest {
repeated Mutation mutations = 3;
}

// Request message for BigtableService.MutateRows.
message MutateRowsRequest {
  // A single row and the set of mutations to apply to it. Each Entry
  // is applied atomically, independently of the other entries in the
  // request.
  message Entry {
    // The key of the row to which the `mutations` should be applied.
    bytes row_key = 1;

    // Changes to be atomically applied to the specified row. Mutations are
    // applied in order, meaning that earlier mutations can be masked by
    // later ones.
    // At least one mutation must be specified.
    repeated Mutation mutations = 2;
  }

  // The unique name of the table to which the mutations should be applied.
  // NOTE(review): presumably of the form
  // "projects/<project>/zones/<zone>/clusters/<cluster>/tables/<table>",
  // matching the service's HTTP binding — confirm against the service proto.
  string table_name = 1;

  // The row keys/mutations to be applied in bulk.
  // Each entry is applied as an atomic mutation, but the entries may be
  // applied in arbitrary order (even between entries for the same row).
  // At least one entry must be specified, and in total the entries may
  // contain at most 100000 mutations.
  repeated Entry entries = 2;
}

// Response message for BigtableService.MutateRows.
message MutateRowsResponse {
  // The results for each Entry from the request, presented in the order
  // in which the entries were originally given. Each google.rpc.Status
  // reports the outcome of the corresponding entry's mutation; the list
  // therefore has exactly one element per request entry, matched by index.
  repeated google.rpc.Status statuses = 1;
}

// Request message for BigtableService.CheckAndMutateRow
message CheckAndMutateRowRequest {
// The unique name of the table to which the conditional mutation should be
Expand Down
29 changes: 13 additions & 16 deletions gcloud/bigtable/_generated/bigtable_cluster_data_pb2.py

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading

0 comments on commit 4ca596c

Please sign in to comment.