diff --git a/README.rst b/README.rst index f481b2d86486..05ed976daebd 100644 --- a/README.rst +++ b/README.rst @@ -95,9 +95,9 @@ to Cloud Storage using this Client Library. import gcloud.storage bucket = gcloud.storage.get_bucket('bucket-id-here', 'project-id') # Then do other things... - key = bucket.get_key('/remote/path/to/file.txt') - print key.get_contents_as_string() - key.set_contents_from_string('New contents!') + blob = bucket.get_blob('/remote/path/to/file.txt') + print blob.get_contents_as_string() + blob.set_contents_from_string('New contents!') bucket.upload_file('/local/path.txt', '/remote/path/storage.txt') Contributing diff --git a/docs/_components/storage-getting-started.rst b/docs/_components/storage-getting-started.rst index 47ad25c9169f..046caac9a979 100644 --- a/docs/_components/storage-getting-started.rst +++ b/docs/_components/storage-getting-started.rst @@ -4,7 +4,7 @@ Getting started with Cloud Storage This tutorial focuses on using ``gcloud`` to access Google Cloud Storage. We'll go through the basic concepts, -how to operate on buckets and keys, +how to operate on buckets and blobs, and how to handle access control, among other things. @@ -114,32 +114,31 @@ so if you want to group data into "directories", you can do that. The fundamental container for a file in Cloud Storage -is called an Object, -however ``gcloud`` uses the term ``Key`` -to avoid confusion between ``object`` and ``Object``. +is called an Object, however ``gcloud`` uses the term ``Blob`` +to avoid confusion with the Python built-in ``object``. If you want to set some data, -you just create a ``Key`` inside your bucket -and store your data inside the key:: +you just create a ``Blob`` inside your bucket +and store your data inside the blob:: - >>> key = bucket.new_key('greeting.txt') - >>> key.set_contents_from_string('Hello world!') + >>> blob = bucket.new_blob('greeting.txt') + >>> blob.set_contents_from_string('Hello world!') -:func:`new_key <gcloud.storage.bucket.Bucket.new_key>` -creates a :class:`Key <gcloud.storage.key.Key>` object locally and -:func:`set_contents_from_string <gcloud.storage.key.Key.set_contents_from_string>` -allows you to put a string into the key. +:func:`new_blob <gcloud.storage.bucket.Bucket.new_blob>` +creates a :class:`Blob <gcloud.storage.blob.Blob>` object locally and +:func:`set_contents_from_string <gcloud.storage.blob.Blob.set_contents_from_string>` +allows you to put a string into the blob. Now we can test if it worked:: - >>> key = bucket.get_key('greeting.txt') - >>> print key.get_contents_as_string() + >>> blob = bucket.get_blob('greeting.txt') + >>> print blob.get_contents_as_string() Hello world! What if you want to save the contents to a file? :: - >>> key.get_contents_to_filename('greetings.txt') + >>> blob.get_contents_to_filename('greetings.txt') Then you can look at the file in a terminal:: @@ -149,32 +148,32 @@ Then you can look at the file in a terminal:: And what about when you're not dealing with text? That's pretty simple too:: - >>> key = bucket.new_key('kitten.jpg') - >>> key.set_contents_from_filename('kitten.jpg') + >>> blob = bucket.new_blob('kitten.jpg') + >>> blob.set_contents_from_filename('kitten.jpg') And to test whether it worked? :: - >>> key = bucket.get_key('kitten.jpg') - >>> key.get_contents_to_filename('kitten2.jpg') + >>> blob = bucket.get_blob('kitten.jpg') + >>> blob.get_contents_to_filename('kitten2.jpg') and check if they are the same in a terminal:: $ diff kitten.jpg kitten2.jpg Notice that we're using -:func:`get_key <gcloud.storage.bucket.Bucket.get_key>` -to retrieve a key we know exists remotely. -If the key doesn't exist, it will return ``None``. +:func:`get_blob <gcloud.storage.bucket.Bucket.get_blob>` +to retrieve a blob we know exists remotely. +If the blob doesn't exist, it will return ``None``. -..
note:: ``get_key`` is **not** retrieving the entire object's data. +.. note:: ``get_blob`` is **not** retrieving the entire object's data. -If you want to "get-or-create" the key +If you want to "get-or-create" the blob (that is, overwrite it if it already exists), -you can use :func:`new_key <gcloud.storage.bucket.Bucket.new_key>`. -However, keep in mind, the key is not created +you can use :func:`new_blob <gcloud.storage.bucket.Bucket.new_blob>`. +However, keep in mind that the blob is not created until you store some data inside of it. -If you want to check whether a key exists, +If you want to check whether a blob exists, you can use the ``in`` operator in Python:: >>> print 'kitten.jpg' in bucket @@ -191,17 +190,17 @@ to retrieve the bucket object:: >>> bucket = connection.get_bucket('my-bucket') -If you want to get all the keys in the bucket, +If you want to get all the blobs in the bucket, you can use -:func:`get_all_keys <gcloud.storage.bucket.Bucket.get_all_keys>`:: +:func:`get_all_blobs <gcloud.storage.bucket.Bucket.get_all_blobs>`:: - >>> keys = bucket.get_all_keys() + >>> blobs = bucket.get_all_blobs() -However, if you're looking to iterate through the keys, +However, if you're looking to iterate through the blobs, you can use the bucket itself as an iterator:: - >>> for key in bucket: - ... print key + >>> for blob in bucket: + ... print blob Deleting a bucket ----------------- @@ -234,7 +233,7 @@ Managing access control ----------------------- Cloud storage provides fine-grained access control -for both buckets and keys. +for both buckets and blobs. `gcloud` tries to simplify access control by working with entities and "grants". On any ACL, diff --git a/docs/_components/storage-quickstart.rst b/docs/_components/storage-quickstart.rst index 563055998d7c..2b0487ce18b5 100644 --- a/docs/_components/storage-quickstart.rst +++ b/docs/_components/storage-quickstart.rst @@ -53,22 +53,22 @@ and instantiating the demo connection:: >>> connection = demo.get_connection() Once you have the connection, -you can create buckets and keys:: +you can create buckets and blobs:: >>> connection.get_all_buckets() [<Bucket: ...>, ...] >>> bucket = connection.create_bucket('my-new-bucket') >>> print bucket <Bucket: my-new-bucket> - >>> key = bucket.new_key('my-test-file.txt') - >>> print key - <Key: my-new-bucket, my-test-file.txt> - >>> key = key.set_contents_from_string('this is test content!') - >>> print key.get_contents_as_string() + >>> blob = bucket.new_blob('my-test-file.txt') + >>> print blob + <Blob: my-new-bucket, my-test-file.txt> + >>> blob = blob.set_contents_from_string('this is test content!') + >>> print blob.get_contents_as_string() 'this is test content!' - >>> print bucket.get_all_keys() - [<Key: my-new-bucket, my-test-file.txt>] - >>> key.delete() + >>> print bucket.get_all_blobs() + [<Blob: my-new-bucket, my-test-file.txt>] + >>> blob.delete() >>> bucket.delete() .. note:: diff --git a/docs/index.rst b/docs/index.rst index a5b3618a5022..e6d7008f8998 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -10,8 +10,8 @@ datastore-transactions datastore-batches storage-api + storage-blobs storage-buckets - storage-keys storage-acl @@ -48,5 +48,5 @@ Cloud Storage from gcloud import storage bucket = storage.get_bucket('<bucket-name>', '<project-id>') - key = bucket.new_key('my-test-file.txt') - key = key.upload_contents_from_string('this is test content!') + blob = bucket.new_blob('my-test-file.txt') + blob = blob.upload_from_string('this is test content!') diff --git a/docs/storage-api.rst b/docs/storage-api.rst index f6849f56862c..edf609844b22 100644 --- a/docs/storage-api.rst +++ b/docs/storage-api.rst @@ -1,6 +1,6 @@ ..
toctree:: :maxdepth: 0 - :hidden: + :hidden: Storage ------- diff --git a/docs/storage-blobs.rst b/docs/storage-blobs.rst new file mode 100644 index 000000000000..384806d6e3d8 --- /dev/null +++ b/docs/storage-blobs.rst @@ -0,0 +1,7 @@ +Blobs / Objects +~~~~~~~~~~~~~~~ + +.. automodule:: gcloud.storage.blob + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/storage-keys.rst b/docs/storage-keys.rst deleted file mode 100644 index 3c75de8175f2..000000000000 --- a/docs/storage-keys.rst +++ /dev/null @@ -1,7 +0,0 @@ -Keys -~~~~ - -.. automodule:: gcloud.storage.key - :members: - :undoc-members: - :show-inheritance: \ No newline at end of file diff --git a/gcloud/storage/__init__.py b/gcloud/storage/__init__.py index c91b0bc1c9ab..7fcafe044e9b 100644 --- a/gcloud/storage/__init__.py +++ b/gcloud/storage/__init__.py @@ -19,9 +19,9 @@ >>> import gcloud.storage >>> bucket = gcloud.storage.get_bucket('bucket-id-here', 'project-id') >>> # Then do other things... ->>> key = bucket.get_key('/remote/path/to/file.txt') ->>> print key.get_contents_as_string() ->>> key.set_contents_from_string('New contents!') +>>> blob = bucket.get_blob('/remote/path/to/file.txt') +>>> print blob.get_contents_as_string() +>>> blob.set_contents_from_string('New contents!') >>> bucket.upload_file('/local/path.txt', '/remote/path/storage.txt') The main concepts with this API are: @@ -32,7 +32,7 @@ - :class:`gcloud.storage.bucket.Bucket` which represents a particular bucket (akin to a mounted disk on a computer). -- :class:`gcloud.storage.key.Key` which represents a pointer to a +- :class:`gcloud.storage.blob.Blob` which represents a pointer to a particular entity in Cloud Storage (akin to a file path on a remote machine). """ diff --git a/gcloud/storage/_helpers.py b/gcloud/storage/_helpers.py index cdf080aa69d2..b9e902deb526 100644 --- a/gcloud/storage/_helpers.py +++ b/gcloud/storage/_helpers.py @@ -79,11 +79,11 @@ def batch(self): ... bucket.enable_versioning() ... bucket.disable_website() - or for a key:: + or for a blob:: - >>> with key.batch: - ... key.content_type = 'image/jpeg' - ... key.content_encoding = 'gzip' + >>> with blob.batch: + ... blob.content_type = 'image/jpeg' + ... blob.content_encoding = 'gzip' Updates will be aggregated and sent as a single call to :meth:`_patch_properties` IFF the ``with`` block exits without diff --git a/gcloud/storage/acl.py b/gcloud/storage/acl.py index f6671e524c98..28de64ed2f45 100644 --- a/gcloud/storage/acl.py +++ b/gcloud/storage/acl.py @@ -491,15 +491,15 @@ class DefaultObjectACL(BucketACL): class ObjectACL(ACL): - """An ACL specifically for a key.""" + """An ACL specifically for a Cloud Storage object / blob. - def __init__(self, key): - """ - :type key: :class:`gcloud.storage.key.Key` - :param key: The key that this ACL corresponds to. - """ + :type blob: :class:`gcloud.storage.blob.Blob` + :param blob: The blob that this ACL corresponds to. + """ + + def __init__(self, blob): super(ObjectACL, self).__init__() - self.key = key + self.blob = blob def reload(self): """Reload the ACL data from Cloud Storage.
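The ACL workflow itself is unchanged by this rename; only the wrapped object moves from ``Key`` to ``Blob``. A minimal sketch of the renamed flow, assuming a ``bucket`` obtained as in the surrounding examples and an existing ``greeting.txt`` blob::

    >>> blob = bucket.get_blob('greeting.txt')
    >>> acl = blob.acl           # an ObjectACL bound to the blob
    >>> acl.reload()             # fetch the current rules from Cloud Storage
    >>> acl.all().grant_read()   # let all users read this object
    >>> acl.save()               # PATCH the blob with the updated rules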
@@ -509,8 +509,8 @@ def reload(self): """ self.entities.clear() - url_path = '%s/acl' % self.key.path - found = self.key.connection.api_request(method='GET', path=url_path) + url_path = '%s/acl' % self.blob.path + found = self.blob.connection.api_request(method='GET', path=url_path) self.loaded = True for entry in found['items']: self.add_entity(self.entity_from_dict(entry)) @@ -518,7 +518,7 @@ def reload(self): return self def save(self, acl=None): - """Save the ACL data for this key. + """Save the ACL data for this blob. :type acl: :class:`gcloud.storage.acl.ACL` :param acl: The ACL object to save. If left blank, this will @@ -531,8 +531,8 @@ def save(self): save_to_backend = True if save_to_backend: - result = self.key.connection.api_request( - method='PATCH', path=self.key.path, data={'acl': list(acl)}, + result = self.blob.connection.api_request( + method='PATCH', path=self.blob.path, data={'acl': list(acl)}, query_params={'projection': 'full'}) self.entities.clear() for entry in result['acl']: @@ -542,11 +542,11 @@ def save(self, acl=None): return self def clear(self): - """Remove all ACL rules from the key. + """Remove all ACL rules from the blob. Note that this won't actually remove *ALL* the rules, but it will remove all the non-default rules. In short, you'll still - have access to a key that you created even after you clear ACL + have access to a blob that you created even after you clear ACL rules with this method. """ return self.save([]) diff --git a/gcloud/storage/key.py b/gcloud/storage/blob.py similarity index 84% rename from gcloud/storage/key.py rename to gcloud/storage/blob.py index 7f49887819e9..9103f77f3606 100644 --- a/gcloud/storage/key.py +++ b/gcloud/storage/blob.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Create / interact with gcloud storage keys.""" +"""Create / interact with Google Cloud Storage blobs.""" import copy import mimetypes @@ -30,7 +30,7 @@ from gcloud.storage.acl import ObjectACL -class Key(_PropertyMixin): +class Blob(_PropertyMixin): """A wrapper around Cloud Storage's concept of an ``Object``.""" CUSTOM_PROPERTY_ACCESSORS = { @@ -65,19 +65,19 @@ class Key(_PropertyMixin): _acl = None def __init__(self, bucket=None, name=None, properties=None): - """Key constructor. + """Blob constructor. :type bucket: :class:`gcloud.storage.bucket.Bucket` - :param bucket: The bucket to which this key belongs. + :param bucket: The bucket to which this blob belongs. :type name: string - :param name: The name of the key. This corresponds to the + :param name: The name of the blob. This corresponds to the unique path of the object in the bucket. :type properties: dict :param properties: All the other data provided by Cloud Storage. """ - super(Key, self).__init__(name=name, properties=properties) + super(Blob, self).__init__(name=name, properties=properties) self.bucket = bucket @property @@ -88,22 +88,22 @@ def acl(self): return self._acl @classmethod - def from_dict(cls, key_dict, bucket=None): - """Instantiate a :class:`Key` from data returned by the JSON API. + def from_dict(cls, blob_dict, bucket=None): + """Instantiate a :class:`Blob` from data returned by the JSON API. - :type key_dict: dict - :param key_dict: A dictionary of data returned from getting an - Cloud Storage object. + :type blob_dict: dict + :param blob_dict: A dictionary of data returned from getting a + Cloud Storage object.
:type bucket: :class:`gcloud.storage.bucket.Bucket` - :param bucket: The bucket to which this key belongs (and by + :param bucket: The bucket to which this blob belongs (and by proxy, which connection to use). - :rtype: :class:`Key` - :returns: A key based on the data provided. + :rtype: :class:`Blob` + :returns: A blob based on the data provided. """ - return cls(bucket=bucket, name=key_dict['name'], properties=key_dict) + return cls(bucket=bucket, name=blob_dict['name'], properties=blob_dict) def __repr__(self): if self.bucket: @@ -111,11 +111,11 @@ def __repr__(self): else: bucket_name = None - return '<Key: %s, %s>' % (bucket_name, self.name) + return '<Blob: %s, %s>' % (bucket_name, self.name) @property def connection(self): - """Getter property for the connection to use with this Key. + """Getter property for the connection to use with this Blob. :rtype: :class:`gcloud.storage.connection.Connection` or None :returns: The connection to use, or None if no connection is set. @@ -125,24 +125,24 @@ def connection(self): @property def path(self): - """Getter property for the URL path to this Key. + """Getter property for the URL path to this Blob. :rtype: string - :returns: The URL path to this Key. + :returns: The URL path to this Blob. """ if not self.bucket: raise ValueError('Cannot determine path without a bucket defined.') elif not self.name: - raise ValueError('Cannot determine path without a key name.') + raise ValueError('Cannot determine path without a blob name.') return self.bucket.path + '/o/' + urllib.quote(self.name, safe='') @property def public_url(self): - """The public URL for this key's object. + """The public URL for this blob's object. :rtype: `string` - :returns: The public URL for this key. + :returns: The public URL for this blob. """ return '{storage_base_url}/{bucket_name}/{quoted_name}'.format( storage_base_url='http://commondatastorage.googleapis.com', @@ -150,14 +150,14 @@ def public_url(self): quoted_name=urllib.quote(self.name, safe='')) def generate_signed_url(self, expiration, method='GET'): - """Generates a signed URL for this key. + """Generates a signed URL for this blob. - If you have a key that you want to allow access to for a set + If you have a blob that you want to allow access to for a set amount of time, you can use this method to generate a URL that is only valid within a certain time period. This is particularly useful if you don't want publicly - accessible keys, but don't want to require users to explicitly + accessible blobs, but don't want to require users to explicitly log in. :type expiration: int, long, datetime.datetime, datetime.timedelta @@ -178,50 +178,50 @@ method=method) def exists(self): - """Determines whether or not this key exists. + """Determines whether or not this blob exists. :rtype: boolean - :returns: True if the key exists in Cloud Storage. + :returns: True if the blob exists in Cloud Storage. """ - return self.bucket.get_key(self.name) is not None + return self.bucket.get_blob(self.name) is not None def rename(self, new_name): - """Renames this key using copy and delete operations. + """Renames this blob using copy and delete operations. - Effectively, copies key to the same bucket with a new name, then - deletes the key. + Effectively, copies blob to the same bucket with a new name, then + deletes the blob. .. warning:: This method will first duplicate the data and then delete the - old key. This means that with very large objects renaming + old blob.
This means that with very large objects renaming could be a very (temporarily) costly or a very slow operation. :type new_name: string - :param new_name: The new name for this key. + :param new_name: The new name for this blob. - :rtype: :class:`Key` - :returns: The newly-copied key. + :rtype: :class:`Blob` + :returns: The newly-copied blob. """ - new_key = self.bucket.copy_key(self, self.bucket, new_name) - self.bucket.delete_key(self) - return new_key + new_blob = self.bucket.copy_blob(self, self.bucket, new_name) + self.bucket.delete_blob(self) + return new_blob def delete(self): - """Deletes a key from Cloud Storage. + """Deletes a blob from Cloud Storage. - :rtype: :class:`Key` - :returns: The key that was just deleted. + :rtype: :class:`Blob` + :returns: The blob that was just deleted. :raises: :class:`gcloud.storage.exceptions.NotFound` (propagated from - :meth:`gcloud.storage.bucket.Bucket.delete_key`). + :meth:`gcloud.storage.bucket.Bucket.delete_blob`). """ - return self.bucket.delete_key(self) + return self.bucket.delete_blob(self) def download_to_file(self, file_obj): - """Download the contents of this key into a file-like object. + """Download the contents of this blob into a file-like object. :type file_obj: file - :param file_obj: A file handle to which to write the key's data. + :param file_obj: A file handle to which to write the blob's data. :raises: :class:`gcloud.storage.exceptions.NotFound` """ @@ -246,7 +246,7 @@ def download_to_file(self, file_obj): get_contents_to_file = download_to_file def download_to_filename(self, filename): - """Download the contents of this key into a named file. + """Download the contents of this blob into a named file. :type filename: string :param filename: A filename to be passed to ``open``. @@ -267,10 +267,10 @@ def download_to_filename(self, filename): get_contents_to_filename = download_to_filename def download_as_string(self): - """Download the contents of this key as a string. + """Download the contents of this blob as a string. :rtype: string - :returns: The data stored in this key. + :returns: The data stored in this blob. :raises: :class:`gcloud.storage.exceptions.NotFound` """ string_buffer = StringIO() @@ -282,11 +282,11 @@ def download_as_string(self): def upload_from_file(self, file_obj, rewind=False, size=None, content_type=None, num_retries=6): - """Upload the contents of this key from a file-like object. + """Upload the contents of this blob from a file-like object. .. note:: - The effect of uploading to an existing key depends on the - "versioning" and "lifecycle" policies defined on the key's + The effect of uploading to an existing blob depends on the + "versioning" and "lifecycle" policies defined on the blob's bucket. In the absence of those policies, upload will overwrite any existing contents. @@ -356,11 +356,11 @@ def upload_from_file(self, file_obj, rewind=False, size=None, set_contents_from_file = upload_from_file def upload_from_filename(self, filename): - """Upload this key's contents from the content of f named file. + """Upload this blob's contents from the content of a named file. .. note:: - The effect of uploading to an existing key depends on the - "versioning" and "lifecycle" policies defined on the key's + The effect of uploading to an existing blob depends on the + "versioning" and "lifecycle" policies defined on the blob's bucket. In the absence of those policies, upload will overwrite any existing contents. 
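The renamed download and upload helpers pair up symmetrically: ``upload_from_string`` with ``download_as_string``, and ``upload_from_filename`` with ``download_to_filename``. A minimal round trip under the new names, assuming a ``bucket`` obtained as in the earlier examples::

    >>> blob = bucket.new_blob('round-trip.txt')
    >>> blob.upload_from_string('some text to keep')
    >>> blob.download_as_string()
    'some text to keep'
    >>> blob.download_to_filename('round-trip-copy.txt')

As with ``new_key`` before it, ``new_blob`` only creates the object locally; nothing exists in Cloud Storage until ``upload_from_string`` stores data in it.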
@@ -381,11 +381,11 @@ set_contents_from_filename = upload_from_filename def upload_from_string(self, data, content_type='text/plain'): - """Upload contents of this key from the provided string. + """Upload contents of this blob from the provided string. .. note:: - The effect of uploading to an existing key depends on the - "versioning" and "lifecycle" policies defined on the key's + The effect of uploading to an existing blob depends on the + "versioning" and "lifecycle" policies defined on the blob's bucket. In the absence of those policies, upload will overwrite any existing contents. @@ -395,10 +395,10 @@ API documents for details. :type data: string - :param data: The data to store in this key. + :param data: The data to store in this blob. - :rtype: :class:`Key` - :returns: The updated Key object. + :rtype: :class:`Blob` + :returns: The updated Blob object. """ string_buffer = StringIO() string_buffer.write(data) @@ -411,7 +411,7 @@ set_contents_from_string = upload_from_string def make_public(self): - """Make this key public giving all users read access. + """Make this blob public giving all users read access. :returns: The current object. """ diff --git a/gcloud/storage/bucket.py b/gcloud/storage/bucket.py index 021ee8c47c33..bbc8ca197d05 100644 --- a/gcloud/storage/bucket.py +++ b/gcloud/storage/bucket.py @@ -22,35 +22,35 @@ from gcloud.storage.acl import BucketACL from gcloud.storage.acl import DefaultObjectACL from gcloud.storage.iterator import Iterator -from gcloud.storage.key import Key +from gcloud.storage.blob import Blob import six -class _KeyIterator(Iterator): - """An iterator listing keys in a bucket +class _BlobIterator(Iterator): + """An iterator listing blobs in a bucket You shouldn't have to use this directly, but instead should use the - helper methods on :class:`gcloud.storage.key.Bucket` objects. + helper methods on :class:`gcloud.storage.bucket.Bucket` objects. :type bucket: :class:`gcloud.storage.bucket.Bucket` - :param bucket: The bucket from which to list keys. + :param bucket: The bucket from which to list blobs. """ def __init__(self, bucket, extra_params=None): self.bucket = bucket self.prefixes = () - super(_KeyIterator, self).__init__( + super(_BlobIterator, self).__init__( connection=bucket.connection, path=bucket.path + '/o', extra_params=extra_params) def get_items_from_response(self, response): - """Yield :class:`.storage.key.Key` items from response. + """Yield :class:`.storage.blob.Blob` items from response. :type response: dict - :param response: The JSON API response for a page of keys. + :param response: The JSON API response for a page of blobs. """ self.prefixes = tuple(response.get('prefixes', ())) for item in response.get('items', []): - yield Key.from_dict(item, bucket=self.bucket) + yield Blob.from_dict(item, bucket=self.bucket) class Bucket(_PropertyMixin): @@ -62,7 +62,7 @@ class Bucket(_PropertyMixin): :type name: string :param name: The name of the bucket.
""" - _iterator_class = _KeyIterator + _iterator_class = _BlobIterator CUSTOM_PROPERTY_ACCESSORS = { 'acl': 'acl', @@ -110,8 +110,8 @@ def __repr__(self): def __iter__(self): return iter(self._iterator_class(bucket=self)) - def __contains__(self, key): - return self.get_key(key) is not None + def __contains__(self, blob): + return self.get_blob(blob) is not None @property def acl(self): @@ -144,68 +144,69 @@ def path(self): return '/b/' + self.name - def get_key(self, key): - """Get a key object by name. + def get_blob(self, blob): + """Get a blob object by name. - This will return None if the key doesn't exist:: + This will return None if the blob doesn't exist:: >>> from gcloud import storage >>> connection = storage.get_connection(project) >>> bucket = connection.get_bucket('my-bucket') - >>> print bucket.get_key('/path/to/key.txt') - - >>> print bucket.get_key('/does-not-exist.txt') + >>> print bucket.get_blob('/path/to/blob.txt') + + >>> print bucket.get_blob('/does-not-exist.txt') None - :type key: string or :class:`gcloud.storage.key.Key` - :param key: The name of the key to retrieve. + :type blob: string or :class:`gcloud.storage.blob.Blob` + :param blob: The name of the blob to retrieve. - :rtype: :class:`gcloud.storage.key.Key` or None - :returns: The key object if it exists, otherwise None. + :rtype: :class:`gcloud.storage.blob.Blob` or None + :returns: The blob object if it exists, otherwise None. """ - # Coerce this to a key object (either from a Key or a string). - key = self.new_key(key) + # Coerce this -- either from a Blob or a string. + blob = self.new_blob(blob) try: - response = self.connection.api_request(method='GET', path=key.path) - return Key.from_dict(response, bucket=self) + response = self.connection.api_request(method='GET', + path=blob.path) + return Blob.from_dict(response, bucket=self) except exceptions.NotFound: return None - def get_all_keys(self): - """List all the keys in this bucket. + def get_all_blobs(self): + """List all the blobs in this bucket. - This will **not** retrieve all the data for all the keys, it - will only retrieve the keys. + This will **not** retrieve all the data for all the blobs, it + will only retrieve the blob paths. This is equivalent to:: - keys = [key for key in bucket] + blobs = [blob for blob in bucket] - :rtype: list of :class:`gcloud.storage.key.Key` - :returns: A list of all the Key objects in this bucket. + :rtype: list of :class:`gcloud.storage.blob.Blob` + :returns: A list of all the Blob objects in this bucket. """ return list(self) def iterator(self, prefix=None, delimiter=None, max_results=None, versions=None): - """Return an iterator used to find keys in the bucket. + """Return an iterator used to find blobs in the bucket. :type prefix: string or None - :param prefix: optional prefix used to filter keys. + :param prefix: optional prefix used to filter blobs. :type delimiter: string or None :param delimiter: optional delimter, used with ``prefix`` to emulate hierarchy. :type max_results: integer or None - :param max_results: maximum number of keys to return. + :param max_results: maximum number of blobs to return. :type versions: boolean or None :param versions: whether object versions should be returned as - separate keys. + separate blobs. 
- :rtype: :class:`_KeyIterator` + :rtype: :class:`_BlobIterator` """ extra_params = {} @@ -223,26 +224,26 @@ def iterator(self, prefix=None, delimiter=None, max_results=None, return self._iterator_class(self, extra_params=extra_params) - def new_key(self, key): - """Given path name (or Key), return a :class:`.storage.key.Key` object. + def new_blob(self, blob): + """Given path name (or Blob), return a :class:`Blob` object. - This is really useful when you're not sure if you have a Key - object or a string path name. Given either of those types, this - returns the corresponding Key object. + This is really useful when you're not sure if you have a ``Blob`` + instance or a string path name. Given either of those types, this + returns the corresponding ``Blob``. - :type key: string or :class:`gcloud.storage.key.Key` - :param key: A path name or actual key object. + :type blob: string or :class:`gcloud.storage.blob.Blob` + :param blob: A path name or actual blob object. - :rtype: :class:`gcloud.storage.key.Key` - :returns: A Key object with the path provided. + :rtype: :class:`gcloud.storage.blob.Blob` + :returns: A Blob object with the path provided. """ - if isinstance(key, Key): - return key + if isinstance(blob, Blob): + return blob - if isinstance(key, six.string_types): - return Key(bucket=self, name=key) + if isinstance(blob, six.string_types): + return Blob(bucket=self, name=blob) - raise TypeError('Invalid key: %s' % key) + raise TypeError('Invalid blob: %s' % blob) def delete(self, force=False): """Delete this bucket. @@ -253,23 +254,23 @@ not empty, this will raise an Exception. If you want to delete a non-empty bucket you can pass in a force - parameter set to true. This will iterate through the bucket's - keys and delete the related objects, before deleting the bucket. + parameter set to ``True``. This will iterate through and delete the + bucket's objects, before deleting the bucket. :type force: boolean - :param full: If True, empties the bucket's objects then deletes it. + :param force: If True, empties the bucket's objects then deletes it. :raises: :class:`gcloud.storage.exceptions.NotFound` if the bucket does not exist, or :class:`gcloud.storage.exceptions.Conflict` if the - bucket has keys and `force` is not passed. + bucket has blobs and `force` is not passed. """ return self.connection.delete_bucket(self.name, force=force) - def delete_key(self, key): - """Deletes a key from the current bucket. + def delete_blob(self, blob): - If the key isn't found, raise a + """Deletes a blob from the current bucket. + If the blob isn't found, raise a :class:`gcloud.storage.exceptions.NotFound`. For example:: @@ -278,79 +279,79 @@ >>> from gcloud.storage import exceptions >>> connection = storage.get_connection(project) >>> bucket = connection.get_bucket('my-bucket') - >>> print bucket.get_all_keys() - [<Key: my-bucket, my-file.txt>] - >>> bucket.delete_key('my-file.txt') + >>> print bucket.get_all_blobs() + [<Blob: my-bucket, my-file.txt>] + >>> bucket.delete_blob('my-file.txt') >>> try: - ... bucket.delete_key('doesnt-exist') + ... bucket.delete_blob('doesnt-exist') ... except exceptions.NotFound: ... pass - :type key: string or :class:`gcloud.storage.key.Key` - :param key: A key name or Key object to delete. + :type blob: string or :class:`gcloud.storage.blob.Blob` + :param blob: A blob name or Blob object to delete. - :rtype: :class:`gcloud.storage.key.Key` - :returns: The key that was just deleted.
+ :rtype: :class:`gcloud.storage.blob.Blob` + :returns: The blob that was just deleted. :raises: :class:`gcloud.storage.exceptions.NotFound` (to suppress - the exception, call ``delete_keys``, passing a no-op + the exception, call ``delete_blobs``, passing a no-op ``on_error`` callback, e.g.:: - >>> bucket.delete_keys([key], on_error=lambda key: pass) + >>> bucket.delete_blobs([blob], on_error=lambda blob: None) """ - key = self.new_key(key) - self.connection.api_request(method='DELETE', path=key.path) - return key + blob = self.new_blob(blob) + self.connection.api_request(method='DELETE', path=blob.path) + return blob - def delete_keys(self, keys, on_error=None): - """Deletes a list of keys from the current bucket. + def delete_blobs(self, blobs, on_error=None): + """Deletes a list of blobs from the current bucket. - Uses :func:`Bucket.delete_key` to delete each individual key. + Uses :func:`Bucket.delete_blob` to delete each individual blob. - :type keys: list of string or :class:`gcloud.storage.key.Key` - :param keys: A list of key names or Key objects to delete. + :type blobs: list of string or :class:`gcloud.storage.blob.Blob` + :param blobs: A list of blob names or Blob objects to delete. - :type on_error: a callable taking (key) - :param on_error: If not ``None``, called once for each key raising + :type on_error: a callable taking (blob) + :param on_error: If not ``None``, called once for each blob raising :class:`gcloud.storage.exceptions.NotFound`; otherwise, the exception is propagated. :raises: :class:`gcloud.storage.exceptions.NotFound` (if `on_error` is not passed). """ - for key in keys: + for blob in blobs: try: - self.delete_key(key) + self.delete_blob(blob) except exceptions.NotFound: if on_error is not None: - on_error(key) + on_error(blob) else: raise - def copy_key(self, key, destination_bucket, new_name=None): - """Copy the given key to the given bucket, optionally with a new name. + def copy_blob(self, blob, destination_bucket, new_name=None): + """Copy the given blob to the given bucket, optionally with a new name. - :type key: string or :class:`gcloud.storage.key.Key` - :param key: The key to be copied. + :type blob: string or :class:`gcloud.storage.blob.Blob` + :param blob: The blob to be copied. :type destination_bucket: :class:`gcloud.storage.bucket.Bucket` - :param destination_bucket: The bucket into which the key should be + :param destination_bucket: The bucket into which the blob should be copied. :type new_name: string :param new_name: (optional) the new name for the copied file. - :rtype: :class:`gcloud.storage.key.Key` - :returns: The new Key. + :rtype: :class:`gcloud.storage.blob.Blob` + :returns: The new Blob. """ if new_name is None: - new_name = key.name - new_key = destination_bucket.new_key(new_name) - api_path = key.path + '/copyTo' + new_key.path + new_name = blob.name + new_blob = destination_bucket.new_blob(new_name) + api_path = blob.path + '/copyTo' + new_blob.path self.connection.api_request(method='POST', path=api_path) - return new_key + return new_blob - def upload_file(self, filename, key=None): + def upload_file(self, filename, blob=None): """Shortcut method to upload a file into this bucket. Use this method to quickly put a local file in Cloud Storage.
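Because a ``lambda`` body must be an expression, the no-op callback in the docstring above is spelled ``lambda blob: None``; an equivalent, more readable form is a small named function. A hedged sketch of bulk deletion that ignores missing blobs, assuming a ``bucket`` as in the examples above and hypothetical blob names::

    >>> def ignore_missing(blob):
    ...     pass  # called once per blob that raised NotFound
    >>> bucket.delete_blobs(['kitten.jpg', 'no-such-file.txt'],
    ...                     on_error=ignore_missing)

Without ``on_error``, the first missing blob raises :class:`gcloud.storage.exceptions.NotFound` and the remaining deletions are skipped.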
@@ -361,36 +362,36 @@ >>> connection = storage.get_connection(project) >>> bucket = connection.get_bucket('my-bucket') >>> bucket.upload_file('~/my-file.txt', 'remote-text-file.txt') - >>> print bucket.get_all_keys() - [<Key: my-bucket, remote-text-file.txt>] + >>> print bucket.get_all_blobs() + [<Blob: my-bucket, remote-text-file.txt>] - If you don't provide a key value, we will try to upload the file - using the local filename as the key (**not** the complete + If you don't provide a blob value, we will try to upload the file + using the local filename as the blob (**not** the complete path):: >>> from gcloud import storage >>> connection = storage.get_connection(project) >>> bucket = connection.get_bucket('my-bucket') >>> bucket.upload_file('~/my-file.txt') - >>> print bucket.get_all_keys() - [<Key: my-bucket, my-file.txt>] + >>> print bucket.get_all_blobs() + [<Blob: my-bucket, my-file.txt>] :type filename: string :param filename: Local path to the file you want to upload. - :type key: string or :class:`gcloud.storage.key.Key` - :param key: The key (either an object or a remote path) of where - to put the file. If this is blank, we will try to - upload the file to the root of the bucket with the - same name as on your local file system. + :type blob: string or :class:`gcloud.storage.blob.Blob` + :param blob: The blob (either an object or a remote path) of where + to put the file. If this is blank, we will try to + upload the file to the root of the bucket with the + same name as on your local file system. """ - if key is None: - key = os.path.basename(filename) - key = self.new_key(key) - key.upload_from_filename(filename) - return key + if blob is None: + blob = os.path.basename(filename) + blob = self.new_blob(blob) + blob.upload_from_filename(filename) + return blob - def upload_file_object(self, file_obj, key=None): + def upload_file_object(self, file_obj, blob=None): """Shortcut method to upload a file object into this bucket. Use this method to quickly put a local file in Cloud Storage. @@ -401,34 +402,34 @@ >>> from gcloud import storage >>> connection = storage.get_connection(project) >>> bucket = connection.get_bucket('my-bucket') >>> bucket.upload_file(open('~/my-file.txt'), 'remote-text-file.txt') - >>> print bucket.get_all_keys() - [<Key: my-bucket, remote-text-file.txt>] + >>> print bucket.get_all_blobs() + [<Blob: my-bucket, remote-text-file.txt>] - If you don't provide a key value, we will try to upload the file - using the local filename as the key (**not** the complete + If you don't provide a blob value, we will try to upload the file + using the local filename as the blob (**not** the complete path):: >>> from gcloud import storage >>> connection = storage.get_connection(project) >>> bucket = connection.get_bucket('my-bucket') >>> bucket.upload_file(open('~/my-file.txt')) - >>> print bucket.get_all_keys() - [<Key: my-bucket, my-file.txt>] + >>> print bucket.get_all_blobs() + [<Blob: my-bucket, my-file.txt>] :type file_obj: file :param file_obj: A file handle open for reading. - :type key: string or :class:`gcloud.storage.key.Key` - :param key: The key (either an object or a remote path) of where - to put the file. If this is blank, we will try to - upload the file to the root of the bucket with the - same name as on your local file system. + :type blob: string or :class:`gcloud.storage.blob.Blob` + :param blob: The blob (either an object or a remote path) of where + to put the file. If this is blank, we will try to + upload the file to the root of the bucket with the + same name as on your local file system.
""" - if key: - key = self.new_key(key) + if blob: + blob = self.new_blob(blob) else: - key = self.new_key(os.path.basename(file_obj.name)) - return key.upload_from_file(file_obj) + blob = self.new_blob(os.path.basename(file_obj.name)) + return blob.upload_from_file(file_obj) def get_cors(self): """Retrieve CORS policies configured for this bucket. @@ -652,7 +653,7 @@ def configure_website(self, main_page_suffix=None, not_found_page=None): (and to do that, you need to get approved somehow...). If you want this bucket to host a website, just provide the name - of an index page and a page to use when a key isn't found:: + of an index page and a page to use when a blob isn't found:: >>> from gcloud import storage >>> connection = storage.get_connection(project) @@ -695,7 +696,7 @@ def make_public(self, recursive=False, future=False): """Make a bucket public. :type recursive: boolean - :param recursive: If True, this will make all keys inside the bucket + :param recursive: If True, this will make all blobs inside the bucket public as well. :type future: boolean @@ -711,6 +712,6 @@ def make_public(self, recursive=False, future=False): doa.save() if recursive: - for key in self: - key.acl.all().grant_read() - key.save_acl() + for blob in self: + blob.acl.all().grant_read() + blob.save_acl() diff --git a/gcloud/storage/connection.py b/gcloud/storage/connection.py index 5abe7f57ef65..9820f54538e7 100644 --- a/gcloud/storage/connection.py +++ b/gcloud/storage/connection.py @@ -333,7 +333,7 @@ def api_request(self, method, path, query_params=None, def get_all_buckets(self): """Get all buckets in the project. - This will not populate the list of keys available in each + This will not populate the list of blobs available in each bucket. You can also iterate over the connection object, so these two @@ -469,14 +469,14 @@ def delete_bucket(self, bucket, force=False): :raises: :class:`gcloud.storage.exceptions.NotFound` if the bucket doesn't exist, or :class:`gcloud.storage.exceptions.Conflict` if the - bucket has keys and `force` is not passed. + bucket has blobs and `force` is not passed. """ bucket = self.new_bucket(bucket) # This force delete operation is slow. if force: - for key in bucket: - key.delete() + for blob in bucket: + blob.delete() self.api_request(method='DELETE', path=bucket.path) return True @@ -513,7 +513,7 @@ def generate_signed_url(self, resource, expiration, :type resource: string :param resource: A pointer to a specific resource - (typically, ``/bucket-name/path/to/key.txt``). + (typically, ``/bucket-name/path/to/blob.txt``). :type expiration: int, long, datetime.datetime, datetime.timedelta :param expiration: When the signed URL should expire. diff --git a/gcloud/storage/demo/demo.py b/gcloud/storage/demo/demo.py index 541a0cc9be1d..45e81b2fd45a 100644 --- a/gcloud/storage/demo/demo.py +++ b/gcloud/storage/demo/demo.py @@ -35,17 +35,17 @@ # Let's look at all of the buckets again... print(connection.get_all_buckets()) -# How about we create a new key inside this bucket. -key = bucket.new_key("my-new-file.txt") +# How about we create a new blob inside this bucket. +blob = bucket.new_blob("my-new-file.txt") # Now let's put some data in there. -key.set_contents_from_string("this is some data!") +blob.set_contents_from_string("this is some data!") # ... and we can read that data back again. -print(key.get_contents_as_string()) +print(blob.get_contents_as_string()) -# Now let's delete that key. -print(key.delete()) +# Now let's delete that blob. 
+print(blob.delete()) # And now that we're done, let's delete that bucket... print(bucket.delete()) diff --git a/gcloud/storage/iterator.py b/gcloud/storage/iterator.py index f59e569c3869..a1c68e32b1b3 100644 --- a/gcloud/storage/iterator.py +++ b/gcloud/storage/iterator.py @@ -126,7 +126,7 @@ def get_items_from_response(self, response): accept the API response of a request for the next page of items, and return a list (or other iterable) of items. - Typically this method will construct a Bucket or a Key from the + Typically this method will construct a Bucket or a Blob from the page of results in the response. :type response: dict diff --git a/gcloud/storage/test_acl.py b/gcloud/storage/test_acl.py index b0ad5d1dd0b4..0028f853019b 100644 --- a/gcloud/storage/test_acl.py +++ b/gcloud/storage/test_acl.py @@ -384,7 +384,7 @@ def test_add_entity_hit(self): from gcloud.storage.acl import _ACLEntity TYPE = 'type' ID = 'id' - KEY = '%s-%s' % (TYPE, ID) + ENTITY_VAL = '%s-%s' % (TYPE, ID) ROLE = 'role' entity = _ACLEntity(TYPE, ID) entity.grant(ROLE) @@ -393,8 +393,8 @@ def test_add_entity_hit(self): before = acl.entity(TYPE, ID) acl.add_entity(entity) self.assertTrue(acl.loaded) - self.assertFalse(acl.get_entity(KEY) is before) - self.assertTrue(acl.get_entity(KEY) is entity) + self.assertFalse(acl.get_entity(ENTITY_VAL) is before) + self.assertTrue(acl.get_entity(ENTITY_VAL) is entity) self.assertEqual(list(acl), [{'entity': 'type-id', 'role': ROLE}]) self.assertEqual(list(acl.get_entities()), [entity]) @@ -659,38 +659,38 @@ def _makeOne(self, *args, **kw): return self._getTargetClass()(*args, **kw) def test_ctor(self): - key = object() - acl = self._makeOne(key) + blob = object() + acl = self._makeOne(blob) self.assertEqual(acl.entities, {}) self.assertFalse(acl.loaded) - self.assertTrue(acl.key is key) + self.assertTrue(acl.blob is blob) def test_reload_eager_empty(self): NAME = 'name' - KEY = 'key' + BLOB_NAME = 'blob-name' ROLE = 'role' after = {'items': [{'entity': 'allUsers', 'role': ROLE}]} connection = _Connection(after) bucket = _Bucket(connection, NAME) - key = _Key(bucket, KEY) - acl = self._makeOne(key) + blob = _Blob(bucket, BLOB_NAME) + acl = self._makeOne(blob) acl.loaded = True self.assertTrue(acl.reload() is acl) self.assertEqual(list(acl), after['items']) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'GET') - self.assertEqual(kw[0]['path'], '/b/name/o/%s/acl' % KEY) + self.assertEqual(kw[0]['path'], '/b/name/o/%s/acl' % BLOB_NAME) def test_reload_eager_nonempty(self): NAME = 'name' - KEY = 'key' + BLOB_NAME = 'blob-name' ROLE = 'role' after = {'items': []} connection = _Connection(after) bucket = _Bucket(connection, NAME) - key = _Key(bucket, KEY) - acl = self._makeOne(key) + blob = _Blob(bucket, BLOB_NAME) + acl = self._makeOne(blob) acl.loaded = True acl.entity('allUsers', ROLE) self.assertTrue(acl.reload() is acl) @@ -698,57 +698,57 @@ def test_reload_eager_nonempty(self): def test_reload_lazy(self): NAME = 'name' - KEY = 'key' + BLOB_NAME = 'blob-name' ROLE = 'role' after = {'items': [{'entity': 'allUsers', 'role': ROLE}]} connection = _Connection(after) bucket = _Bucket(connection, NAME) - key = _Key(bucket, KEY) - acl = self._makeOne(key) + blob = _Blob(bucket, BLOB_NAME) + acl = self._makeOne(blob) self.assertTrue(acl.reload() is acl) self.assertEqual(list(acl), [{'entity': 'allUsers', 'role': ROLE}]) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'GET') - 
self.assertEqual(kw[0]['path'], '/b/name/o/%s/acl' % KEY) + self.assertEqual(kw[0]['path'], '/b/name/o/%s/acl' % BLOB_NAME) def test_save_none_set_none_passed(self): NAME = 'name' - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection, NAME) - key = _Key(bucket, KEY) - acl = self._makeOne(key) + blob = _Blob(bucket, BLOB_NAME) + acl = self._makeOne(blob) self.assertTrue(acl.save() is acl) kw = connection._requested self.assertEqual(len(kw), 0) def test_save_existing_set_none_passed(self): NAME = 'name' - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection({'foo': 'Foo', 'acl': []}) bucket = _Bucket(connection, NAME) - key = _Key(bucket, KEY) - acl = self._makeOne(key) + blob = _Blob(bucket, BLOB_NAME) + acl = self._makeOne(blob) acl.loaded = True self.assertTrue(acl.save() is acl) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY) + self.assertEqual(kw[0]['path'], '/b/name/o/%s' % BLOB_NAME) self.assertEqual(kw[0]['data'], {'acl': []}) self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) def test_save_existing_set_new_passed(self): NAME = 'name' - KEY = 'key' + BLOB_NAME = 'blob-name' ROLE = 'role' new_acl = [{'entity': 'allUsers', 'role': ROLE}] connection = _Connection({'foo': 'Foo', 'acl': new_acl}) bucket = _Bucket(connection, NAME) - key = _Key(bucket, KEY) - acl = self._makeOne(key) + blob = _Blob(bucket, BLOB_NAME) + acl = self._makeOne(blob) acl.loaded = True acl.entity('allUsers', 'other-role') self.assertTrue(acl.save(new_acl) is acl) @@ -756,18 +756,18 @@ def test_save_existing_set_new_passed(self): kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY) + self.assertEqual(kw[0]['path'], '/b/name/o/%s' % BLOB_NAME) self.assertEqual(kw[0]['data'], {'acl': new_acl}) self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) def test_clear(self): NAME = 'name' - KEY = 'key' + BLOB_NAME = 'blob-name' ROLE = 'role' connection = _Connection({'foo': 'Foo', 'acl': []}) bucket = _Bucket(connection, NAME) - key = _Key(bucket, KEY) - acl = self._makeOne(key) + blob = _Blob(bucket, BLOB_NAME) + acl = self._makeOne(blob) acl.loaded = True acl.entity('allUsers', ROLE) self.assertTrue(acl.clear() is acl) @@ -775,16 +775,16 @@ def test_clear(self): kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY) + self.assertEqual(kw[0]['path'], '/b/name/o/%s' % BLOB_NAME) self.assertEqual(kw[0]['data'], {'acl': []}) self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) -class _Key(object): +class _Blob(object): - def __init__(self, bucket, key): + def __init__(self, bucket, blob): self.bucket = bucket - self.key = key + self.blob = blob @property def connection(self): @@ -792,7 +792,7 @@ def connection(self): @property def path(self): - return '%s/o/%s' % (self.bucket.path, self.key) + return '%s/o/%s' % (self.bucket.path, self.blob) class _Bucket(object): diff --git a/gcloud/storage/test_key.py b/gcloud/storage/test_blob.py similarity index 67% rename from gcloud/storage/test_key.py rename to gcloud/storage/test_blob.py index 466a529435ad..94b8faab24b2 100644 --- a/gcloud/storage/test_key.py +++ b/gcloud/storage/test_blob.py @@ -15,187 +15,190 @@ import unittest2 -class Test_Key(unittest2.TestCase): +class 
Test_Blob(unittest2.TestCase): def _getTargetClass(self): - from gcloud.storage.key import Key - return Key + from gcloud.storage.blob import Blob + return Blob def _makeOne(self, *args, **kw): return self._getTargetClass()(*args, **kw) def test_ctor_defaults(self): - key = self._makeOne() - self.assertEqual(key.bucket, None) - self.assertEqual(key.connection, None) - self.assertEqual(key.name, None) - self.assertEqual(key._properties, {}) - self.assertTrue(key._acl is None) + blob = self._makeOne() + self.assertEqual(blob.bucket, None) + self.assertEqual(blob.connection, None) + self.assertEqual(blob.name, None) + self.assertEqual(blob._properties, {}) + self.assertTrue(blob._acl is None) def test_ctor_explicit(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) properties = {'key': 'value'} - key = self._makeOne(bucket, KEY, properties) - self.assertTrue(key.bucket is bucket) - self.assertTrue(key.connection is connection) - self.assertEqual(key.name, KEY) - self.assertEqual(key.properties, properties) - self.assertTrue(key._acl is None) + blob = self._makeOne(bucket, BLOB_NAME, properties) + self.assertTrue(blob.bucket is bucket) + self.assertTrue(blob.connection is connection) + self.assertEqual(blob.name, BLOB_NAME) + self.assertEqual(blob.properties, properties) + self.assertTrue(blob._acl is None) def test_from_dict_defaults(self): - KEY = 'key' - properties = {'key': 'value', 'name': KEY} + BLOB_NAME = 'blob-name' + properties = {'key': 'value', 'name': BLOB_NAME} klass = self._getTargetClass() - key = klass.from_dict(properties) - self.assertEqual(key.bucket, None) - self.assertEqual(key.connection, None) - self.assertEqual(key.name, KEY) - self.assertEqual(key.properties, properties) - self.assertTrue(key._acl is None) + blob = klass.from_dict(properties) + self.assertEqual(blob.bucket, None) + self.assertEqual(blob.connection, None) + self.assertEqual(blob.name, BLOB_NAME) + self.assertEqual(blob.properties, properties) + self.assertTrue(blob._acl is None) def test_from_dict_explicit(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) - properties = {'key': 'value', 'name': KEY} + properties = {'key': 'value', 'name': BLOB_NAME} klass = self._getTargetClass() - key = klass.from_dict(properties, bucket) - self.assertTrue(key.bucket is bucket) - self.assertTrue(key.connection is connection) - self.assertEqual(key.name, KEY) - self.assertEqual(key.properties, properties) - self.assertTrue(key._acl is None) + blob = klass.from_dict(properties, bucket) + self.assertTrue(blob.bucket is bucket) + self.assertTrue(blob.connection is connection) + self.assertEqual(blob.name, BLOB_NAME) + self.assertEqual(blob.properties, properties) + self.assertTrue(blob._acl is None) def test_acl_property(self): from gcloud.storage.acl import ObjectACL - key = self._makeOne() - acl = key.acl + blob = self._makeOne() + acl = blob.acl self.assertTrue(isinstance(acl, ObjectACL)) - self.assertTrue(acl is key._acl) + self.assertTrue(acl is blob._acl) def test_path_no_bucket(self): - key = self._makeOne() - self.assertRaises(ValueError, getattr, key, 'path') + blob = self._makeOne() + self.assertRaises(ValueError, getattr, blob, 'path') def test_path_no_name(self): connection = _Connection() bucket = _Bucket(connection) - key = self._makeOne(bucket) - self.assertRaises(ValueError, getattr, key, 'path') + blob = self._makeOne(bucket) + self.assertRaises(ValueError, getattr, blob, 'path') def 
test_path_normal(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - self.assertEqual(key.path, '/b/name/o/%s' % KEY) + blob = self._makeOne(bucket, BLOB_NAME) + self.assertEqual(blob.path, '/b/name/o/%s' % BLOB_NAME) def test_path_w_slash_in_name(self): - KEY = 'parent/child' + BLOB_NAME = 'parent/child' connection = _Connection() bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - self.assertEqual(key.path, '/b/name/o/parent%2Fchild') + blob = self._makeOne(bucket, BLOB_NAME) + self.assertEqual(blob.path, '/b/name/o/parent%2Fchild') def test_public_url(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - self.assertEqual(key.public_url, + blob = self._makeOne(bucket, BLOB_NAME) + self.assertEqual(blob.public_url, 'http://commondatastorage.googleapis.com/name/%s' % - KEY) + BLOB_NAME) def test_public_url_w_slash_in_name(self): - KEY = 'parent/child' + BLOB_NAME = 'parent/child' connection = _Connection() bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) + blob = self._makeOne(bucket, BLOB_NAME) self.assertEqual( - key.public_url, + blob.public_url, 'http://commondatastorage.googleapis.com/name/parent%2Fchild') def test_generate_signed_url_w_default_method(self): - KEY = 'key' + BLOB_NAME = 'blob-name' EXPIRATION = '2014-10-16T20:34:37Z' connection = _Connection() bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - self.assertEqual(key.generate_signed_url(EXPIRATION), - 'http://example.com/abucket/akey?Signature=DEADBEEF' - '&Expiration=2014-10-16T20:34:37Z') + blob = self._makeOne(bucket, BLOB_NAME) + URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF' + '&Expiration=2014-10-16T20:34:37Z') + self.assertEqual(blob.generate_signed_url(EXPIRATION), URI) + PATH = '/name/%s' % (BLOB_NAME,) self.assertEqual(connection._signed, - [('/name/key', EXPIRATION, {'method': 'GET'})]) + [(PATH, EXPIRATION, {'method': 'GET'})]) def test_generate_signed_url_w_slash_in_name(self): - KEY = 'parent/child' + BLOB_NAME = 'parent/child' EXPIRATION = '2014-10-16T20:34:37Z' connection = _Connection() bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - self.assertEqual(key.generate_signed_url(EXPIRATION), - 'http://example.com/abucket/akey?Signature=DEADBEEF' - '&Expiration=2014-10-16T20:34:37Z') + blob = self._makeOne(bucket, BLOB_NAME) + URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF' + '&Expiration=2014-10-16T20:34:37Z') + self.assertEqual(blob.generate_signed_url(EXPIRATION), URI) self.assertEqual(connection._signed, [('/name/parent%2Fchild', EXPIRATION, {'method': 'GET'})]) def test_generate_signed_url_w_explicit_method(self): - KEY = 'key' + BLOB_NAME = 'blob-name' EXPIRATION = '2014-10-16T20:34:37Z' connection = _Connection() bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - self.assertEqual(key.generate_signed_url(EXPIRATION, method='POST'), - 'http://example.com/abucket/akey?Signature=DEADBEEF' - '&Expiration=2014-10-16T20:34:37Z') + blob = self._makeOne(bucket, BLOB_NAME) + URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF' + '&Expiration=2014-10-16T20:34:37Z') + self.assertEqual(blob.generate_signed_url(EXPIRATION, method='POST'), + URI) + PATH = '/name/%s' % (BLOB_NAME,) self.assertEqual(connection._signed, - [('/name/key', EXPIRATION, {'method': 'POST'})]) + [(PATH, EXPIRATION, {'method': 'POST'})]) def 
test_exists_miss(self): NONESUCH = 'nonesuch' connection = _Connection() bucket = _Bucket(connection) - key = self._makeOne(bucket, NONESUCH) - self.assertFalse(key.exists()) + blob = self._makeOne(bucket, NONESUCH) + self.assertFalse(blob.exists()) def test_exists_hit(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - bucket._keys[KEY] = 1 - self.assertTrue(key.exists()) + blob = self._makeOne(bucket, BLOB_NAME) + bucket._blobs[BLOB_NAME] = 1 + self.assertTrue(blob.exists()) def test_rename(self): - KEY = 'key' + BLOB_NAME = 'blob-name' NEW_NAME = 'new-name' connection = _Connection() bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - bucket._keys[KEY] = 1 - new_key = key.rename(NEW_NAME) - self.assertEqual(key.name, KEY) - self.assertEqual(new_key.name, NEW_NAME) - self.assertFalse(KEY in bucket._keys) - self.assertTrue(KEY in bucket._deleted) - self.assertTrue(NEW_NAME in bucket._keys) + blob = self._makeOne(bucket, BLOB_NAME) + bucket._blobs[BLOB_NAME] = 1 + new_blob = blob.rename(NEW_NAME) + self.assertEqual(blob.name, BLOB_NAME) + self.assertEqual(new_blob.name, NEW_NAME) + self.assertFalse(BLOB_NAME in bucket._blobs) + self.assertTrue(BLOB_NAME in bucket._deleted) + self.assertTrue(NEW_NAME in bucket._blobs) def test_delete(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - bucket._keys[KEY] = 1 - key.delete() - self.assertFalse(key.exists()) + blob = self._makeOne(bucket, BLOB_NAME) + bucket._blobs[BLOB_NAME] = 1 + blob.delete() + self.assertFalse(blob.exists()) def test_download_to_file(self): import httplib from StringIO import StringIO - KEY = 'key' + BLOB_NAME = 'blob-name' chunk1_response = {'status': httplib.PARTIAL_CONTENT, 'content-range': 'bytes 0-2/6'} chunk2_response = {'status': httplib.OK, @@ -207,10 +210,10 @@ def test_download_to_file(self): bucket = _Bucket(connection) MEDIA_LINK = 'http://example.com/media/' properties = {'mediaLink': MEDIA_LINK} - key = self._makeOne(bucket, KEY, properties) - key.CHUNK_SIZE = 3 + blob = self._makeOne(bucket, BLOB_NAME, properties) + blob.CHUNK_SIZE = 3 fh = StringIO() - key.download_to_file(fh) + blob.download_to_file(fh) self.assertEqual(fh.getvalue(), 'abcdef') def test_download_to_filename(self): @@ -219,7 +222,7 @@ def test_download_to_filename(self): import time import datetime from tempfile import NamedTemporaryFile - KEY = 'key' + BLOB_NAME = 'blob-name' chunk1_response = {'status': httplib.PARTIAL_CONTENT, 'content-range': 'bytes 0-2/6'} chunk2_response = {'status': httplib.OK, @@ -232,17 +235,17 @@ def test_download_to_filename(self): MEDIA_LINK = 'http://example.com/media/' properties = {'mediaLink': MEDIA_LINK, 'updated': '2014-12-06T13:13:50.690Z'} - key = self._makeOne(bucket, KEY, properties) - key.CHUNK_SIZE = 3 + blob = self._makeOne(bucket, BLOB_NAME, properties) + blob.CHUNK_SIZE = 3 with NamedTemporaryFile() as f: - key.download_to_filename(f.name) + blob.download_to_filename(f.name) f.flush() with open(f.name) as g: wrote = g.read() mtime = os.path.getmtime(f.name) updatedTime = time.mktime( datetime.datetime.strptime( - key.properties['updated'], + blob.properties['updated'], '%Y-%m-%dT%H:%M:%S.%fz').timetuple() ) self.assertEqual(wrote, 'abcdef') @@ -250,7 +253,7 @@ def test_download_to_filename(self): def test_download_as_string(self): import httplib - KEY = 'key' + BLOB_NAME = 'blob-name' chunk1_response = {'status': 
httplib.PARTIAL_CONTENT, 'content-range': 'bytes 0-2/6'} chunk2_response = {'status': httplib.OK, @@ -262,9 +265,9 @@ def test_download_as_string(self): bucket = _Bucket(connection) MEDIA_LINK = 'http://example.com/media/' properties = {'mediaLink': MEDIA_LINK} - key = self._makeOne(bucket, KEY, properties) - key.CHUNK_SIZE = 3 - fetched = key.download_as_string() + blob = self._makeOne(bucket, BLOB_NAME, properties) + blob.CHUNK_SIZE = 3 + fetched = blob.download_as_string() self.assertEqual(fetched, 'abcdef') def test_upload_from_file_simple(self): @@ -272,19 +275,19 @@ def test_upload_from_file_simple(self): from tempfile import NamedTemporaryFile from urlparse import parse_qsl from urlparse import urlsplit - KEY = 'key' + BLOB_NAME = 'blob-name' DATA = 'ABCDEF' response = {'status': httplib.OK} connection = _Connection( (response, ''), ) bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - key.CHUNK_SIZE = 5 + blob = self._makeOne(bucket, BLOB_NAME) + blob.CHUNK_SIZE = 5 with NamedTemporaryFile() as fh: fh.write(DATA) fh.flush() - key.upload_from_file(fh, rewind=True) + blob.upload_from_file(fh, rewind=True) rq = connection.http._requested self.assertEqual(len(rq), 1) self.assertEqual(rq[0]['method'], 'POST') @@ -294,7 +297,7 @@ def test_upload_from_file_simple(self): self.assertEqual(netloc, 'example.com') self.assertEqual(path, '/b/name/o') self.assertEqual(dict(parse_qsl(qs)), - {'uploadType': 'media', 'name': 'key'}) + {'uploadType': 'media', 'name': BLOB_NAME}) headers = dict( [(x.title(), str(y)) for x, y in rq[0]['headers'].items()]) self.assertEqual(headers['Content-Length'], '6') @@ -308,7 +311,7 @@ def test_upload_from_file_resumable(self): from gcloud._testing import _Monkey from _gcloud_vendor.apitools.base.py import http_wrapper from _gcloud_vendor.apitools.base.py import transfer - KEY = 'key' + BLOB_NAME = 'blob-name' UPLOAD_URL = 'http://example.com/upload/name/key' DATA = 'ABCDEF' loc_response = {'status': httplib.OK, 'location': UPLOAD_URL} @@ -321,14 +324,14 @@ def test_upload_from_file_resumable(self): (chunk2_response, ''), ) bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - key.CHUNK_SIZE = 5 + blob = self._makeOne(bucket, BLOB_NAME) + blob.CHUNK_SIZE = 5 # Set the threshold low enough that we force a resumable upload.
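# DATA is six bytes, so patching apitools' _RESUMABLE_UPLOAD_THRESHOLD down
# to 5 forces the resumable path (uploadType=resumable) rather than a
# one-shot media upload.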
with _Monkey(transfer, _RESUMABLE_UPLOAD_THRESHOLD=5): with NamedTemporaryFile() as fh: fh.write(DATA) fh.flush() - key.upload_from_file(fh, rewind=True) + blob.upload_from_file(fh, rewind=True) rq = connection.http._requested self.assertEqual(len(rq), 3) self.assertEqual(rq[0]['method'], 'POST') @@ -338,7 +341,7 @@ def test_upload_from_file_resumable(self): self.assertEqual(netloc, 'example.com') self.assertEqual(path, '/b/name/o') self.assertEqual(dict(parse_qsl(qs)), - {'uploadType': 'resumable', 'name': 'key'}) + {'uploadType': 'resumable', 'name': BLOB_NAME}) headers = dict( [(x.title(), str(y)) for x, y in rq[0]['headers'].items()]) self.assertEqual(headers['X-Upload-Content-Length'], '6') @@ -365,7 +368,7 @@ def test_upload_from_file_w_slash_in_name(self): from urlparse import parse_qsl from urlparse import urlsplit from _gcloud_vendor.apitools.base.py import http_wrapper - KEY = 'parent/child' + BLOB_NAME = 'parent/child' UPLOAD_URL = 'http://example.com/upload/name/parent%2Fchild' DATA = 'ABCDEF' loc_response = {'status': httplib.OK, 'location': UPLOAD_URL} @@ -378,12 +381,12 @@ def test_upload_from_file_w_slash_in_name(self): (chunk2_response, ''), ) bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - key.CHUNK_SIZE = 5 + blob = self._makeOne(bucket, BLOB_NAME) + blob.CHUNK_SIZE = 5 with NamedTemporaryFile() as fh: fh.write(DATA) fh.flush() - key.upload_from_file(fh, rewind=True) + blob.upload_from_file(fh, rewind=True) rq = connection.http._requested self.assertEqual(len(rq), 1) self.assertEqual(rq[0]['method'], 'POST') @@ -405,7 +408,7 @@ def test_upload_from_filename(self): from urlparse import parse_qsl from urlparse import urlsplit from _gcloud_vendor.apitools.base.py import http_wrapper - KEY = 'key' + BLOB_NAME = 'blob-name' UPLOAD_URL = 'http://example.com/upload/name/key' DATA = 'ABCDEF' loc_response = {'status': httplib.OK, 'location': UPLOAD_URL} @@ -418,12 +421,12 @@ def test_upload_from_filename(self): (chunk2_response, ''), ) bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - key.CHUNK_SIZE = 5 + blob = self._makeOne(bucket, BLOB_NAME) + blob.CHUNK_SIZE = 5 with NamedTemporaryFile(suffix='.jpeg') as fh: fh.write(DATA) fh.flush() - key.upload_from_filename(fh.name) + blob.upload_from_filename(fh.name) rq = connection.http._requested self.assertEqual(len(rq), 1) self.assertEqual(rq[0]['method'], 'POST') @@ -433,7 +436,7 @@ def test_upload_from_filename(self): self.assertEqual(netloc, 'example.com') self.assertEqual(path, '/b/name/o') self.assertEqual(dict(parse_qsl(qs)), - {'uploadType': 'media', 'name': 'key'}) + {'uploadType': 'media', 'name': BLOB_NAME}) headers = dict( [(x.title(), str(y)) for x, y in rq[0]['headers'].items()]) self.assertEqual(headers['Content-Length'], '6') @@ -444,7 +447,7 @@ def test_upload_from_string(self): from urlparse import parse_qsl from urlparse import urlsplit from _gcloud_vendor.apitools.base.py import http_wrapper - KEY = 'key' + BLOB_NAME = 'blob-name' UPLOAD_URL = 'http://example.com/upload/name/key' DATA = 'ABCDEF' loc_response = {'status': httplib.OK, 'location': UPLOAD_URL} @@ -457,9 +460,9 @@ def test_upload_from_string(self): (chunk2_response, ''), ) bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - key.CHUNK_SIZE = 5 - key.upload_from_string(DATA) + blob = self._makeOne(bucket, BLOB_NAME) + blob.CHUNK_SIZE = 5 + blob.upload_from_string(DATA) rq = connection.http._requested self.assertEqual(len(rq), 1) self.assertEqual(rq[0]['method'], 'POST') @@ -469,7 +472,7 @@ def 
test_upload_from_string(self): self.assertEqual(netloc, 'example.com') self.assertEqual(path, '/b/name/o') self.assertEqual(dict(parse_qsl(qs)), - {'uploadType': 'media', 'name': 'key'}) + {'uploadType': 'media', 'name': BLOB_NAME}) headers = dict( [(x.title(), str(y)) for x, y in rq[0]['headers'].items()]) self.assertEqual(headers['Content-Length'], '6') @@ -477,338 +480,338 @@ def test_upload_from_string(self): def test_make_public(self): from gcloud.storage.acl import _ACLEntity - KEY = 'key' + BLOB_NAME = 'blob-name' permissive = [{'entity': 'allUsers', 'role': _ACLEntity.READER_ROLE}] after = {'acl': permissive} connection = _Connection(after) bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - key.acl.loaded = True - key.make_public() - self.assertEqual(list(key.acl), permissive) + blob = self._makeOne(bucket, BLOB_NAME) + blob.acl.loaded = True + blob.make_public() + self.assertEqual(list(blob.acl), permissive) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY) + self.assertEqual(kw[0]['path'], '/b/name/o/%s' % BLOB_NAME) self.assertEqual(kw[0]['data'], {'acl': permissive}) self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) def test_cache_control_getter(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) CACHE_CONTROL = 'no-cache' properties = {'cacheControl': CACHE_CONTROL} - key = self._makeOne(bucket, KEY, properties) - self.assertEqual(key.cache_control, CACHE_CONTROL) + blob = self._makeOne(bucket, BLOB_NAME, properties) + self.assertEqual(blob.cache_control, CACHE_CONTROL) def test_cache_control_setter(self): - KEY = 'key' + BLOB_NAME = 'blob-name' CACHE_CONTROL = 'no-cache' after = {'cacheControl': CACHE_CONTROL} connection = _Connection(after) bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - key.cache_control = CACHE_CONTROL - self.assertEqual(key.cache_control, CACHE_CONTROL) + blob = self._makeOne(bucket, BLOB_NAME) + blob.cache_control = CACHE_CONTROL + self.assertEqual(blob.cache_control, CACHE_CONTROL) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY) + self.assertEqual(kw[0]['path'], '/b/name/o/%s' % BLOB_NAME) self.assertEqual(kw[0]['data'], {'cacheControl': CACHE_CONTROL}) self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) def test_component_count(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) COMPONENT_COUNT = 42 properties = {'componentCount': COMPONENT_COUNT} - key = self._makeOne(bucket, KEY, properties) - self.assertEqual(key.component_count, COMPONENT_COUNT) + blob = self._makeOne(bucket, BLOB_NAME, properties) + self.assertEqual(blob.component_count, COMPONENT_COUNT) def test_content_disposition_getter(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) CONTENT_DISPOSITION = 'Attachment; filename=example.jpg' properties = {'contentDisposition': CONTENT_DISPOSITION} - key = self._makeOne(bucket, KEY, properties) - self.assertEqual(key.content_disposition, CONTENT_DISPOSITION) + blob = self._makeOne(bucket, BLOB_NAME, properties) + self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION) def test_content_disposition_setter(self): - KEY = 'key' + BLOB_NAME = 'blob-name' CONTENT_DISPOSITION = 'Attachment; filename=example.jpg' after = 
{'contentDisposition': CONTENT_DISPOSITION} connection = _Connection(after) bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - key.content_disposition = CONTENT_DISPOSITION - self.assertEqual(key.content_disposition, CONTENT_DISPOSITION) + blob = self._makeOne(bucket, BLOB_NAME) + blob.content_disposition = CONTENT_DISPOSITION + self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY) + self.assertEqual(kw[0]['path'], '/b/name/o/%s' % BLOB_NAME) self.assertEqual(kw[0]['data'], {'contentDisposition': CONTENT_DISPOSITION}) self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) def test_content_encoding_getter(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) CONTENT_ENCODING = 'gzip' properties = {'contentEncoding': CONTENT_ENCODING} - key = self._makeOne(bucket, KEY, properties) - self.assertEqual(key.content_encoding, CONTENT_ENCODING) + blob = self._makeOne(bucket, BLOB_NAME, properties) + self.assertEqual(blob.content_encoding, CONTENT_ENCODING) def test_content_encoding_setter(self): - KEY = 'key' + BLOB_NAME = 'blob-name' CONTENT_ENCODING = 'gzip' after = {'contentEncoding': CONTENT_ENCODING} connection = _Connection(after) bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - key.content_encoding = CONTENT_ENCODING - self.assertEqual(key.content_encoding, CONTENT_ENCODING) + blob = self._makeOne(bucket, BLOB_NAME) + blob.content_encoding = CONTENT_ENCODING + self.assertEqual(blob.content_encoding, CONTENT_ENCODING) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY) + self.assertEqual(kw[0]['path'], '/b/name/o/%s' % BLOB_NAME) self.assertEqual(kw[0]['data'], {'contentEncoding': CONTENT_ENCODING}) self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) def test_content_language_getter(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) CONTENT_LANGUAGE = 'pt-BR' properties = {'contentLanguage': CONTENT_LANGUAGE} - key = self._makeOne(bucket, KEY, properties) - self.assertEqual(key.content_language, CONTENT_LANGUAGE) + blob = self._makeOne(bucket, BLOB_NAME, properties) + self.assertEqual(blob.content_language, CONTENT_LANGUAGE) def test_content_language_setter(self): - KEY = 'key' + BLOB_NAME = 'blob-name' CONTENT_LANGUAGE = 'pt-BR' after = {'contentLanguage': CONTENT_LANGUAGE} connection = _Connection(after) bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - key.content_language = CONTENT_LANGUAGE - self.assertEqual(key.content_language, CONTENT_LANGUAGE) + blob = self._makeOne(bucket, BLOB_NAME) + blob.content_language = CONTENT_LANGUAGE + self.assertEqual(blob.content_language, CONTENT_LANGUAGE) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY) + self.assertEqual(kw[0]['path'], '/b/name/o/%s' % BLOB_NAME) self.assertEqual(kw[0]['data'], {'contentLanguage': CONTENT_LANGUAGE}) self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) def test_content_type_getter(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) CONTENT_TYPE = 'image/jpeg' properties = {'contentType': CONTENT_TYPE} - key = 
self._makeOne(bucket, KEY, properties) - self.assertEqual(key.content_type, CONTENT_TYPE) + blob = self._makeOne(bucket, BLOB_NAME, properties) + self.assertEqual(blob.content_type, CONTENT_TYPE) def test_content_type_setter(self): - KEY = 'key' + BLOB_NAME = 'blob-name' CONTENT_TYPE = 'image/jpeg' after = {'contentType': CONTENT_TYPE} connection = _Connection(after) bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - key.content_type = CONTENT_TYPE - self.assertEqual(key.content_type, CONTENT_TYPE) + blob = self._makeOne(bucket, BLOB_NAME) + blob.content_type = CONTENT_TYPE + self.assertEqual(blob.content_type, CONTENT_TYPE) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY) + self.assertEqual(kw[0]['path'], '/b/name/o/%s' % BLOB_NAME) self.assertEqual(kw[0]['data'], {'contentType': CONTENT_TYPE}) self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) def test_crc32c_getter(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) CRC32C = 'DEADBEEF' properties = {'crc32c': CRC32C} - key = self._makeOne(bucket, KEY, properties) - self.assertEqual(key.crc32c, CRC32C) + blob = self._makeOne(bucket, BLOB_NAME, properties) + self.assertEqual(blob.crc32c, CRC32C) def test_crc32c_setter(self): - KEY = 'key' + BLOB_NAME = 'blob-name' CRC32C = 'DEADBEEF' after = {'crc32c': CRC32C} connection = _Connection(after) bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - key.crc32c = CRC32C - self.assertEqual(key.crc32c, CRC32C) + blob = self._makeOne(bucket, BLOB_NAME) + blob.crc32c = CRC32C + self.assertEqual(blob.crc32c, CRC32C) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY) + self.assertEqual(kw[0]['path'], '/b/name/o/%s' % BLOB_NAME) self.assertEqual(kw[0]['data'], {'crc32c': CRC32C}) self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) def test_etag(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) ETAG = 'ETAG' properties = {'etag': ETAG} - key = self._makeOne(bucket, KEY, properties) - self.assertEqual(key.etag, ETAG) + blob = self._makeOne(bucket, BLOB_NAME, properties) + self.assertEqual(blob.etag, ETAG) def test_generation(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) GENERATION = 42 properties = {'generation': GENERATION} - key = self._makeOne(bucket, KEY, properties) - self.assertEqual(key.generation, GENERATION) + blob = self._makeOne(bucket, BLOB_NAME, properties) + self.assertEqual(blob.generation, GENERATION) def test_id(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) ID = 'ID' properties = {'id': ID} - key = self._makeOne(bucket, KEY, properties) - self.assertEqual(key.id, ID) + blob = self._makeOne(bucket, BLOB_NAME, properties) + self.assertEqual(blob.id, ID) def test_md5_hash_getter(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) MD5_HASH = 'DEADBEEF' properties = {'md5Hash': MD5_HASH} - key = self._makeOne(bucket, KEY, properties) - self.assertEqual(key.md5_hash, MD5_HASH) + blob = self._makeOne(bucket, BLOB_NAME, properties) + self.assertEqual(blob.md5_hash, MD5_HASH) def test_md5_hash_setter(self): - KEY = 'key' + BLOB_NAME = 'blob-name' MD5_HASH = 'DEADBEEF' after 
= {'md5Hash': MD5_HASH} connection = _Connection(after) bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - key.md5_hash = MD5_HASH - self.assertEqual(key.md5_hash, MD5_HASH) + blob = self._makeOne(bucket, BLOB_NAME) + blob.md5_hash = MD5_HASH + self.assertEqual(blob.md5_hash, MD5_HASH) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY) + self.assertEqual(kw[0]['path'], '/b/name/o/%s' % BLOB_NAME) self.assertEqual(kw[0]['data'], {'md5Hash': MD5_HASH}) self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) def test_media_link(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) MEDIA_LINK = 'http://example.com/media/' properties = {'mediaLink': MEDIA_LINK} - key = self._makeOne(bucket, KEY, properties) - self.assertEqual(key.media_link, MEDIA_LINK) + blob = self._makeOne(bucket, BLOB_NAME, properties) + self.assertEqual(blob.media_link, MEDIA_LINK) def test_metadata_getter(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) METADATA = {'foo': 'Foo'} properties = {'metadata': METADATA} - key = self._makeOne(bucket, KEY, properties) - self.assertEqual(key.metadata, METADATA) + blob = self._makeOne(bucket, BLOB_NAME, properties) + self.assertEqual(blob.metadata, METADATA) def test_metadata_setter(self): - KEY = 'key' + BLOB_NAME = 'blob-name' METADATA = {'foo': 'Foo'} after = {'metadata': METADATA} connection = _Connection(after) bucket = _Bucket(connection) - key = self._makeOne(bucket, KEY) - key.metadata = METADATA - self.assertEqual(key.metadata, METADATA) + blob = self._makeOne(bucket, BLOB_NAME) + blob.metadata = METADATA + self.assertEqual(blob.metadata, METADATA) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY) + self.assertEqual(kw[0]['path'], '/b/name/o/%s' % BLOB_NAME) self.assertEqual(kw[0]['data'], {'metadata': METADATA}) self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) def test_metageneration(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) METAGENERATION = 42 properties = {'metageneration': METAGENERATION} - key = self._makeOne(bucket, KEY, properties) - self.assertEqual(key.metageneration, METAGENERATION) + blob = self._makeOne(bucket, BLOB_NAME, properties) + self.assertEqual(blob.metageneration, METAGENERATION) def test_owner(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) OWNER = {'entity': 'project-owner-12345', 'entityId': '23456'} properties = {'owner': OWNER} - key = self._makeOne(bucket, KEY, properties) - owner = key.owner + blob = self._makeOne(bucket, BLOB_NAME, properties) + owner = blob.owner self.assertEqual(owner['entity'], 'project-owner-12345') self.assertEqual(owner['entityId'], '23456') def test_self_link(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) SELF_LINK = 'http://example.com/self/' properties = {'selfLink': SELF_LINK} - key = self._makeOne(bucket, KEY, properties) - self.assertEqual(key.self_link, SELF_LINK) + blob = self._makeOne(bucket, BLOB_NAME, properties) + self.assertEqual(blob.self_link, SELF_LINK) def test_size(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) SIZE = 42 properties = 
{'size': SIZE} - key = self._makeOne(bucket, KEY, properties) - self.assertEqual(key.size, SIZE) + blob = self._makeOne(bucket, BLOB_NAME, properties) + self.assertEqual(blob.size, SIZE) def test_storage_class(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) STORAGE_CLASS = 'http://example.com/self/' properties = {'storageClass': STORAGE_CLASS} - key = self._makeOne(bucket, KEY, properties) - self.assertEqual(key.storage_class, STORAGE_CLASS) + blob = self._makeOne(bucket, BLOB_NAME, properties) + self.assertEqual(blob.storage_class, STORAGE_CLASS) def test_time_deleted(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) TIME_DELETED = '2014-11-05T20:34:37Z' properties = {'timeDeleted': TIME_DELETED} - key = self._makeOne(bucket, KEY, properties) - self.assertEqual(key.time_deleted, TIME_DELETED) + blob = self._makeOne(bucket, BLOB_NAME, properties) + self.assertEqual(blob.time_deleted, TIME_DELETED) def test_updated(self): - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection() bucket = _Bucket(connection) UPDATED = '2014-11-05T20:34:37Z' properties = {'updated': UPDATED} - key = self._makeOne(bucket, KEY, properties) - self.assertEqual(key.updated, UPDATED) + blob = self._makeOne(bucket, BLOB_NAME, properties) + self.assertEqual(blob.updated, UPDATED) class _Responder(object): @@ -850,7 +853,7 @@ def build_api_url(self, path, query_params=None, def generate_signed_url(self, resource, expiration, **kw): self._signed.append((resource, expiration, kw)) - return ('http://example.com/abucket/akey?Signature=DEADBEEF' + return ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF' '&Expiration=%s' % expiration) @@ -867,16 +870,16 @@ class _Bucket(object): def __init__(self, connection): self.connection = connection - self._keys = {} + self._blobs = {} self._deleted = [] - def get_key(self, key): - return self._keys.get(key) + def get_blob(self, blob): + return self._blobs.get(blob) - def copy_key(self, key, destination_bucket, new_name): - destination_bucket._keys[new_name] = self._keys[key.name] - return key.from_dict({'name': new_name}, bucket=destination_bucket) + def copy_blob(self, blob, destination_bucket, new_name): + destination_bucket._blobs[new_name] = self._blobs[blob.name] + return blob.from_dict({'name': new_name}, bucket=destination_bucket) - def delete_key(self, key): - del self._keys[key.name] - self._deleted.append(key.name) + def delete_blob(self, blob): + del self._blobs[blob.name] + self._deleted.append(blob.name) diff --git a/gcloud/storage/test_bucket.py b/gcloud/storage/test_bucket.py index 03ccc6018724..9fd90d4c3ef5 100644 --- a/gcloud/storage/test_bucket.py +++ b/gcloud/storage/test_bucket.py @@ -17,11 +17,11 @@ import unittest2 -class Test__KeyIterator(unittest2.TestCase): +class Test__BlobIterator(unittest2.TestCase): def _getTargetClass(self): - from gcloud.storage.bucket import _KeyIterator - return _KeyIterator + from gcloud.storage.bucket import _BlobIterator + return _BlobIterator def _makeOne(self, *args, **kw): return self._getTargetClass()(*args, **kw) @@ -45,18 +45,18 @@ def test_get_items_from_response_empty(self): self.assertEqual(iterator.prefixes, ()) def test_get_items_from_response_non_empty(self): - from gcloud.storage.key import Key - KEY = 'key' - response = {'items': [{'name': KEY}], 'prefixes': ['foo']} + from gcloud.storage.blob import Blob + BLOB_NAME = 'blob-name' + response = {'items': [{'name': BLOB_NAME}], 'prefixes': 
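# The _Connection test double replays canned responses and records each
# request; {'items': []} simulates listing a bucket with no objects.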
['foo']} connection = _Connection() bucket = _Bucket(connection) iterator = self._makeOne(bucket) - keys = list(iterator.get_items_from_response(response)) - self.assertEqual(len(keys), 1) - key = keys[0] - self.assertTrue(isinstance(key, Key)) - self.assertTrue(key.connection is connection) - self.assertEqual(key.name, KEY) + blobs = list(iterator.get_items_from_response(response)) + self.assertEqual(len(blobs), 1) + blob = blobs[0] + self.assertTrue(isinstance(blob, Blob)) + self.assertTrue(blob.connection is connection) + self.assertEqual(blob.name, BLOB_NAME) self.assertEqual(iterator.prefixes, ('foo',)) @@ -115,8 +115,8 @@ def test___iter___empty(self): NAME = 'name' connection = _Connection({'items': []}) bucket = self._makeOne(connection, NAME) - keys = list(bucket) - self.assertEqual(keys, []) + blobs = list(bucket) + self.assertEqual(blobs, []) kw, = connection._requested self.assertEqual(kw['method'], 'GET') self.assertEqual(kw['path'], '/b/%s/o' % NAME) @@ -124,13 +124,13 @@ def test___iter___empty(self): def test___iter___non_empty(self): NAME = 'name' - KEY = 'key' - connection = _Connection({'items': [{'name': KEY}]}) + BLOB_NAME = 'blob-name' + connection = _Connection({'items': [{'name': BLOB_NAME}]}) bucket = self._makeOne(connection, NAME) - keys = list(bucket) - key, = keys - self.assertTrue(key.bucket is bucket) - self.assertEqual(key.name, KEY) + blobs = list(bucket) + blob, = blobs + self.assertTrue(blob.bucket is bucket) + self.assertEqual(blob.name, BLOB_NAME) kw, = connection._requested self.assertEqual(kw['method'], 'GET') self.assertEqual(kw['path'], '/b/%s/o' % NAME) @@ -148,13 +148,13 @@ def test___contains___miss(self): def test___contains___hit(self): NAME = 'name' - KEY = 'key' - connection = _Connection({'name': KEY}) + BLOB_NAME = 'blob-name' + connection = _Connection({'name': BLOB_NAME}) bucket = self._makeOne(connection, NAME) - self.assertTrue(KEY in bucket) + self.assertTrue(BLOB_NAME in bucket) kw, = connection._requested self.assertEqual(kw['method'], 'GET') - self.assertEqual(kw['path'], '/b/%s/o/%s' % (NAME, KEY)) + self.assertEqual(kw['path'], '/b/%s/o/%s' % (NAME, BLOB_NAME)) def test_acl_property(self): from gcloud.storage.acl import BucketACL @@ -180,48 +180,48 @@ def test_path_w_name(self): bucket = self._makeOne(connection, NAME) self.assertEqual(bucket.path, '/b/%s' % NAME) - def test_get_key_miss(self): + def test_get_blob_miss(self): NAME = 'name' NONESUCH = 'nonesuch' connection = _Connection() bucket = self._makeOne(connection, NAME) - self.assertTrue(bucket.get_key(NONESUCH) is None) + self.assertTrue(bucket.get_blob(NONESUCH) is None) kw, = connection._requested self.assertEqual(kw['method'], 'GET') self.assertEqual(kw['path'], '/b/%s/o/%s' % (NAME, NONESUCH)) - def test_get_key_hit(self): + def test_get_blob_hit(self): NAME = 'name' - KEY = 'key' - connection = _Connection({'name': KEY}) + BLOB_NAME = 'blob-name' + connection = _Connection({'name': BLOB_NAME}) bucket = self._makeOne(connection, NAME) - key = bucket.get_key(KEY) - self.assertTrue(key.bucket is bucket) - self.assertEqual(key.name, KEY) + blob = bucket.get_blob(BLOB_NAME) + self.assertTrue(blob.bucket is bucket) + self.assertEqual(blob.name, BLOB_NAME) kw, = connection._requested self.assertEqual(kw['method'], 'GET') - self.assertEqual(kw['path'], '/b/%s/o/%s' % (NAME, KEY)) + self.assertEqual(kw['path'], '/b/%s/o/%s' % (NAME, BLOB_NAME)) - def test_get_all_keys_empty(self): + def test_get_all_blobs_empty(self): NAME = 'name' connection = _Connection({'items': []}) 
bucket = self._makeOne(connection, NAME) - keys = bucket.get_all_keys() - self.assertEqual(keys, []) + blobs = bucket.get_all_blobs() + self.assertEqual(blobs, []) kw, = connection._requested self.assertEqual(kw['method'], 'GET') self.assertEqual(kw['path'], '/b/%s/o' % NAME) self.assertEqual(kw['query_params'], {}) - def test_get_all_keys_non_empty(self): + def test_get_all_blobs_non_empty(self): NAME = 'name' - KEY = 'key' - connection = _Connection({'items': [{'name': KEY}]}) + BLOB_NAME = 'blob-name' + connection = _Connection({'items': [{'name': BLOB_NAME}]}) bucket = self._makeOne(connection, NAME) - keys = bucket.get_all_keys() - key, = keys - self.assertTrue(key.bucket is bucket) - self.assertEqual(key.name, KEY) + blobs = bucket.get_all_blobs() + blob, = blobs + self.assertTrue(blob.bucket is bucket) + self.assertEqual(blob.name, BLOB_NAME) kw, = connection._requested self.assertEqual(kw['method'], 'GET') self.assertEqual(kw['path'], '/b/%s/o' % NAME) @@ -232,8 +232,8 @@ def test_iterator_defaults(self): connection = _Connection({'items': []}) bucket = self._makeOne(connection, NAME) iterator = bucket.iterator() - keys = list(iterator) - self.assertEqual(keys, []) + blobs = list(iterator) + self.assertEqual(blobs, []) kw, = connection._requested self.assertEqual(kw['method'], 'GET') self.assertEqual(kw['path'], '/b/%s/o' % NAME) @@ -255,31 +255,31 @@ def test_iterator_explicit(self): max_results=10, versions=True, ) - keys = list(iterator) - self.assertEqual(keys, []) + blobs = list(iterator) + self.assertEqual(blobs, []) kw, = connection._requested self.assertEqual(kw['method'], 'GET') self.assertEqual(kw['path'], '/b/%s/o' % NAME) self.assertEqual(kw['query_params'], EXPECTED) - def test_new_key_existing(self): - from gcloud.storage.key import Key - existing = Key() + def test_new_blob_existing(self): + from gcloud.storage.blob import Blob + existing = Blob() bucket = self._makeOne() - self.assertTrue(bucket.new_key(existing) is existing) + self.assertTrue(bucket.new_blob(existing) is existing) - def test_new_key_str(self): - from gcloud.storage.key import Key - KEY = 'key' + def test_new_blob_str(self): + from gcloud.storage.blob import Blob + BLOB_NAME = 'blob-name' bucket = self._makeOne() - key = bucket.new_key(KEY) - self.assertTrue(isinstance(key, Key)) - self.assertTrue(key.bucket is bucket) - self.assertEqual(key.name, KEY) + blob = bucket.new_blob(BLOB_NAME) + self.assertTrue(isinstance(blob, Blob)) + self.assertTrue(blob.bucket is bucket) + self.assertEqual(blob.name, BLOB_NAME) - def test_new_key_invalid(self): + def test_new_blob_invalid(self): bucket = self._makeOne() - self.assertRaises(TypeError, bucket.new_key, object()) + self.assertRaises(TypeError, bucket.new_blob, object()) def test_delete_default_miss(self): from gcloud.storage.exceptions import NotFound @@ -297,130 +297,131 @@ def test_delete_explicit_hit(self): self.assertTrue(bucket.delete(True)) self.assertEqual(connection._deleted, [(NAME, True)]) - def test_delete_key_miss(self): + def test_delete_blob_miss(self): from gcloud.storage.exceptions import NotFound NAME = 'name' NONESUCH = 'nonesuch' connection = _Connection() bucket = self._makeOne(connection, NAME) - self.assertRaises(NotFound, bucket.delete_key, NONESUCH) + self.assertRaises(NotFound, bucket.delete_blob, NONESUCH) kw, = connection._requested self.assertEqual(kw['method'], 'DELETE') self.assertEqual(kw['path'], '/b/%s/o/%s' % (NAME, NONESUCH)) - def test_delete_key_hit(self): + def test_delete_blob_hit(self): NAME = 'name' - KEY = 'key' + 
BLOB_NAME = 'blob-name' connection = _Connection({}) bucket = self._makeOne(connection, NAME) - key = bucket.delete_key(KEY) - self.assertTrue(key.bucket is bucket) - self.assertEqual(key.name, KEY) + blob = bucket.delete_blob(BLOB_NAME) + self.assertTrue(blob.bucket is bucket) + self.assertEqual(blob.name, BLOB_NAME) kw, = connection._requested self.assertEqual(kw['method'], 'DELETE') - self.assertEqual(kw['path'], '/b/%s/o/%s' % (NAME, KEY)) + self.assertEqual(kw['path'], '/b/%s/o/%s' % (NAME, BLOB_NAME)) - def test_delete_keys_empty(self): + def test_delete_blobs_empty(self): NAME = 'name' connection = _Connection() bucket = self._makeOne(connection, NAME) - bucket.delete_keys([]) + bucket.delete_blobs([]) self.assertEqual(connection._requested, []) - def test_delete_keys_hit(self): + def test_delete_blobs_hit(self): NAME = 'name' - KEY = 'key' + BLOB_NAME = 'blob-name' connection = _Connection({}) bucket = self._makeOne(connection, NAME) - bucket.delete_keys([KEY]) + bucket.delete_blobs([BLOB_NAME]) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'DELETE') - self.assertEqual(kw[0]['path'], '/b/%s/o/%s' % (NAME, KEY)) + self.assertEqual(kw[0]['path'], '/b/%s/o/%s' % (NAME, BLOB_NAME)) - def test_delete_keys_miss_no_on_error(self): + def test_delete_blobs_miss_no_on_error(self): from gcloud.storage.exceptions import NotFound NAME = 'name' - KEY = 'key' + BLOB_NAME = 'blob-name' NONESUCH = 'nonesuch' connection = _Connection({}) bucket = self._makeOne(connection, NAME) - self.assertRaises(NotFound, bucket.delete_keys, [KEY, NONESUCH]) + self.assertRaises(NotFound, bucket.delete_blobs, [BLOB_NAME, NONESUCH]) kw = connection._requested self.assertEqual(len(kw), 2) self.assertEqual(kw[0]['method'], 'DELETE') - self.assertEqual(kw[0]['path'], '/b/%s/o/%s' % (NAME, KEY)) + self.assertEqual(kw[0]['path'], '/b/%s/o/%s' % (NAME, BLOB_NAME)) self.assertEqual(kw[1]['method'], 'DELETE') self.assertEqual(kw[1]['path'], '/b/%s/o/%s' % (NAME, NONESUCH)) - def test_delete_keys_miss_w_on_error(self): + def test_delete_blobs_miss_w_on_error(self): NAME = 'name' - KEY = 'key' + BLOB_NAME = 'blob-name' NONESUCH = 'nonesuch' connection = _Connection({}) bucket = self._makeOne(connection, NAME) errors = [] - bucket.delete_keys([KEY, NONESUCH], errors.append) + bucket.delete_blobs([BLOB_NAME, NONESUCH], errors.append) self.assertEqual(errors, [NONESUCH]) kw = connection._requested self.assertEqual(len(kw), 2) self.assertEqual(kw[0]['method'], 'DELETE') - self.assertEqual(kw[0]['path'], '/b/%s/o/%s' % (NAME, KEY)) + self.assertEqual(kw[0]['path'], '/b/%s/o/%s' % (NAME, BLOB_NAME)) self.assertEqual(kw[1]['method'], 'DELETE') self.assertEqual(kw[1]['path'], '/b/%s/o/%s' % (NAME, NONESUCH)) - def test_copy_keys_wo_name(self): + def test_copy_blobs_wo_name(self): SOURCE = 'source' DEST = 'dest' - KEY = 'key' + BLOB_NAME = 'blob-name' - class _Key(object): - name = KEY - path = '/b/%s/o/%s' % (SOURCE, KEY) + class _Blob(object): + name = BLOB_NAME + path = '/b/%s/o/%s' % (SOURCE, BLOB_NAME) connection = _Connection({}) source = self._makeOne(connection, SOURCE) dest = self._makeOne(connection, DEST) - key = _Key() - new_key = source.copy_key(key, dest) - self.assertTrue(new_key.bucket is dest) - self.assertEqual(new_key.name, KEY) + blob = _Blob() + new_blob = source.copy_blob(blob, dest) + self.assertTrue(new_blob.bucket is dest) + self.assertEqual(new_blob.name, BLOB_NAME) kw, = connection._requested - COPY_PATH = '/b/%s/o/%s/copyTo/b/%s/o/%s' % (SOURCE, KEY, DEST, KEY) + 
COPY_PATH = '/b/%s/o/%s/copyTo/b/%s/o/%s' % (SOURCE, BLOB_NAME, + DEST, BLOB_NAME) self.assertEqual(kw['method'], 'POST') self.assertEqual(kw['path'], COPY_PATH) - def test_copy_keys_w_name(self): + def test_copy_blobs_w_name(self): SOURCE = 'source' DEST = 'dest' - KEY = 'key' + BLOB_NAME = 'blob-name' NEW_NAME = 'new_name' - class _Key(object): - name = KEY - path = '/b/%s/o/%s' % (SOURCE, KEY) + class _Blob(object): + name = BLOB_NAME + path = '/b/%s/o/%s' % (SOURCE, BLOB_NAME) connection = _Connection({}) source = self._makeOne(connection, SOURCE) dest = self._makeOne(connection, DEST) - key = _Key() - new_key = source.copy_key(key, dest, NEW_NAME) - self.assertTrue(new_key.bucket is dest) - self.assertEqual(new_key.name, NEW_NAME) + blob = _Blob() + new_blob = source.copy_blob(blob, dest, NEW_NAME) + self.assertTrue(new_blob.bucket is dest) + self.assertEqual(new_blob.name, NEW_NAME) kw, = connection._requested - COPY_PATH = ( - '/b/%s/o/%s/copyTo/b/%s/o/%s' % (SOURCE, KEY, DEST, NEW_NAME)) + COPY_PATH = '/b/%s/o/%s/copyTo/b/%s/o/%s' % (SOURCE, BLOB_NAME, + DEST, NEW_NAME) self.assertEqual(kw['method'], 'POST') self.assertEqual(kw['path'], COPY_PATH) - def test_upload_file_default_key(self): + def test_upload_file_default_blob(self): from gcloud._testing import _Monkey from gcloud.storage import bucket as MUT BASENAME = 'file.ext' FILENAME = '/path/to/%s' % BASENAME _uploaded = [] - class _Key(object): + class _Blob(object): def __init__(self, bucket, name): self._bucket = bucket @@ -430,18 +431,18 @@ def upload_from_filename(self, filename): _uploaded.append((self._bucket, self._name, filename)) bucket = self._makeOne() - with _Monkey(MUT, Key=_Key): + with _Monkey(MUT, Blob=_Blob): bucket.upload_file(FILENAME) self.assertEqual(_uploaded, [(bucket, BASENAME, FILENAME)]) - def test_upload_file_explicit_key(self): + def test_upload_file_explicit_blob(self): from gcloud._testing import _Monkey from gcloud.storage import bucket as MUT FILENAME = '/path/to/file' - KEY = 'key' + BLOB_NAME = 'blob-name' _uploaded = [] - class _Key(object): + class _Blob(object): def __init__(self, bucket, name): self._bucket = bucket @@ -451,18 +452,18 @@ def upload_from_filename(self, filename): _uploaded.append((self._bucket, self._name, filename)) bucket = self._makeOne() - with _Monkey(MUT, Key=_Key): - bucket.upload_file(FILENAME, KEY) - self.assertEqual(_uploaded, [(bucket, KEY, FILENAME)]) + with _Monkey(MUT, Blob=_Blob): + bucket.upload_file(FILENAME, BLOB_NAME) + self.assertEqual(_uploaded, [(bucket, BLOB_NAME, FILENAME)]) - def test_upload_file_object_no_key(self): + def test_upload_file_object_no_blob(self): from gcloud._testing import _Monkey from gcloud.storage import bucket as MUT FILENAME = 'file.txt' FILEOBJECT = MockFile(FILENAME) _uploaded = [] - class _Key(object): + class _Blob(object): def __init__(self, bucket, name): self._bucket = bucket @@ -472,19 +473,19 @@ def upload_from_file(self, fh): _uploaded.append((self._bucket, self._name, fh)) bucket = self._makeOne() - with _Monkey(MUT, Key=_Key): + with _Monkey(MUT, Blob=_Blob): bucket.upload_file_object(FILEOBJECT) self.assertEqual(_uploaded, [(bucket, FILENAME, FILEOBJECT)]) - def test_upload_file_object_explicit_key(self): + def test_upload_file_object_explicit_blob(self): from gcloud._testing import _Monkey from gcloud.storage import bucket as MUT FILENAME = 'file.txt' FILEOBJECT = MockFile(FILENAME) - KEY = 'key' + BLOB_NAME = 'blob-name' _uploaded = [] - class _Key(object): + class _Blob(object): def __init__(self, bucket, name): 
self._bucket = bucket @@ -494,9 +495,9 @@ def upload_from_file(self, fh): _uploaded.append((self._bucket, self._name, fh)) bucket = self._makeOne() - with _Monkey(MUT, Key=_Key): - bucket.upload_file_object(FILEOBJECT, KEY) - self.assertEqual(_uploaded, [(bucket, KEY, FILEOBJECT)]) + with _Monkey(MUT, Blob=_Blob): + bucket.upload_file_object(FILEOBJECT, BLOB_NAME) + self.assertEqual(_uploaded, [(bucket, BLOB_NAME, FILEOBJECT)]) def test_get_cors_eager(self): NAME = 'name' @@ -925,10 +926,10 @@ def test_make_public_w_future(self): def test_make_public_recursive(self): from gcloud.storage.acl import _ACLEntity - from gcloud.storage.bucket import _KeyIterator + from gcloud.storage.bucket import _BlobIterator _saved = [] - class _Key(object): + class _Blob(object): _granted = False def __init__(self, bucket, name): @@ -948,16 +949,16 @@ def grant_read(self): def save_acl(self): _saved.append((self._bucket, self._name, self._granted)) - class _Iterator(_KeyIterator): + class _Iterator(_BlobIterator): def get_items_from_response(self, response): for item in response.get('items', []): - yield _Key(self.bucket, item['name']) + yield _Blob(self.bucket, item['name']) NAME = 'name' - KEY = 'key' + BLOB_NAME = 'blob-name' permissive = [{'entity': 'allUsers', 'role': _ACLEntity.READER_ROLE}] after = {'acl': permissive, 'defaultObjectAcl': []} - connection = _Connection(after, {'items': [{'name': KEY}]}) + connection = _Connection(after, {'items': [{'name': BLOB_NAME}]}) bucket = self._makeOne(connection, NAME) bucket.acl.loaded = True bucket.default_object_acl.loaded = True @@ -965,7 +966,7 @@ def get_items_from_response(self, response): bucket.make_public(recursive=True) self.assertEqual(list(bucket.acl), permissive) self.assertEqual(list(bucket.default_object_acl), []) - self.assertEqual(_saved, [(bucket, KEY, True)]) + self.assertEqual(_saved, [(bucket, BLOB_NAME, True)]) kw = connection._requested self.assertEqual(len(kw), 2) self.assertEqual(kw[0]['method'], 'PATCH') diff --git a/gcloud/storage/test_connection.py b/gcloud/storage/test_connection.py index 1305324b7214..2edd313aeb86 100644 --- a/gcloud/storage/test_connection.py +++ b/gcloud/storage/test_connection.py @@ -76,14 +76,14 @@ def test___iter___empty(self): {'status': '200', 'content-type': 'application/json'}, '{}', ) - keys = list(conn) - self.assertEqual(len(keys), 0) + blobs = list(conn) + self.assertEqual(len(blobs), 0) self.assertEqual(http._called_with['method'], 'GET') self.assertEqual(http._called_with['uri'], URI) def test___iter___non_empty(self): PROJECT = 'project' - KEY = 'key' + BLOB_NAME = 'blob-name' conn = self._makeOne(PROJECT) URI = '/'.join([ conn.API_BASE_URL, @@ -93,11 +93,11 @@ def test___iter___non_empty(self): ]) http = conn._http = Http( {'status': '200', 'content-type': 'application/json'}, - '{"items": [{"name": "%s"}]}' % KEY, + '{"items": [{"name": "%s"}]}' % BLOB_NAME, ) - keys = list(conn) - self.assertEqual(len(keys), 1) - self.assertEqual(keys[0].name, KEY) + blobs = list(conn) + self.assertEqual(len(blobs), 1) + self.assertEqual(blobs[0].name, BLOB_NAME) self.assertEqual(http._called_with['method'], 'GET') self.assertEqual(http._called_with['uri'], URI) @@ -122,20 +122,20 @@ def test___contains___miss(self): def test___contains___hit(self): PROJECT = 'project' - KEY = 'key' + BLOB_NAME = 'blob-name' conn = self._makeOne(PROJECT) URI = '/'.join([ conn.API_BASE_URL, 'storage', conn.API_VERSION, 'b', - 'key?project=%s' % PROJECT, + '%s?project=%s' % (BLOB_NAME, PROJECT), ]) http = conn._http = Http( 
{'status': '200', 'content-type': 'application/json'}, - '{"name": "%s"}' % KEY, + '{"name": "%s"}' % BLOB_NAME, ) - self.assertTrue(KEY in conn) + self.assertTrue(BLOB_NAME in conn) self.assertEqual(http._called_with['method'], 'GET') self.assertEqual(http._called_with['uri'], URI) @@ -370,14 +370,14 @@ def test_get_all_buckets_empty(self): {'status': '200', 'content-type': 'application/json'}, '{}', ) - keys = conn.get_all_buckets() - self.assertEqual(len(keys), 0) + blobs = conn.get_all_buckets() + self.assertEqual(len(blobs), 0) self.assertEqual(http._called_with['method'], 'GET') self.assertEqual(http._called_with['uri'], URI) def test_get_all_buckets_non_empty(self): PROJECT = 'project' - KEY = 'key' + BLOB_NAME = 'blob-name' conn = self._makeOne(PROJECT) URI = '/'.join([ conn.API_BASE_URL, @@ -387,11 +387,11 @@ def test_get_all_buckets_non_empty(self): ]) http = conn._http = Http( {'status': '200', 'content-type': 'application/json'}, - '{"items": [{"name": "%s"}]}' % KEY, + '{"items": [{"name": "%s"}]}' % BLOB_NAME, ) - keys = conn.get_all_buckets() - self.assertEqual(len(keys), 1) - self.assertEqual(keys[0].name, KEY) + blobs = conn.get_all_buckets() + self.assertEqual(len(blobs), 1) + self.assertEqual(blobs[0].name, BLOB_NAME) self.assertEqual(http._called_with['method'], 'GET') self.assertEqual(http._called_with['uri'], URI) @@ -418,23 +418,23 @@ def test_get_bucket_miss(self): def test_get_bucket_hit(self): from gcloud.storage.bucket import Bucket PROJECT = 'project' - KEY = 'key' + BLOB_NAME = 'blob-name' conn = self._makeOne(PROJECT) URI = '/'.join([ conn.API_BASE_URL, 'storage', conn.API_VERSION, 'b', - 'key?project=%s' % PROJECT, + '%s?project=%s' % (BLOB_NAME, PROJECT), ]) http = conn._http = Http( {'status': '200', 'content-type': 'application/json'}, - '{"name": "%s"}' % KEY, + '{"name": "%s"}' % BLOB_NAME, ) - bucket = conn.get_bucket(KEY) + bucket = conn.get_bucket(BLOB_NAME) self.assertTrue(isinstance(bucket, Bucket)) self.assertTrue(bucket.connection is conn) - self.assertEqual(bucket.name, KEY) + self.assertEqual(bucket.name, BLOB_NAME) self.assertEqual(http._called_with['method'], 'GET') self.assertEqual(http._called_with['uri'], URI) @@ -460,30 +460,30 @@ def test_lookup_miss(self): def test_lookup_hit(self): from gcloud.storage.bucket import Bucket PROJECT = 'project' - KEY = 'key' + BLOB_NAME = 'blob-name' conn = self._makeOne(PROJECT) URI = '/'.join([ conn.API_BASE_URL, 'storage', conn.API_VERSION, 'b', - 'key?project=%s' % PROJECT, + '%s?project=%s' % (BLOB_NAME, PROJECT), ]) http = conn._http = Http( {'status': '200', 'content-type': 'application/json'}, - '{"name": "%s"}' % KEY, + '{"name": "%s"}' % BLOB_NAME, ) - bucket = conn.lookup(KEY) + bucket = conn.lookup(BLOB_NAME) self.assertTrue(isinstance(bucket, Bucket)) self.assertTrue(bucket.connection is conn) - self.assertEqual(bucket.name, KEY) + self.assertEqual(bucket.name, BLOB_NAME) self.assertEqual(http._called_with['method'], 'GET') self.assertEqual(http._called_with['uri'], URI) def test_create_bucket_ok(self): from gcloud.storage.bucket import Bucket PROJECT = 'project' - KEY = 'key' + BLOB_NAME = 'blob-name' conn = self._makeOne(PROJECT) URI = '/'.join([ conn.API_BASE_URL, @@ -493,17 +493,17 @@ def test_create_bucket_ok(self): ]) http = conn._http = Http( {'status': '200', 'content-type': 'application/json'}, - '{"name": "%s"}' % KEY, + '{"name": "%s"}' % BLOB_NAME, ) - bucket = conn.create_bucket(KEY) + bucket = conn.create_bucket(BLOB_NAME) self.assertTrue(isinstance(bucket, Bucket)) 
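# create_bucket should wrap the canned JSON response in a Bucket that is
# bound to this connection.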
self.assertTrue(bucket.connection is conn) - self.assertEqual(bucket.name, KEY) + self.assertEqual(bucket.name, BLOB_NAME) self.assertEqual(http._called_with['method'], 'POST') self.assertEqual(http._called_with['uri'], URI) def test_delete_bucket_defaults_miss(self): - _deleted_keys = [] + _deleted_blobs = [] class _Bucket(object): @@ -512,14 +512,14 @@ def __init__(self, name): self.path = '/b/' + name PROJECT = 'project' - KEY = 'key' + BLOB_NAME = 'blob-name' conn = self._makeOne(PROJECT) URI = '/'.join([ conn.API_BASE_URL, 'storage', conn.API_VERSION, 'b', - 'key?project=%s' % PROJECT, + '%s?project=%s' % (BLOB_NAME, PROJECT), ]) http = conn._http = Http( {'status': '200', 'content-type': 'application/json'}, @@ -530,21 +530,21 @@ def _new_bucket(name): return _Bucket(name) conn.new_bucket = _new_bucket - self.assertEqual(conn.delete_bucket(KEY), True) - self.assertEqual(_deleted_keys, []) + self.assertEqual(conn.delete_bucket(BLOB_NAME), True) + self.assertEqual(_deleted_blobs, []) self.assertEqual(http._called_with['method'], 'DELETE') self.assertEqual(http._called_with['uri'], URI) def test_delete_bucket_force_True(self): - _deleted_keys = [] + _deleted_blobs = [] - class _Key(object): + class _Blob(object): def __init__(self, name): self._name = name def delete(self): - _deleted_keys.append(self._name) + _deleted_blobs.append(self._name) class _Bucket(object): @@ -553,17 +553,17 @@ def __init__(self, name): self.path = '/b/' + name def __iter__(self): - return iter([_Key(x) for x in ('foo', 'bar')]) + return iter([_Blob(x) for x in ('foo', 'bar')]) PROJECT = 'project' - KEY = 'key' + BLOB_NAME = 'blob-name' conn = self._makeOne(PROJECT) URI = '/'.join([ conn.API_BASE_URL, 'storage', conn.API_VERSION, 'b', - 'key?project=%s' % PROJECT, + '%s?project=%s' % (BLOB_NAME, PROJECT), ]) http = conn._http = Http( {'status': '200', 'content-type': 'application/json'}, @@ -574,28 +574,28 @@ def _new_bucket(name): return _Bucket(name) conn.new_bucket = _new_bucket - self.assertEqual(conn.delete_bucket(KEY, True), True) - self.assertEqual(_deleted_keys, ['foo', 'bar']) + self.assertEqual(conn.delete_bucket(BLOB_NAME, True), True) + self.assertEqual(_deleted_blobs, ['foo', 'bar']) self.assertEqual(http._called_with['method'], 'DELETE') self.assertEqual(http._called_with['uri'], URI) def test_new_bucket_w_existing(self): from gcloud.storage.bucket import Bucket PROJECT = 'project' - KEY = 'key' + BLOB_NAME = 'blob-name' conn = self._makeOne(PROJECT) - existing = Bucket(self, KEY) + existing = Bucket(self, BLOB_NAME) self.assertTrue(conn.new_bucket(existing) is existing) - def test_new_bucket_w_key(self): + def test_new_bucket_w_blob(self): from gcloud.storage.bucket import Bucket PROJECT = 'project' - KEY = 'key' + BLOB_NAME = 'blob-name' conn = self._makeOne(PROJECT) - bucket = conn.new_bucket(KEY) + bucket = conn.new_bucket(BLOB_NAME) self.assertTrue(isinstance(bucket, Bucket)) self.assertTrue(bucket.connection is conn) - self.assertEqual(bucket.name, KEY) + self.assertEqual(bucket.name, BLOB_NAME) def test_new_bucket_w_invalid(self): PROJECT = 'project' @@ -610,7 +610,7 @@ def test_generate_signed_url_w_expiration_int(self): from gcloud.storage import connection as MUT ENDPOINT = 'http://api.example.com' - RESOURCE = '/name/key' + RESOURCE = '/name/path' PROJECT = 'project' SIGNED = base64.b64encode('DEADBEEF') conn = self._makeOne(PROJECT, _Credentials()) @@ -664,8 +664,8 @@ def test_get_items_from_response_empty(self): def test_get_items_from_response_non_empty(self): from 
gcloud.storage.bucket import Bucket - KEY = 'key' - response = {'items': [{'name': KEY}]} + BLOB_NAME = 'blob-name' + response = {'items': [{'name': BLOB_NAME}]} connection = object() iterator = self._makeOne(connection) buckets = list(iterator.get_items_from_response(response)) @@ -673,7 +673,7 @@ def test_get_items_from_response_non_empty(self): bucket = buckets[0] self.assertTrue(isinstance(bucket, Bucket)) self.assertTrue(bucket.connection is connection) - self.assertEqual(bucket.name, KEY) + self.assertEqual(bucket.name, BLOB_NAME) class Test__get_expiration_seconds(unittest2.TestCase): diff --git a/pylintrc_default b/pylintrc_default index 523249fc136e..5686333ac6ca 100644 --- a/pylintrc_default +++ b/pylintrc_default @@ -69,7 +69,7 @@ ignore = # - protected-access: helpers use '_foo' of classes from generated code. # - redefined-builtin: use of 'id', 'type', 'filter' args in API-bound funcs; # use of 'NotImplemented' to map HTTP response code. -# - similarities: 'Bucket' and 'Key' define 'metageneration' and 'owner' with +# - similarities: 'Bucket' and 'Blob' define 'metageneration' and 'owner' with # identical implementation but different docstrings. # - star-args: standard Python idioms for varargs: # ancestor = Query().filter(*order_props) diff --git a/regression/storage.py b/regression/storage.py index fe07a096353a..8da52971fd83 100644 --- a/regression/storage.py +++ b/regression/storage.py @@ -40,11 +40,11 @@ def setUpModule(): def safe_delete(bucket): - for key in bucket: + for blob in bucket: try: - key.delete() + blob.delete() except storage.exceptions.NotFound: - print('Delete failed with 404: %r' % (key,)) + print('Delete failed with 404: %r' % (blob,)) # Passing force=False does not try to delete the contained files. bucket.delete(force=False) @@ -126,58 +126,58 @@ def setUpClass(cls): cls.bucket = SHARED_BUCKETS['test_bucket'] def setUp(self): - self.case_keys_to_delete = [] + self.case_blobs_to_delete = [] def tearDown(self): - for key in self.case_keys_to_delete: - key.delete() + for blob in self.case_blobs_to_delete: + blob.delete() class TestStorageWriteFiles(TestStorageFiles): def test_large_file_write_from_stream(self): - key = self.bucket.new_key('LargeFile') - self.assertEqual(key._properties, {}) + blob = self.bucket.new_blob('LargeFile') + self.assertEqual(blob._properties, {}) file_data = self.FILES['big'] with open(file_data['path'], 'rb') as file_obj: - self.bucket.upload_file_object(file_obj, key=key) - self.case_keys_to_delete.append(key) + self.bucket.upload_file_object(file_obj, blob=blob) + self.case_blobs_to_delete.append(blob) - key._properties.clear() # force a reload - self.assertEqual(key.md5_hash, file_data['hash']) + blob._properties.clear() # force a reload + self.assertEqual(blob.md5_hash, file_data['hash']) def test_small_file_write_from_filename(self): - key = self.bucket.new_key('LargeFile') - self.assertEqual(key._properties, {}) + blob = self.bucket.new_blob('LargeFile') + self.assertEqual(blob._properties, {}) file_data = self.FILES['simple'] - key.upload_from_filename(file_data['path']) - self.case_keys_to_delete.append(key) + blob.upload_from_filename(file_data['path']) + self.case_blobs_to_delete.append(blob) - key._properties.clear() # force a reload - self.assertEqual(key.md5_hash, file_data['hash']) + blob._properties.clear() # force a reload + self.assertEqual(blob.md5_hash, file_data['hash']) def test_write_metadata(self): - key = self.bucket.upload_file(self.FILES['logo']['path']) - self.case_keys_to_delete.append(key) + blob = 
self.bucket.upload_file(self.FILES['logo']['path']) + self.case_blobs_to_delete.append(blob) # NOTE: This should not be necessary. We should be able to pass # it in to upload_file and also to upload_from_string. - key.content_type = 'image/png' - key._properties.clear() # force a reload - self.assertEqual(key.content_type, 'image/png') + blob.content_type = 'image/png' + blob._properties.clear() # force a reload + self.assertEqual(blob.content_type, 'image/png') def test_direct_write_and_read_into_file(self): - key = self.bucket.new_key('MyBuffer') + blob = self.bucket.new_blob('MyBuffer') file_contents = 'Hello World' - key.upload_from_string(file_contents) - self.case_keys_to_delete.append(key) + blob.upload_from_string(file_contents) + self.case_blobs_to_delete.append(blob) - same_key = self.bucket.new_key('MyBuffer') + same_blob = self.bucket.new_blob('MyBuffer') temp_filename = tempfile.mktemp() with open(temp_filename, 'w') as file_obj: - same_key.get_contents_to_file(file_obj) + same_blob.get_contents_to_file(file_obj) with open(temp_filename, 'rb') as file_obj: stored_contents = file_obj.read() @@ -185,15 +185,15 @@ def test_direct_write_and_read_into_file(self): self.assertEqual(file_contents, stored_contents) def test_copy_existing_file(self): - key = self.bucket.upload_file(self.FILES['logo']['path'], - key='CloudLogo') - self.case_keys_to_delete.append(key) + blob = self.bucket.upload_file(self.FILES['logo']['path'], + blob='CloudLogo') + self.case_blobs_to_delete.append(blob) - new_key = self.bucket.copy_key(key, self.bucket, 'CloudLogoCopy') - self.case_keys_to_delete.append(new_key) + new_blob = self.bucket.copy_blob(blob, self.bucket, 'CloudLogoCopy') + self.case_blobs_to_delete.append(new_blob) - base_contents = key.get_contents_as_string() - copied_contents = new_key.get_contents_as_string() + base_contents = blob.get_contents_as_string() + copied_contents = new_blob.get_contents_as_string() self.assertEqual(base_contents, copied_contents) @@ -205,40 +205,40 @@ class TestStorageListFiles(TestStorageFiles): def setUpClass(cls): super(TestStorageListFiles, cls).setUpClass() # Make sure bucket empty before beginning. - for key in cls.bucket: - key.delete() + for blob in cls.bucket: + blob.delete() logo_path = cls.FILES['logo']['path'] - key = cls.bucket.upload_file(logo_path, key=cls.FILENAMES[0]) - cls.suite_keys_to_delete = [key] + blob = cls.bucket.upload_file(logo_path, blob=cls.FILENAMES[0]) + cls.suite_blobs_to_delete = [blob] - # Copy main key onto remaining in FILENAMES. + # Copy main blob onto remaining in FILENAMES. 
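# copy_blob goes through the API's copyTo endpoint, so the copies are made
# server-side; the test host never re-uploads the file data.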
for filename in cls.FILENAMES[1:]: - new_key = cls.bucket.copy_key(key, cls.bucket, filename) - cls.suite_keys_to_delete.append(new_key) + new_blob = cls.bucket.copy_blob(blob, cls.bucket, filename) + cls.suite_blobs_to_delete.append(new_blob) @classmethod def tearDownClass(cls): - for key in cls.suite_keys_to_delete: - key.delete() + for blob in cls.suite_blobs_to_delete: + blob.delete() def test_list_files(self): - all_keys = self.bucket.get_all_keys() - self.assertEqual(len(all_keys), len(self.FILENAMES)) + all_blobs = self.bucket.get_all_blobs() + self.assertEqual(len(all_blobs), len(self.FILENAMES)) def test_paginate_files(self): truncation_size = 1 count = len(self.FILENAMES) - truncation_size iterator = self.bucket.iterator(max_results=count) response = iterator.get_next_page_response() - keys = list(iterator.get_items_from_response(response)) - self.assertEqual(len(keys), count) + blobs = list(iterator.get_items_from_response(response)) + self.assertEqual(len(blobs), count) self.assertEqual(iterator.page_number, 1) self.assertTrue(iterator.next_page_token is not None) response = iterator.get_next_page_response() - last_keys = list(iterator.get_items_from_response(response)) - self.assertEqual(len(last_keys), truncation_size) + last_blobs = list(iterator.get_items_from_response(response)) + self.assertEqual(len(last_blobs), truncation_size) class TestStoragePseudoHierarchy(TestStorageFiles): @@ -256,26 +256,26 @@ class TestStoragePseudoHierarchy(TestStorageFiles): def setUpClass(cls): super(TestStoragePseudoHierarchy, cls).setUpClass() # Make sure bucket empty before beginning. - for key in cls.bucket: - key.delete() + for blob in cls.bucket: + blob.delete() simple_path = cls.FILES['simple']['path'] - key = cls.bucket.upload_file(simple_path, key=cls.FILENAMES[0]) - cls.suite_keys_to_delete = [key] + blob = cls.bucket.upload_file(simple_path, blob=cls.FILENAMES[0]) + cls.suite_blobs_to_delete = [blob] for filename in cls.FILENAMES[1:]: - new_key = cls.bucket.copy_key(key, cls.bucket, filename) - cls.suite_keys_to_delete.append(new_key) + new_blob = cls.bucket.copy_blob(blob, cls.bucket, filename) + cls.suite_blobs_to_delete.append(new_blob) @classmethod def tearDownClass(cls): - for key in cls.suite_keys_to_delete: - key.delete() + for blob in cls.suite_blobs_to_delete: + blob.delete() def test_root_level_w_delimiter(self): iterator = self.bucket.iterator(delimiter='/') response = iterator.get_next_page_response() - keys = list(iterator.get_items_from_response(response)) - self.assertEqual([key.name for key in keys], ['file01.txt']) + blobs = list(iterator.get_items_from_response(response)) + self.assertEqual([blob.name for blob in blobs], ['file01.txt']) self.assertEqual(iterator.page_number, 1) self.assertTrue(iterator.next_page_token is None) self.assertEqual(iterator.prefixes, ('parent/',)) @@ -283,8 +283,8 @@ def test_root_level_w_delimiter(self): def test_first_level(self): iterator = self.bucket.iterator(delimiter='/', prefix='parent/') response = iterator.get_next_page_response() - keys = list(iterator.get_items_from_response(response)) - self.assertEqual([key.name for key in keys], ['parent/file11.txt']) + blobs = list(iterator.get_items_from_response(response)) + self.assertEqual([blob.name for blob in blobs], ['parent/file11.txt']) self.assertEqual(iterator.page_number, 1) self.assertTrue(iterator.next_page_token is None) self.assertEqual(iterator.prefixes, ('parent/child/',)) @@ -292,8 +292,8 @@ def test_first_level(self): def test_second_level(self): iterator = 
self.bucket.iterator(delimiter='/', prefix='parent/child/') response = iterator.get_next_page_response() - keys = list(iterator.get_items_from_response(response)) - self.assertEqual([key.name for key in keys], + blobs = list(iterator.get_items_from_response(response)) + self.assertEqual([blob.name for blob in blobs], ['parent/child/file21.txt', 'parent/child/file22.txt']) self.assertEqual(iterator.page_number, 1) @@ -309,8 +309,8 @@ def test_third_level(self): iterator = self.bucket.iterator(delimiter='/', prefix='parent/child/grand/') response = iterator.get_next_page_response() - keys = list(iterator.get_items_from_response(response)) - self.assertEqual([key.name for key in keys], + blobs = list(iterator.get_items_from_response(response)) + self.assertEqual([blob.name for blob in blobs], ['parent/child/grand/file31.txt']) self.assertEqual(iterator.page_number, 1) self.assertTrue(iterator.next_page_token is None) @@ -326,33 +326,33 @@ def setUp(self): with open(logo_path, 'r') as file_obj: self.LOCAL_FILE = file_obj.read() - key = self.bucket.new_key('LogoToSign.jpg') - key.upload_from_string(self.LOCAL_FILE) - self.case_keys_to_delete.append(key) + blob = self.bucket.new_blob('LogoToSign.jpg') + blob.upload_from_string(self.LOCAL_FILE) + self.case_blobs_to_delete.append(blob) def tearDown(self): - for key in self.case_keys_to_delete: - if key.exists(): - key.delete() + for blob in self.case_blobs_to_delete: + if blob.exists(): + blob.delete() def test_create_signed_read_url(self): - key = self.bucket.new_key('LogoToSign.jpg') + blob = self.bucket.new_blob('LogoToSign.jpg') expiration = int(time.time() + 5) - signed_url = key.generate_signed_url(expiration, method='GET') + signed_url = blob.generate_signed_url(expiration, method='GET') response, content = HTTP.request(signed_url, method='GET') self.assertEqual(response.status, 200) self.assertEqual(content, self.LOCAL_FILE) def test_create_signed_delete_url(self): - key = self.bucket.new_key('LogoToSign.jpg') + blob = self.bucket.new_blob('LogoToSign.jpg') expiration = int(time.time() + 283473274) - signed_delete_url = key.generate_signed_url(expiration, - method='DELETE') + signed_delete_url = blob.generate_signed_url(expiration, + method='DELETE') response, content = HTTP.request(signed_delete_url, method='DELETE') self.assertEqual(response.status, 204) self.assertEqual(content, '') - # Check that the key has actually been deleted. - self.assertFalse(key in self.bucket) + # Check that the blob has actually been deleted. + self.assertFalse(blob in self.bucket)
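Taken together, the change is a mechanical rename: every ``Key``/``key`` spelling in the library, its tests, and the regression suite becomes ``Blob``/``blob``, while method behavior is unchanged. As a rough sketch of the resulting call pattern exercised by the regression tests above (the bucket, project, and object names here are illustrative placeholders, not values taken from this change)::

    from gcloud import storage

    bucket = storage.get_bucket('my-bucket', 'my-project')
    blob = bucket.new_blob('example.txt')        # formerly bucket.new_key(...)
    blob.upload_from_string('payload')           # method names are unchanged
    assert 'example.txt' in bucket               # membership is still by name
    fetched = bucket.get_blob('example.txt')     # formerly bucket.get_key(...)
    assert fetched.download_as_string() == 'payload'
    bucket.delete_blob('example.txt')            # formerly bucket.delete_key(...)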