Skip to content

Commit

Permalink
Enable attribute update on resource-type API.
Browse files Browse the repository at this point in the history
Currently, the resource-type API does not allow operators to update
a resource type's attributes; the API only allows Add and Delete.

So, if operators desire to change a required resource type
attribute to optional (or vice-versa), they need to do it
manually in the database, or delete and re-create the attribute,
which results in the loss of the attribute's history.

Therefore, to help operators in that situation, this change
enhances the resource-type API to allow updates while adding a
new attribute. If the new attribute already exists and is
different from the old one, it is updated; otherwise, nothing
happens and the resource type's attribute list is returned,
as in the old behavior.

The first version of this implementation does not allow changing
the type of a resource type's attributes.
  • Loading branch information
pedro-martins authored and mergify[bot] committed Feb 17, 2021
1 parent 2865e65 commit eb87188
Show file tree
Hide file tree
Showing 3 changed files with 363 additions and 56 deletions.
167 changes: 113 additions & 54 deletions gnocchi/indexer/sqlalchemy.py
Original file line number Diff line number Diff line change
Expand Up @@ -393,68 +393,127 @@ def create_resource_type(self, resource_type):
return resource_type

def update_resource_type(self, name, add_attributes=None,
del_attributes=None):
if not add_attributes and not del_attributes:
del_attributes=None, update_attributes=None):
if not add_attributes and not del_attributes and not update_attributes:
return
add_attributes = add_attributes or []
del_attributes = del_attributes or []
update_attributes = update_attributes or []

self._set_resource_type_state(name, "updating", "active")

try:
with self.facade.independent_writer() as session:
engine = session.connection()
rt = self._get_resource_type(session, name)

with self.facade.writer_connection() as connection:
ctx = migration.MigrationContext.configure(connection)
op = operations.Operations(ctx)
for table in [rt.tablename, '%s_history' % rt.tablename]:
with op.batch_alter_table(table) as batch_op:
for attr in del_attributes:
batch_op.drop_column(attr)
for attr in add_attributes:
server_default = attr.for_filling(
engine.dialect)
batch_op.add_column(sqlalchemy.Column(
attr.name, attr.satype,
nullable=not attr.required,
server_default=server_default))

# We have all rows filled now, we can remove
# the server_default
if server_default is not None:
batch_op.alter_column(
column_name=attr.name,
existing_type=attr.satype,
existing_server_default=server_default,
existing_nullable=not attr.required,
server_default=None)

rt.state = "active"
rt.updated_at = utils.utcnow()
rt.attributes.extend(add_attributes)
for attr in list(rt.attributes):
if attr.name in del_attributes:
rt.attributes.remove(attr)
# FIXME(sileht): yeah that's wierd but attributes is a custom
# json column and 'extend' doesn't trigger sql update, this
# enforce the update. I wonder if sqlalchemy provides something
# on column description side.
sqlalchemy.orm.attributes.flag_modified(rt, 'attributes')
with self.facade.independent_writer() as session:
engine = session.connection()
rt = self._get_resource_type(session, name)

except Exception:
# NOTE(sileht): We fail the DDL, we have no way to automatically
# recover, just set a particular state
# TODO(sileht): Create a repair REST endpoint that delete
# columns not existing in the database but in the resource type
# description. This will allow to pass wrong update_error to active
# state, that currently not possible.
self._set_resource_type_state(name, "updating_error")
raise
with self.facade.writer_connection() as connection:
ctx = migration.MigrationContext.configure(connection)
op = operations.Operations(ctx)
self.fill_null_attribute_values(engine, name, rt, session,
update_attributes)

self._set_resource_type_state(name, "updating", "active")
for table in [rt.tablename, '%s_history' % rt.tablename]:
with op.batch_alter_table(table) as batch_op:
for attr in del_attributes:
LOG.debug("Dropping column [%s] from resource [%s]"
" and database table [%s]",
attr, name, table)
batch_op.drop_column(attr)
for attr in add_attributes:
LOG.debug("Adding new column [%s] with type [%s], "
"nullable [%s] and default value [%s] "
"in resource [%s] and database "
"table [%s]", attr.name, attr.satype,
not attr.required,
getattr(attr, 'fill', None), name, table)

server_default = attr.for_filling(
engine.dialect)
batch_op.add_column(sqlalchemy.Column(
attr.name, attr.satype,
nullable=not attr.required,
server_default=server_default))

# We have all rows filled now, we can remove
# the server_default
if server_default is not None:
LOG.debug("Removing default value [%s] from "
"column [%s] of resource [%s] and "
"database table [%s]",
getattr(attr, 'fill', None),
attr.name, name, table)
batch_op.alter_column(
column_name=attr.name,
existing_type=attr.satype,
existing_server_default=server_default,
existing_nullable=not attr.required,
server_default=None)

for attr in update_attributes:
LOG.debug("Updating column [%s] from old values "
"type [%s], nullable [%s], to new values"
" type [%s], nullable [%s] of resource "
"[%s] and database_table [%s]",
attr[1].name, attr[1].satype,
not attr[1].required, attr[0].satype,
not attr[0].required, name, table)
batch_op.alter_column(
column_name=attr[1].name,
existing_type=attr[1].satype,
existing_nullable=not attr[1].required,
type_=attr[0].satype,
nullable=not attr[0].required)

rt.state = "active"
rt.updated_at = utils.utcnow()
rt.attributes.extend(add_attributes)
update_attributes = list(map(lambda a: a[0],
update_attributes))
update_attributes_names = list(map(lambda a: a.name,
update_attributes))
for attr in list(rt.attributes):
if (attr.name in del_attributes or
attr.name in update_attributes_names):
rt.attributes.remove(attr)

rt.attributes.extend(update_attributes)
# FIXME(sileht): yeah that's wierd but attributes is a custom
# json column and 'extend' doesn't trigger sql update, this
# enforce the update. I wonder if sqlalchemy provides something
# on column description side.
LOG.debug("Updating resource [%s] setting attributes as [%s]",
name, list(rt.attributes))
sqlalchemy.orm.attributes.flag_modified(rt, 'attributes')

return rt

def fill_null_attribute_values(self, engine, name, rt, session,
                               update_attributes):
    """Fill NULL values of to-be-updated attributes with their fill value.

    Before a column is altered (e.g. from optional to required), rows
    where the column is NULL must receive a concrete value; otherwise
    tightening the column's nullability would fail.

    :param engine: SQLAlchemy engine, used to select the SQL dialect
        when building the filter.
    :param name: resource type name.
    :param rt: the resource type row being updated (provides the
        resource and history table names).
    :param session: active SQLAlchemy session.
    :param update_attributes: list of (new_attribute, old_attribute)
        pairs; only pairs whose new attribute carries a non-None
        ``fill`` are processed.
    """
    # Nothing to do unless at least one new attribute definition
    # carries a fill value; this also avoids resolving the mappers
    # when the method is a no-op (matching the lazy behavior of the
    # original per-attribute check).
    if not any(getattr(new_attr, 'fill', None) is not None
               for new_attr, _old_attr in update_attributes):
        return
    # The mappers depend only on the session and the resource type
    # name, so resolve them once instead of inside the nested loops.
    mappers = self._resource_type_to_mappers(session, name)
    for table in [rt.tablename, '%s_history' % rt.tablename]:
        if table == rt.tablename:
            resource_cls = mappers["resource"]
        else:
            resource_cls = mappers["history"]
        for new_attr, _old_attr in update_attributes:
            fill = getattr(new_attr, 'fill', None)
            if fill is None:
                continue
            cls_attr = new_attr.name
            f = QueryTransformer.build_filter(
                engine.dialect.name, resource_cls,
                {'=': {cls_attr: None}})
            # Lock the matching rows so concurrent writers cannot
            # reintroduce NULLs while we rewrite them.
            resources = session.query(resource_cls).filter(
                f).with_for_update().all()
            if resources:
                LOG.debug("Null resources [%s] to be filled with [%s] "
                          "for resource-type [%s]", resources,
                          fill, name)
            for resource in resources:
                if hasattr(resource, cls_attr):
                    setattr(resource, cls_attr, fill)

def get_resource_type(self, name):
    """Look up and return the resource type identified by *name*.

    Opens an independent reader session and delegates the lookup to
    :meth:`_get_resource_type`.
    """
    with self.facade.independent_reader() as db_session:
        return self._get_resource_type(db_session, name)
Expand Down
52 changes: 50 additions & 2 deletions gnocchi/rest/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -901,24 +901,72 @@ def patch(self):
if k not in rt_json_current["attributes"]}
del_attrs = [k for k in rt_json_current["attributes"]
if k not in rt_json_next["attributes"]]
update_attrs = self.retrieve_update_attrs(rt_json_current,
rt_json_next)

if not add_attrs and not del_attrs:
if update_attrs:
LOG.debug("Updating attributes [%s] for resource-type [%s]",
update_attrs, self._name)

if not add_attrs and not del_attrs and not update_attrs:
# NOTE(sileht): just returns the resource, the asked changes
# just do nothing
return rt

try:
add_attrs = schema.attributes_from_dict(add_attrs)
update_attrs = self.create_update_attrs(schema, update_attrs)
except resource_type.InvalidResourceAttribute as e:
abort(400, "Invalid input: %s" % e)

try:
return pecan.request.indexer.update_resource_type(
self._name, add_attributes=add_attrs,
del_attributes=del_attrs)
del_attributes=del_attrs, update_attributes=update_attrs)
except indexer.NoSuchResourceType as e:
abort(400, six.text_type(e))

def create_update_attrs(self, schema, update_attrs):
    """Build (new_attribute, old_attribute) object pairs from JSON specs.

    :param schema: resource type schema object providing
        ``attributes_from_dict``.
    :param update_attrs: mapping of attribute name to an
        (old_json_spec, new_json_spec) tuple.
    :return: list of (new_attribute, old_attribute) pairs, matched
        by attribute name (both lists are sorted by name so zip()
        pairs corresponding entries).
    :raises resource_type.InvalidResourceAttribute: propagated from
        ``attributes_from_dict`` when a definition is invalid.
    """
    new_attrs = {name: specs[1] for name, specs in update_attrs.items()}
    old_attrs = {name: specs[0] for name, specs in update_attrs.items()}
    update_attrs_new = schema.attributes_from_dict(new_attrs)
    update_attrs_old = schema.attributes_from_dict(old_attrs)
    # Sort both lists by attribute name so that each new attribute is
    # paired with its own old definition.
    update_attrs_new.sort(key=lambda attr: attr.name)
    update_attrs_old.sort(key=lambda attr: attr.name)
    return list(zip(update_attrs_new, update_attrs_old))

def retrieve_update_attrs(self, rt_json_current, rt_json_next):
    """Find attributes whose definition changed between two versions.

    :param rt_json_current: current resource type JSON description
        (contains an "attributes" mapping).
    :param rt_json_next: desired resource type JSON description.
    :return: dict mapping attribute name to a
        (current_spec, next_spec) tuple for every attribute present
        in both versions whose definition differs.

    Type changes are rejected via ``validate_types`` (which aborts
    the request with HTTP 400).
    """
    update_attrs = {}
    next_attrs = rt_json_next["attributes"]
    for name, current_spec in rt_json_current["attributes"].items():
        if name not in next_attrs:
            continue
        # Changing an attribute's type is not supported; bail out
        # of the whole request early if one is attempted.
        self.validate_types(name, rt_json_next, current_spec)
        next_spec = next_attrs[name]
        if any(value != next_spec[key]
               for key, value in current_spec.items()):
            update_attrs[name] = (current_spec, next_spec)
    return update_attrs

def validate_types(self, attribute, new_json, old_json):
    """Abort with HTTP 400 if the attribute's type would change.

    Only non-type properties of an attribute may be updated; a type
    change in *new_json* versus *old_json* rejects the request.
    """
    current_type = old_json['type']
    requested_type = new_json["attributes"][attribute]['type']
    if requested_type == current_type:
        return
    msg = ("Type update is not available yet. Changing %s to %s "
           "for attribute %s of resource %s"
           % (current_type, requested_type, attribute, self._name))
    abort(400, msg)

@pecan.expose('json')
def delete(self):
try:
Expand Down
Loading

0 comments on commit eb87188

Please sign in to comment.