
[SUPDESQ-34] added schema and linked field to vocab #33

Merged: 8 commits, Dec 19, 2021
4 changes: 4 additions & 0 deletions README.md
@@ -15,6 +15,10 @@ This will create the following two tables:
vocabulary_service
vocabulary_service_term

Update the database tables:

ckan db upgrade -p vocabulary_services

*(NOT to be confused with the CKAN core `vocabulary` table)*

## Background tasks
2 changes: 1 addition & 1 deletion ckanext/vocabulary_services/cli.py
@@ -6,6 +6,7 @@
import os

from ckan.views.admin import _get_sysadmins
from ckan.model import Session
from ckanapi import LocalCKAN, ValidationError
from ckanext.invalid_uris.helpers import valid_uri
from ckanext.vocabulary_services import model
@@ -129,6 +130,5 @@ def init_db_cmd():

click.secho(u"Vocabulary services tables are setup", fg=u"green")


def get_commands():
return [init_db_cmd, refresh_cmd]
28 changes: 28 additions & 0 deletions ckanext/vocabulary_services/fanstatic/add-edit-form.js
@@ -0,0 +1,28 @@
jQuery(document).ready(function () {
var $vocabFormEl = jQuery('#dataset-edit');
var $nameEl = $vocabFormEl.find('#field-name');
var $linkedSchemaFieldEl = $vocabFormEl.find('#linked_schema_field');

// Name field should be readonly.
$nameEl.attr('readonly', 'readonly');

// Update the linked schema field options.
$vocabFormEl.find('#schema').change(function (e) {
var options = linked_schema_field[$(this).val()];
$linkedSchemaFieldEl.html('');
for (var i = 0; i < options.length; i++) {
$linkedSchemaFieldEl.append('<option value="' + options[i].value + '" data-name="' + options[i].name + '">' + options[i].text + '</option>')
}

$linkedSchemaFieldEl.change();
});

// Update name field.
$linkedSchemaFieldEl.change(function (e) {
$nameEl.val($(this).find('option:selected').attr('data-name'));
});

// Enable select2.
$vocabFormEl.find('#schema').change();
$linkedSchemaFieldEl.select2();
});
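The script relies on a global `linked_schema_field` object (keyed by schema value, each entry a list of options) that is not defined in this file, so it is presumably emitted by the add/edit template. A minimal sketch of how that global could be serialised from the new helper; the function name and the idea of embedding the JSON in the template are illustrative, not part of this diff:

```python
# Sketch (assumption): producing the `linked_schema_field` global that
# add-edit-form.js reads. get_linked_schema_field_options() is the real
# helper added in helpers.py below; everything else here is illustrative.
import json

from ckanext.vocabulary_services import helpers


def linked_schema_field_json(existing_services, current_vocab=None):
    """Serialise the schema -> options mapping so the template can emit it
    as ``var linked_schema_field = {...};`` before add-edit-form.js runs."""
    options = helpers.get_linked_schema_field_options(existing_services, current_vocab)
    return json.dumps(options)
```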
6 changes: 6 additions & 0 deletions ckanext/vocabulary_services/fanstatic/webassets.yml
@@ -0,0 +1,6 @@
add_edit_form:
contents:
- add-edit-form.js
extra:
preload:
- vendor/jquery
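The bundle above is only loadable once the `fanstatic` directory is registered as a webassets source, which is not part of this diff. A minimal sketch of the usual CKAN 2.9+ wiring, assuming a `vocabulary_services` bundle prefix:

```python
# Sketch (assumption): typical registration of the fanstatic/ directory so
# templates can include the bundle defined above, e.g. with
# {% asset 'vocabulary_services/add_edit_form' %}.
import ckan.plugins as plugins
import ckan.plugins.toolkit as toolkit


class VocabularyServicesPlugin(plugins.SingletonPlugin):
    plugins.implements(plugins.IConfigurer)

    def update_config(self, config_):
        toolkit.add_template_directory(config_, 'templates')
        # Register ckanext/vocabulary_services/fanstatic as an asset source.
        toolkit.add_resource('fanstatic', 'vocabulary_services')
```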
65 changes: 63 additions & 2 deletions ckanext/vocabulary_services/helpers.py
@@ -2,8 +2,7 @@

from ckan.lib.base import abort
from ckan.logic import check_access as logic_check_access
from ckan.plugins.toolkit import get_action
from pprint import pformat
from ckan.plugins.toolkit import get_action, h

log = logging.getLogger(__name__)

@@ -86,3 +85,65 @@ def get_children(broader):
parent['children'] = parent_nodes['children']

return parents


def get_linked_schema_field_options(existing_vocab_services, force_include_vocab):
'''
Get all available options for linked_schema_field;
any field that is already registered in existing_vocab_services
will be filtered out.
'''
package_types = [
'dataset',
'dataservice'
]
fields = {
'dataset__dataset_fields': [],
'dataservice__dataset_fields': [],
'dataset__resource_fields': []
}
def existing_field_names(package_type):
field_names = [service.linked_schema_field for service in existing_vocab_services if len(service.linked_schema_field.strip()) > 0 and service.schema == package_type]

if force_include_vocab and len(force_include_vocab.linked_schema_field.strip()) > 0 and force_include_vocab.linked_schema_field in field_names:
field_names.remove(force_include_vocab.linked_schema_field)

return field_names

for package_type in package_types:
schema = h.scheming_get_dataset_schema(package_type)
dataset_fields = schema.get('dataset_fields', [])

if package_type == 'dataset':
resource_fields = schema.get('resource_fields', [])
fields['dataset__dataset_fields'] = _extract_vocab_field_from_schema(dataset_fields, existing_field_names('dataset__dataset_fields'))
fields['dataset__resource_fields'] = _extract_vocab_field_from_schema(resource_fields, existing_field_names('dataset__resource_fields'))
else:
fields['dataservice__dataset_fields'] = _extract_vocab_field_from_schema(dataset_fields, existing_field_names('dataservice__dataset_fields'))

return fields


def _extract_vocab_field_from_schema(schema_fields, existing_field_names):
'''
Get all fields that have a vocabulary_service_name.
'''
def extract_vocab(vocab_name, field_name, sf):
if vocab_name and field_name not in existing_field_names and sf.get('vocabulary_service_name', '') != 'point-of-contact':
vocab_fields.append({
'text': sf.get('label'),
'name': sf.get('vocabulary_service_name'),
'value': sf.get('field_name')
})

vocab_fields = []
for schema_field in schema_fields:
extract_vocab(schema_field.get('vocabulary_service_name', False), schema_field.get('field_name', False), schema_field)

schema_field_groups = schema_field.get('field_group', False)
if schema_field_groups:
for schema_field_group in schema_field_groups:
extract_vocab(schema_field_group.get('vocabulary_service_name', False), schema_field_group.get('field_name', False), schema_field_group)

# Sort the value.
return sorted(vocab_fields, key=lambda d: d['text'])
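For reference, the mapping this helper returns (and that the JavaScript above consumes as `linked_schema_field`) groups vocabulary-enabled fields under three fixed keys; a sketch of the shape, with hypothetical field entries:

```python
# Illustrative only: the dict built by get_linked_schema_field_options().
# The keys are fixed in the helper; the entries shown are hypothetical and
# each list is sorted by 'text'.
{
    'dataset__dataset_fields': [
        {'text': 'Topic',      # schema field label
         'name': 'topic',      # vocabulary_service_name
         'value': 'topic'},    # field_name in the scheming schema
    ],
    'dataset__resource_fields': [],
    'dataservice__dataset_fields': [],
}
```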
2 changes: 2 additions & 0 deletions ckanext/vocabulary_services/logic/action/create.py
@@ -23,6 +23,8 @@ def vocabulary_service_create(context, data_dict):
type=data_dict.get('type', ''),
title=data_dict.get('title', ''),
name=data_dict.get('name', ''),
schema=data_dict.get('schema', ''),
linked_schema_field=data_dict.get('linked_schema_field', ''),
uri=data_dict.get('uri', ''),
update_frequency=data_dict.get('update_frequency', ''),
allow_duplicate_terms=allow_duplicate_terms,
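Callers of the action now pass the two new keys alongside the existing ones; a minimal sketch of invoking it through the toolkit, with illustrative values (valid `type` and `update_frequency` choices depend on the extension's validators):

```python
# Sketch: calling vocabulary_service_create with the new `schema` and
# `linked_schema_field` keys added in this change. All values are
# illustrative, not taken from this diff.
from ckan.plugins import toolkit

toolkit.get_action('vocabulary_service_create')(
    {'ignore_auth': True},   # context
    {
        'type': 'example-type',
        'title': 'Topic',
        'name': 'topic',
        'schema': 'dataset__dataset_fields',   # new in this PR
        'linked_schema_field': 'topic',        # new in this PR
        'uri': 'https://example.com/vocab/topic.json',
        'update_frequency': 'weekly',
        'allow_duplicate_terms': False,
    },
)
```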

This file was deleted.

This file was deleted.

This file was deleted.

@@ -0,0 +1 @@
Generic single-database configuration.
@@ -0,0 +1,74 @@
# A generic, single database configuration.

[alembic]
# path to migration scripts
script_location = %(here)s

# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s

# timezone to use when rendering the date
# within the migration file as well as the filename.
# string value is passed to dateutil.tz.gettz()
# leave blank for localtime
# timezone =

# max length of characters to apply to the
# "slug" field
#truncate_slug_length = 40

# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false

# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false

# version location specification; this defaults
# to /app/src/ckanext-vocabulary-services/ckanext/vocabulary_services/migration/vocabulary_services/versions. When using multiple version
# directories, initial revisions must be specified with --version-path
# version_locations = %(here)s/bar %(here)s/bat /app/src/ckanext-vocabulary-services/ckanext/vocabulary_services/migration/vocabulary_services/versions

# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8

sqlalchemy.url = driver://user:pass@localhost/dbname


# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic

[handlers]
keys = console

[formatters]
keys = generic

[logger_root]
level = WARN
handlers = console
qualname =

[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine

[logger_alembic]
level = INFO
handlers =
qualname = alembic

[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic

[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
81 changes: 81 additions & 0 deletions ckanext/vocabulary_services/migration/vocabulary_services/env.py
@@ -0,0 +1,81 @@
# -*- coding: utf-8 -*-

from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig

import os

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.

name = os.path.basename(os.path.dirname(__file__))


def run_migrations_offline():
"""Run migrations in 'offline' mode.

This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.

Calls to context.execute() here emit the given string to the
script output.

"""

url = config.get_main_option(u"sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True,
version_table=u'{}_alembic_version'.format(name)
)

with context.begin_transaction():
context.run_migrations()


def run_migrations_online():
"""Run migrations in 'online' mode.

In this scenario we need to create an Engine
and associate a connection with the context.

"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix=u'sqlalchemy.',
poolclass=pool.NullPool)

with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata,
version_table=u'{}_alembic_version'.format(name)
)

with context.begin_transaction():
context.run_migrations()


if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
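The revision under `versions/` that actually adds the two columns is not shown in the rendered diff; a minimal sketch of what such a revision could look like, assuming the `vocabulary_service` table named in the README and nullable text columns:

```python
# Sketch (assumption): an Alembic revision adding the new columns to the
# existing vocabulary_service table. Revision identifiers and column types
# are illustrative; the real versions/ file is not part of the rendered diff.
import sqlalchemy as sa
from alembic import op

revision = '000000000000'   # placeholder
down_revision = None
branch_labels = None
depends_on = None


def upgrade():
    op.add_column('vocabulary_service', sa.Column('schema', sa.UnicodeText))
    op.add_column('vocabulary_service', sa.Column('linked_schema_field', sa.UnicodeText))


def downgrade():
    op.drop_column('vocabulary_service', 'linked_schema_field')
    op.drop_column('vocabulary_service', 'schema')
```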