From 84229f2f52b9331a5dfe460a670e010b62f02f34 Mon Sep 17 00:00:00 2001 From: Sharad S Date: Mon, 25 Nov 2024 15:01:16 -0500 Subject: [PATCH 01/79] Add workbench migrations --- .../migrations/0006_spdataset_isupdate.py | 18 ++++++++++++++++++ .../migrations/0007_spdataset_parent.py | 19 +++++++++++++++++++ .../0008_alter_spdataset_isupdate.py | 18 ++++++++++++++++++ 3 files changed, 55 insertions(+) create mode 100644 specifyweb/workbench/migrations/0006_spdataset_isupdate.py create mode 100644 specifyweb/workbench/migrations/0007_spdataset_parent.py create mode 100644 specifyweb/workbench/migrations/0008_alter_spdataset_isupdate.py diff --git a/specifyweb/workbench/migrations/0006_spdataset_isupdate.py b/specifyweb/workbench/migrations/0006_spdataset_isupdate.py new file mode 100644 index 00000000000..c3911773fcb --- /dev/null +++ b/specifyweb/workbench/migrations/0006_spdataset_isupdate.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.15 on 2024-08-11 17:53 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('workbench', '0005_auto_20210428_1634'), + ] + + operations = [ + migrations.AddField( + model_name='spdataset', + name='isupdate', + field=models.BooleanField(default=False), + ), + ] diff --git a/specifyweb/workbench/migrations/0007_spdataset_parent.py b/specifyweb/workbench/migrations/0007_spdataset_parent.py new file mode 100644 index 00000000000..60e006ac16b --- /dev/null +++ b/specifyweb/workbench/migrations/0007_spdataset_parent.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.15 on 2024-08-16 14:50 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('workbench', '0006_spdataset_isupdate'), + ] + + operations = [ + migrations.AddField( + model_name='spdataset', + name='parent', + field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='backer', to='workbench.spdataset'), + ), + ] diff --git a/specifyweb/workbench/migrations/0008_alter_spdataset_isupdate.py b/specifyweb/workbench/migrations/0008_alter_spdataset_isupdate.py new file mode 100644 index 00000000000..e84fd04af03 --- /dev/null +++ b/specifyweb/workbench/migrations/0008_alter_spdataset_isupdate.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.15 on 2024-08-30 15:10 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('workbench', '0007_spdataset_parent'), + ] + + operations = [ + migrations.AlterField( + model_name='spdataset', + name='isupdate', + field=models.BooleanField(default=False, null=True), + ), + ] From da7cf871c81b5f77aed923ffbd7ec6db194d4409 Mon Sep 17 00:00:00 2001 From: Sharad S Date: Mon, 25 Nov 2024 15:41:50 -0500 Subject: [PATCH 02/79] Update workbench models --- specifyweb/workbench/models.py | 127 +++++++++++++++++++++++---------- 1 file changed, 91 insertions(+), 36 deletions(-) diff --git a/specifyweb/workbench/models.py b/specifyweb/workbench/models.py index c74ea3f1f75..bb17dce039a 100644 --- a/specifyweb/workbench/models.py +++ b/specifyweb/workbench/models.py @@ -1,15 +1,22 @@ import json -from functools import partialmethod from django import http from django.core.exceptions import ObjectDoesNotExist -from django.db import models, transaction +from django.db import models from django.http import Http404 from django.utils import timezone -from specifyweb.specify.models import Collection, Specifyuser, Agent, datamodel, custom_save +from 
specifyweb.specify.func import Func +from specifyweb.specify.models import ( + Collection, + Specifyuser, + Agent, + datamodel, + custom_save, +) from specifyweb.specify.api import uri_for_model + class Dataset(models.Model): # All these attributes are meta-data. name = models.CharField(max_length=256) @@ -29,22 +36,48 @@ class Dataset(models.Model): # Misc meta-data. timestampcreated = models.DateTimeField(default=timezone.now) timestampmodified = models.DateTimeField(auto_now=True) - createdbyagent = models.ForeignKey(Agent, null=True, on_delete=models.SET_NULL, related_name="+") - modifiedbyagent = models.ForeignKey(Agent, null=True, on_delete=models.SET_NULL, related_name="+") - - base_meta_fields = ["name", "uploaderstatus", "timestampcreated", "timestampmodified"] - object_response_fields = [*base_meta_fields, "id", "remarks", "importedfilename", "uploadresult", "uploadplan"] + createdbyagent = models.ForeignKey( + Agent, null=True, on_delete=models.SET_NULL, related_name="+" + ) + modifiedbyagent = models.ForeignKey( + Agent, null=True, on_delete=models.SET_NULL, related_name="+" + ) + + base_meta_fields = [ + "name", + "uploaderstatus", + "timestampcreated", + "timestampmodified", + ] + object_response_fields = [ + *base_meta_fields, + "id", + "remarks", + "importedfilename", + "uploadresult", + "uploadplan", + ] @classmethod def get_meta_fields(cls, request, extra_meta_fields=None, extra_filters=None): - attrs = [*cls.base_meta_fields, *(extra_meta_fields if extra_meta_fields is not None else [])] + attrs = [ + *cls.base_meta_fields, + *(extra_meta_fields if extra_meta_fields is not None else []), + ] - dss = cls.objects.filter( + dss = cls.objects.filter( specifyuser=request.specify_user, collection=request.specify_collection, **(extra_filters if extra_filters is not None else {}) ).only(*attrs) - return [{'id': ds.id, **{attr: getattr(ds, attr) for attr in attrs}, 'uploadplan': json.loads(ds.uploadplan) if ds.uploadplan else None} for ds in dss] + return [ + { + "id": ds.id, + **{attr: getattr(ds, attr) for attr in attrs}, + "uploadplan": json.loads(ds.uploadplan) if ds.uploadplan else None, + } + for ds in dss + ] # raise_404: Whether to raise 404 or return http 404. # lock_object: Whether to run a "select for update" or "select" @@ -52,13 +85,18 @@ def get_meta_fields(cls, request, extra_meta_fields=None, extra_filters=None): def validate_dataset_request(cls, raise_404: bool, lock_object: bool): def decorator(func): def inner(request, **kwargs): - ds_id = kwargs.get('ds_id', None) + ds_id = kwargs.get("ds_id", None) if ds_id is None: - raise Exception('ds_id not a key in the request. ' - 'Probably because correct group name is not used url regexp') + raise Exception( + "ds_id not a key in the request. 
" + "Probably because correct group name is not used url regexp" + ) try: - ds = cls.objects.select_for_update().get(id=ds_id) \ - if lock_object else cls.objects.get(id=ds_id) + ds = ( + cls.objects.select_for_update().get(id=ds_id) + if lock_object + else cls.objects.get(id=ds_id) + ) except ObjectDoesNotExist as e: if raise_404: raise Http404(e) @@ -66,49 +104,66 @@ def inner(request, **kwargs): if ds.specifyuser != request.specify_user: return http.HttpResponseForbidden() - new_args = {key: kwargs[key] for key in kwargs if key != 'ds_id'} + new_args = {key: kwargs[key] for key in kwargs if key != "ds_id"} return func(request, ds, **new_args) + return inner - return decorator + return decorator def get_dataset_as_dict(self): ds_dict = {key: getattr(self, key) for key in self.object_response_fields} - ds_dict.update({ - "rows": self.data, - "uploadplan": json.loads(self.uploadplan) if self.uploadplan else None, - "createdbyagent": uri_for_model('agent', self.createdbyagent_id) if self.createdbyagent_id is not None else None, - "modifiedbyagent": uri_for_model('agent', self.modifiedbyagent_id) if self.modifiedbyagent_id is not None else None - }) + ds_dict.update( + { + "rows": self.data, + "uploadplan": Func.maybe(self.uploadplan, json.loads), + "createdbyagent": ( + uri_for_model("agent", self.createdbyagent_id) + if self.createdbyagent_id is not None + else None + ), + "modifiedbyagent": ( + uri_for_model("agent", self.modifiedbyagent_id) + if self.modifiedbyagent_id is not None + else None + ), + } + ) return ds_dict class Meta: abstract = True - # save = partialmethod(custom_save) - class Spdataset(Dataset): - specify_model = datamodel.get_table('spdataset') + specify_model = datamodel.get_table("spdataset") columns = models.JSONField() visualorder = models.JSONField(null=True) rowresults = models.TextField(null=True) + isupdate = models.BooleanField(default=False, null=True) - class Meta: - db_table = 'spdataset' + # very complicated. Essentially, each batch-edit dataset gets backed by another dataset (for rollbacks). + # This should be a one-to-one field, imagine the mess otherwise. 
+ parent = models.OneToOneField( + "Spdataset", related_name="backer", null=True, on_delete=models.CASCADE + ) - # save = partialmethod(custom_save) + class Meta: + db_table = "spdataset" def get_dataset_as_dict(self): ds_dict = super().get_dataset_as_dict() - ds_dict.update({ - "columns": self.columns, - "visualorder": self.visualorder, - "rowresults": self.rowresults and json.loads(self.rowresults) - }) + ds_dict.update( + { + "columns": self.columns, + "visualorder": self.visualorder, + "rowresults": self.rowresults and json.loads(self.rowresults), + "isupdate": self.isupdate == True, + } + ) return ds_dict def was_uploaded(self) -> bool: - return self.uploadresult and self.uploadresult['success'] + return self.uploadresult and self.uploadresult["success"] From 35a485867fa791c88a9863a3abf55341c378c49d Mon Sep 17 00:00:00 2001 From: Sharad S Date: Mon, 25 Nov 2024 15:43:23 -0500 Subject: [PATCH 03/79] Add batch edit permissions --- specifyweb/workbench/permissions.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 specifyweb/workbench/permissions.py diff --git a/specifyweb/workbench/permissions.py b/specifyweb/workbench/permissions.py new file mode 100644 index 00000000000..9ad8b20db27 --- /dev/null +++ b/specifyweb/workbench/permissions.py @@ -0,0 +1,29 @@ +from specifyweb.permissions.permissions import PermissionTarget, PermissionTargetAction + + +class DataSetPT(PermissionTarget): + resource = "/workbench/dataset" + create = PermissionTargetAction() + update = PermissionTargetAction() + delete = PermissionTargetAction() + upload = PermissionTargetAction() + unupload = PermissionTargetAction() + validate = PermissionTargetAction() + transfer = PermissionTargetAction() + create_recordset = PermissionTargetAction() + + +class BatchEditDataSetPT(PermissionTarget): + resource = "/batch_edit/dataset" + create = PermissionTargetAction() + update = PermissionTargetAction() + delete = PermissionTargetAction() + commit = PermissionTargetAction() + rollback = PermissionTargetAction() + validate = PermissionTargetAction() + transfer = PermissionTargetAction() + create_recordset = PermissionTargetAction() + # whether dependents should be deleted (checked during upload) + delete_dependents = PermissionTargetAction() + # whether multiple tables need to be updated (checked during dataset construction) + edit_multiple_tables = PermissionTargetAction() From 8584d7750e1228a06b0e3de8cba4c4ff9b74893a Mon Sep 17 00:00:00 2001 From: Sharad S Date: Mon, 25 Nov 2024 15:47:38 -0500 Subject: [PATCH 04/79] Make a single migration --- specifyweb/specify/func.py | 66 +++++++++++++++++++ ...spdataset_parent.py => 0006_batch_edit.py} | 9 ++- .../migrations/0006_spdataset_isupdate.py | 18 ----- .../0008_alter_spdataset_isupdate.py | 18 ----- 4 files changed, 73 insertions(+), 38 deletions(-) create mode 100644 specifyweb/specify/func.py rename specifyweb/workbench/migrations/{0007_spdataset_parent.py => 0006_batch_edit.py} (60%) delete mode 100644 specifyweb/workbench/migrations/0006_spdataset_isupdate.py delete mode 100644 specifyweb/workbench/migrations/0008_alter_spdataset_isupdate.py diff --git a/specifyweb/specify/func.py b/specifyweb/specify/func.py new file mode 100644 index 00000000000..a543c91589a --- /dev/null +++ b/specifyweb/specify/func.py @@ -0,0 +1,66 @@ +from functools import reduce +from typing import Callable, Dict, Generator, List, Optional, Tuple, TypeVar +from django.db.models import Q + +# made as a class to encapsulate type variables and prevent pollution 
of export + + +class Func: + I = TypeVar("I") + O = TypeVar("O") + + @staticmethod + def maybe(value: Optional[I], callback: Callable[[I], O]): + if value is None: + return None + return callback(value) + + @staticmethod + def sort_by_key(to_sort: Dict[I, O], reverse=False) -> List[Tuple[I, O]]: + return sorted(to_sort.items(), key=lambda t: t[0], reverse=reverse) + + @staticmethod + def make_ors(eprns: List[Q]) -> Q: + assert len(eprns) > 0 + return reduce(lambda accum, curr: accum | curr, eprns) + + @staticmethod + def make_generator(step=1): + def _generator(step=step): + i = 0 + while True: + yield i + i += step + + return _generator(step) + + @staticmethod + def tap_call( + callback: Callable[[], O], generator: Generator[int, None, None] + ) -> Tuple[bool, O]: + init_1 = next(generator) + init_2 = next(generator) + step = init_2 - init_1 + to_return = callback() + post = next(generator) + called = (post - init_2) != step + assert ( + post - init_2 + ) % step == 0, "(sanity check failed): made irregular generator" + return called, to_return + + @staticmethod + def remove_keys(source: Dict[I, O], callback: Callable[[O], bool]) -> Dict[I, O]: + return {key: value for key, value in source.items() if callback(key, value)} + + @staticmethod + def is_not_empty(key, val): + return val + + @staticmethod + def first(source: List[Tuple[I, O]]) -> List[I]: + return [first for (first, _) in source] + + @staticmethod + def second(source: List[Tuple[I, O]]) -> List[O]: + return [second for (_, second) in source] diff --git a/specifyweb/workbench/migrations/0007_spdataset_parent.py b/specifyweb/workbench/migrations/0006_batch_edit.py similarity index 60% rename from specifyweb/workbench/migrations/0007_spdataset_parent.py rename to specifyweb/workbench/migrations/0006_batch_edit.py index 60e006ac16b..1a77b2ef709 100644 --- a/specifyweb/workbench/migrations/0007_spdataset_parent.py +++ b/specifyweb/workbench/migrations/0006_batch_edit.py @@ -1,4 +1,4 @@ -# Generated by Django 3.2.15 on 2024-08-16 14:50 +# Generated by Django 3.2.15 on 2024-11-25 20:46 from django.db import migrations, models import django.db.models.deletion @@ -7,10 +7,15 @@ class Migration(migrations.Migration): dependencies = [ - ('workbench', '0006_spdataset_isupdate'), + ('workbench', '0005_auto_20210428_1634'), ] operations = [ + migrations.AddField( + model_name='spdataset', + name='isupdate', + field=models.BooleanField(default=False, null=True), + ), migrations.AddField( model_name='spdataset', name='parent', diff --git a/specifyweb/workbench/migrations/0006_spdataset_isupdate.py b/specifyweb/workbench/migrations/0006_spdataset_isupdate.py deleted file mode 100644 index c3911773fcb..00000000000 --- a/specifyweb/workbench/migrations/0006_spdataset_isupdate.py +++ /dev/null @@ -1,18 +0,0 @@ -# Generated by Django 3.2.15 on 2024-08-11 17:53 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ('workbench', '0005_auto_20210428_1634'), - ] - - operations = [ - migrations.AddField( - model_name='spdataset', - name='isupdate', - field=models.BooleanField(default=False), - ), - ] diff --git a/specifyweb/workbench/migrations/0008_alter_spdataset_isupdate.py b/specifyweb/workbench/migrations/0008_alter_spdataset_isupdate.py deleted file mode 100644 index e84fd04af03..00000000000 --- a/specifyweb/workbench/migrations/0008_alter_spdataset_isupdate.py +++ /dev/null @@ -1,18 +0,0 @@ -# Generated by Django 3.2.15 on 2024-08-30 15:10 - -from django.db import migrations, models - - 
-class Migration(migrations.Migration): - - dependencies = [ - ('workbench', '0007_spdataset_parent'), - ] - - operations = [ - migrations.AlterField( - model_name='spdataset', - name='isupdate', - field=models.BooleanField(default=False, null=True), - ), - ] From 439b3b6c3572ae5e2d1989d1c4fd72302fcf3d39 Mon Sep 17 00:00:00 2001 From: Sharad S Date: Mon, 25 Nov 2024 16:02:32 -0500 Subject: [PATCH 05/79] Add css for batch edit cells --- specifyweb/frontend/js_src/css/main.css | 11 ++++++++-- specifyweb/frontend/js_src/css/workbench.css | 22 ++++++++++++++++++-- 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/specifyweb/frontend/js_src/css/main.css b/specifyweb/frontend/js_src/css/main.css index efc7ff4a690..115648f168e 100644 --- a/specifyweb/frontend/js_src/css/main.css +++ b/specifyweb/frontend/js_src/css/main.css @@ -150,6 +150,7 @@ /* Make spinner buttons larger */ [type='number']:not([readonly], .no-arrows)::-webkit-outer-spin-button, [type='number']:not([readonly], .no-arrows)::-webkit-inner-spin-button { + -webkit-appearance: inner-spin-button !important; @apply absolute right-0 top-0 h-full w-2; } @@ -253,10 +254,16 @@ --invalid-cell: theme('colors.red.300'); --modified-cell: theme('colors.yellow.250'); --search-result: theme('colors.green.300'); - @apply dark:[--invalid-cell:theme('colors.red.900')] + --updated-cell: theme('colors.cyan.200'); + --deleted-cell: theme('colors.amber.500'); + --matched-and-changed-cell: theme('colors.blue.200'); + @apply dark:[--deleted-cell:theme('colors.amber.600')] + dark:[--invalid-cell:theme('colors.red.900')] + dark:[--matched-and-changed-cell:theme('colors.fuchsia.700')] dark:[--modified-cell:theme('colors.yellow.900')] dark:[--new-cell:theme('colors.indigo.900')] - dark:[--search-result:theme('colors.green.900')]; + dark:[--search-result:theme('colors.green.900')] + dark:[--updated-cell:theme('colors.cyan.800')]; } .custom-select { diff --git a/specifyweb/frontend/js_src/css/workbench.css b/specifyweb/frontend/js_src/css/workbench.css index f7839c71071..d8a0fbfd813 100644 --- a/specifyweb/frontend/js_src/css/workbench.css +++ b/specifyweb/frontend/js_src/css/workbench.css @@ -38,7 +38,10 @@ } /* CONTENT styles */ -.wbs-form.wb-show-upload-results .wb-no-match-cell, +.wbs-form.wb-show-upload-results .wb-no-match-cell +.wbs-form.wb-show-upload-results .wb-updated-cell +.wbs-form.wb-show-upload-results .wb-deleted-cell +.wbs-form.wb-show-upload-results .wb-matched-and-changed-cell .wbs-form.wb-focus-coordinates .wb-coordinate-cell { @apply text-black dark:text-white; } @@ -50,13 +53,28 @@ /* Cell navigation */ .wbs-form - :is(.wb-no-match-cell, .wb-modified-cell, .htCommentCell, .wb-search-match-cell), + :is(.wb-no-match-cell, .wb-modified-cell, .htCommentCell, .wb-search-match-cell, .wb-updated-cell, .wb-deleted-cell, .wb-matched-and-changed-cell), .wb-navigation-section { @apply !bg-[color:var(--accent-color)]; } /* The order here determines the priority of the states * From the lowest till the highest */ +.wbs-form:not(.wb-hide-new-cells) .wb-updated-cell, +.wb-navigation-section[data-navigation-type='updatedCells'] { + --accent-color: var(--updated-cell); +} + +.wbs-form:not(.wb-hide-new-cells) .wb-deleted-cell, +.wb-navigation-section[data-navigation-type='deletedCells'] { + --accent-color: var(--deleted-cell); +} + +.wbs-form:not(.wb-hide-new-cells) .wb-matched-and-changed-cell, +.wb-navigation-section[data-navigation-type='matchedAndChangedCells'] { + --accent-color: var(--matched-and-changed-cell); +} + 
.wbs-form:not(.wb-hide-new-cells) .wb-no-match-cell, .wb-navigation-section[data-navigation-type='newCells'] { --accent-color: var(--new-cell); From 020ed03cd422b92acacd66e1a70cdb44774fea08 Mon Sep 17 00:00:00 2001 From: Sharad S Date: Mon, 25 Nov 2024 17:23:46 -0500 Subject: [PATCH 06/79] Add batch edit in sidebar --- .../js_src/lib/components/BatchEdit/index.tsx | 320 ++++++++++++++++++ 1 file changed, 320 insertions(+) create mode 100644 specifyweb/frontend/js_src/lib/components/BatchEdit/index.tsx diff --git a/specifyweb/frontend/js_src/lib/components/BatchEdit/index.tsx b/specifyweb/frontend/js_src/lib/components/BatchEdit/index.tsx new file mode 100644 index 00000000000..2b14c54dd4e --- /dev/null +++ b/specifyweb/frontend/js_src/lib/components/BatchEdit/index.tsx @@ -0,0 +1,320 @@ +import React from 'react'; +import { useNavigate } from 'react-router-dom'; + +import { batchEditText } from '../../localization/batchEdit'; +import { commonText } from '../../localization/common'; +import { ajax } from '../../utils/ajax'; +import { f } from '../../utils/functools'; +import type { RA } from '../../utils/types'; +import { defined, filterArray } from '../../utils/types'; +import { group, keysToLowerCase, sortFunction } from '../../utils/utils'; +import { H2, H3 } from '../Atoms'; +import { Button } from '../Atoms/Button'; +import { dialogIcons } from '../Atoms/Icons'; +import { LoadingContext } from '../Core/Contexts'; +import type { AnyTree, SerializedResource } from '../DataModel/helperTypes'; +import type { SpecifyResource } from '../DataModel/legacyTypes'; +import { schema } from '../DataModel/schema'; +import { serializeResource } from '../DataModel/serializers'; +import type { LiteralField, Relationship } from '../DataModel/specifyField'; +import type { SpecifyTable } from '../DataModel/specifyTable'; +import { strictGetTable } from '../DataModel/tables'; +import type { GeographyTreeDefItem, SpQuery, Tables } from '../DataModel/types'; +import { + isTreeTable, + strictGetTreeDefinitionItems, + treeRanksPromise, +} from '../InitialContext/treeRanks'; +import { Dialog } from '../Molecules/Dialog'; +import { TableIcon } from '../Molecules/TableIcon'; +import { userPreferences } from '../Preferences/userPreferences'; +import { QueryFieldSpec } from '../QueryBuilder/fieldSpec'; +import type { QueryField } from '../QueryBuilder/helpers'; +import { uniquifyDataSetName } from '../WbImport/helpers'; +import { + anyTreeRank, + relationshipIsToMany, +} from '../WbPlanView/mappingHelpers'; +import { generateMappingPathPreview } from '../WbPlanView/mappingPreview'; + +const queryFieldSpecHeader = (queryFieldSpec: QueryFieldSpec) => + generateMappingPathPreview( + queryFieldSpec.baseTable.name, + queryFieldSpec.toMappingPath() + ); + +export function BatchEditFromQuery({ + query, + fields, + baseTableName, + recordSetId, +}: { + readonly query: SpecifyResource; + readonly fields: RA; + readonly baseTableName: keyof Tables; + readonly recordSetId?: number; +}) { + const navigate = useNavigate(); + const post = async (dataSetName: string) => + ajax<{ readonly id: number }>('/stored_query/batch_edit/', { + method: 'POST', + errorMode: 'dismissible', + headers: { Accept: 'application/json' }, + body: keysToLowerCase({ + ...serializeResource(query), + captions: fields + .filter(({ isDisplay }) => isDisplay) + .map(({ mappingPath }) => + generateMappingPathPreview(baseTableName, mappingPath) + ), + name: dataSetName, + recordSetId, + limit: userPreferences.get('batchEdit', 'query', 'limit'), + }), + }); + 
const [errors, setErrors] = React.useState(undefined); + const loading = React.useContext(LoadingContext); + + const queryFieldSpecs = React.useMemo( + () => + filterArray( + fields.map((field) => + field.isDisplay + ? QueryFieldSpec.fromPath(baseTableName, field.mappingPath) + : undefined + ) + ), + [fields] + ); + + return ( + <> + { + loading( + treeRanksPromise.then(async () => { + const missingRanks = findAllMissing(queryFieldSpecs); + const invalidFields = queryFieldSpecs.filter((fieldSpec) => + filters.some((filter) => filter(fieldSpec)) + ); + const hasErrors = + Object.values(missingRanks).some((ranks) => ranks.length > 0) || + invalidFields.length > 0; + + if (hasErrors) { + setErrors({ + missingRanks, + invalidFields: invalidFields.map(queryFieldSpecHeader), + }); + return; + } + + const newName = batchEditText.datasetName({ + queryName: query.get('name'), + datePart: new Date().toDateString(), + }); + return uniquifyDataSetName(newName, undefined, 'batchEdit').then( + async (name) => + post(name).then(({ data }) => + navigate(`/specify/workbench/${data.id}`) + ) + ); + }) + ); + }} + > + <>{batchEditText.batchEdit()} + + {errors !== undefined && ( + setErrors(undefined)} /> + )} + + ); +} + +type QueryError = { + readonly missingRanks: { + readonly // Query can contain relationship to multiple trees + [KEY in AnyTree['tableName']]: RA; + }; + readonly invalidFields: RA; +}; + +function containsFaultyNestedToMany(queryFieldSpec: QueryFieldSpec): boolean { + const joinPath = queryFieldSpec.joinPath; + if (joinPath.length <= 1) return false; + const nestedToManyCount = joinPath.filter( + (relationship) => + relationship.isRelationship && relationshipIsToMany(relationship) + ); + return nestedToManyCount.length > 1; +} + +const containsSystemTables = (queryFieldSpec: QueryFieldSpec) => + queryFieldSpec.joinPath.some((field) => field.table.isSystem); + +const hasHierarchyBaseTable = (queryFieldSpec: QueryFieldSpec) => + Object.keys(schema.domainLevelIds).includes( + queryFieldSpec.baseTable.name.toLowerCase() as 'collection' + ); + +const filters = [containsFaultyNestedToMany, containsSystemTables]; + +const getTreeDefFromName = ( + rankName: string, + treeDefItems: RA> +) => + defined( + treeDefItems.find( + (treeRank) => treeRank.name.toLowerCase() === rankName.toLowerCase() + ) + ); + +function findAllMissing( + queryFieldSpecs: RA +): QueryError['missingRanks'] { + const treeFieldSpecs = group( + filterArray( + queryFieldSpecs.map((fieldSpec) => + isTreeTable(fieldSpec.table.name) && + fieldSpec.treeRank !== anyTreeRank && + fieldSpec.treeRank !== undefined + ? [ + fieldSpec.table, + { rank: fieldSpec.treeRank, field: fieldSpec.getField() }, + ] + : undefined + ) + ) + ); + + return Object.fromEntries( + treeFieldSpecs.map(([treeTable, treeRanks]) => [ + treeTable.name, + findMissingRanks(treeTable, treeRanks), + ]) + ); +} + +// TODO: discuss if we need to add more of them, and if we need to add more of them for other table. 
+const requiredTreeFields: RA = ['name'] as const; + +function findMissingRanks( + treeTable: SpecifyTable, + treeRanks: RA< + | { readonly rank: string; readonly field?: LiteralField | Relationship } + | undefined + > +): RA { + const allTreeDefItems = strictGetTreeDefinitionItems( + treeTable.name as 'Geography', + false + ); + + // Duplicates don't affect any logic here + const currentTreeRanks = filterArray( + treeRanks.map((treeRank) => + f.maybe(treeRank, ({ rank, field }) => ({ + specifyRank: getTreeDefFromName(rank, allTreeDefItems), + field, + })) + ) + ); + + const currentRanksSorted = Array.from(currentTreeRanks).sort( + sortFunction(({ specifyRank: { rankId } }) => rankId) + ); + + const highestRank = currentRanksSorted[0]; + + return allTreeDefItems.flatMap(({ rankId, name }) => + rankId < highestRank.specifyRank.rankId + ? [] + : filterArray( + requiredTreeFields.map((requiredField) => + currentTreeRanks.some( + (rank) => + rank.specifyRank.name === name && + rank.field !== undefined && + requiredField === rank.field.name + ) + ? undefined + : `${name} ${ + defined( + strictGetTable(treeTable.name).getField(requiredField) + ).label + }` + ) + ) + ); +} + +function ErrorsDialog({ + errors, + onClose: handleClose, +}: { + readonly errors: QueryError; + readonly onClose: () => void; +}): JSX.Element { + return ( + + + + + ); +} + +function ShowInvalidFields({ + error, +}: { + readonly error: QueryError['invalidFields']; +}) { + const hasErrors = error.length > 0; + return hasErrors ? ( +
+    <div>
+      <H3>{batchEditText.removeField()}</H3>
+      {error.map((singleError) => (
+        <p key={singleError}>{singleError}</p>
+      ))}
+    </div>
+  ) : null;
+}
+
+function ShowMissingRanks({
+  error,
+}: {
+  readonly error: QueryError['missingRanks'];
+}) {
+  const hasMissing = Object.values(error).some((rank) => rank.length > 0);
+  return hasMissing ? (
+    <div>
+      <H3>{batchEditText.addTreeRank()}</H3>
+      {Object.entries(error).map(([treeTable, ranks]) => (
+        <div key={treeTable}>
+          <div>
+            <TableIcon label name={treeTable} />
+            <H3>{strictGetTable(treeTable).label}</H3>
+          </div>
+          {ranks.map((rank) => (
+            <p key={rank}>{rank}</p>
+          ))}
+        </div>
+ ) : null; +} From b4bcf44410130a0da728ad1bd50c7ff8aec7d6f8 Mon Sep 17 00:00:00 2001 From: Sharad S Date: Tue, 26 Nov 2024 16:45:33 -0500 Subject: [PATCH 07/79] Add permission definitions --- .../lib/components/Permissions/definitions.ts | 67 ++++++++++--------- 1 file changed, 36 insertions(+), 31 deletions(-) diff --git a/specifyweb/frontend/js_src/lib/components/Permissions/definitions.ts b/specifyweb/frontend/js_src/lib/components/Permissions/definitions.ts index 68bca10b572..ce9481a9cc0 100644 --- a/specifyweb/frontend/js_src/lib/components/Permissions/definitions.ts +++ b/specifyweb/frontend/js_src/lib/components/Permissions/definitions.ts @@ -1,24 +1,20 @@ export const tableActions = ['read', 'create', 'update', 'delete'] as const; export const collectionAccessResource = '/system/sp7/collection'; export const operationPolicies = { + '/system/sp7/collection': ['access'], + '/admin/user/password': ['update'], '/admin/user/agents': ['update'], + '/admin/user/sp6/is_admin': ['update'], + '/record/merge': ['update', 'delete'], '/admin/user/invite_link': ['create'], '/admin/user/oic_providers': ['read'], - '/admin/user/password': ['update'], '/admin/user/sp6/collection_access': ['read', 'update'], - '/admin/user/sp6/is_admin': ['update'], - '/attachment_import/dataset': [ - 'create', - 'update', - 'delete', - 'upload', - 'rollback', - ], + '/report': ['execute'], '/export/dwca': ['execute'], '/export/feed': ['force_update'], - '/permissions/library/roles': ['read', 'create', 'update', 'delete'], '/permissions/list_admins': ['read'], '/permissions/policies/user': ['read', 'update'], + '/permissions/user/roles': ['read', 'update'], '/permissions/roles': [ 'read', 'create', @@ -26,16 +22,8 @@ export const operationPolicies = { 'delete', 'copy_from_library', ], - '/permissions/user/roles': ['read', 'update'], - '/querybuilder/query': [ - 'execute', - 'export_csv', - 'export_kml', - 'create_recordset', - ], - '/record/merge': ['update', 'delete'], - '/report': ['execute'], - '/system/sp7/collection': ['access'], + '/permissions/library/roles': ['read', 'create', 'update', 'delete'], + '/tree/edit/taxon': ['merge', 'move', 'synonymize', 'desynonymize', 'repair'], '/tree/edit/geography': [ 'merge', 'move', @@ -43,35 +31,33 @@ export const operationPolicies = { 'desynonymize', 'repair', ], - '/tree/edit/geologictimeperiod': [ + '/tree/edit/storage': [ 'merge', 'move', 'synonymize', 'desynonymize', 'repair', + 'bulk_move', ], - '/tree/edit/lithostrat': [ + '/tree/edit/geologictimeperiod': [ 'merge', 'move', 'synonymize', 'desynonymize', 'repair', ], - '/tree/edit/storage': [ + '/tree/edit/lithostrat': [ 'merge', 'move', - 'bulk_move', 'synonymize', 'desynonymize', 'repair', ], - '/tree/edit/taxon': ['merge', 'move', 'synonymize', 'desynonymize', 'repair'], - '/tree/edit/tectonicunit': [ - 'merge', - 'move', - 'synonymize', - 'desynonymize', - 'repair', + '/querybuilder/query': [ + 'execute', + 'export_csv', + 'export_kml', + 'create_recordset', ], '/workbench/dataset': [ 'create', @@ -83,6 +69,25 @@ export const operationPolicies = { 'transfer', 'create_recordset', ], + '/attachment_import/dataset': [ + 'create', + 'update', + 'delete', + 'upload', + 'rollback', + ], + '/batch_edit/dataset': [ + 'create', + 'update', + 'delete', + 'commit', + 'rollback', + 'validate', + 'transfer', + 'create_recordset', + 'delete_dependents', + 'edit_multiple_tables', + ], } as const; /** From 86d2e4adddd4c2069a52385270fe0160fcb685f2 Mon Sep 17 00:00:00 2001 From: Sharad S Date: Tue, 26 Nov 2024 16:45:39 -0500 
Subject: [PATCH 08/79] Add menu item definitions --- .../js_src/lib/components/Header/menuItemDefinitions.ts | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/specifyweb/frontend/js_src/lib/components/Header/menuItemDefinitions.ts b/specifyweb/frontend/js_src/lib/components/Header/menuItemDefinitions.ts index f4aaaaebd3d..390ed36feba 100644 --- a/specifyweb/frontend/js_src/lib/components/Header/menuItemDefinitions.ts +++ b/specifyweb/frontend/js_src/lib/components/Header/menuItemDefinitions.ts @@ -3,6 +3,7 @@ */ import { attachmentsText } from '../../localization/attachments'; +import { batchEditText } from '../../localization/batchEdit'; import { commonText } from '../../localization/common'; import { headerText } from '../../localization/header'; import { interactionsText } from '../../localization/interactions'; @@ -109,6 +110,11 @@ const rawMenuItems = ensure>>()({ icon: icons.chartBar, enabled: () => hasPermission('/querybuilder/query', 'execute'), }, + batchEdit: { + url: '/specify/overlay/batch-edit', + title: batchEditText.batchEdit(), + icon: icons.table, + } } as const); export type MenuItemName = keyof typeof rawMenuItems | 'search'; From aa641f422c15326390f85971b9a84f8f9a6baf05 Mon Sep 17 00:00:00 2001 From: Sharad S Date: Tue, 26 Nov 2024 16:46:13 -0500 Subject: [PATCH 09/79] Add batch edit to Queries --- .../lib/components/QueryBuilder/Wrapped.tsx | 35 ++++++++++++------- .../lib/components/Router/OverlayRoutes.tsx | 10 ++++++ 2 files changed, 33 insertions(+), 12 deletions(-) diff --git a/specifyweb/frontend/js_src/lib/components/QueryBuilder/Wrapped.tsx b/specifyweb/frontend/js_src/lib/components/QueryBuilder/Wrapped.tsx index 1a5e722c8d1..c9566c14906 100644 --- a/specifyweb/frontend/js_src/lib/components/QueryBuilder/Wrapped.tsx +++ b/specifyweb/frontend/js_src/lib/components/QueryBuilder/Wrapped.tsx @@ -17,6 +17,7 @@ import { Container } from '../Atoms'; import { Button } from '../Atoms/Button'; import { Form } from '../Atoms/Form'; import { icons } from '../Atoms/Icons'; +import { BatchEditFromQuery } from '../BatchEdit'; import { ReadOnlyContext } from '../Core/Contexts'; import type { SerializedResource } from '../DataModel/helperTypes'; import type { SpecifyResource } from '../DataModel/legacyTypes'; @@ -40,6 +41,7 @@ import { } from '../WbPlanView/mappingHelpers'; import { getMappingLineData } from '../WbPlanView/navigator'; import { navigatorSpecs } from '../WbPlanView/navigatorSpecs'; +import { datasetVariants } from '../WbUtils/datasetVariants'; import { CheckReadAccess } from './CheckReadAccess'; import { MakeRecordSetButton } from './Components'; import { IsQueryBasicContext, useQueryViewPref } from './Context'; @@ -196,7 +198,6 @@ function Wrapped({ type: 'any', startValue: '', isNot: false, - isStrict: false, }, ], isDisplay: true, @@ -589,17 +590,27 @@ function Wrapped({ ) : undefined } extraButtons={ - query.countOnly ? undefined : ( - - ) + <> + {datasetVariants.batchEdit.canCreate() && ( + + )} + {query.countOnly ? 
undefined : ( + + )} + } fields={state.fields} forceCollection={forceCollection} diff --git a/specifyweb/frontend/js_src/lib/components/Router/OverlayRoutes.tsx b/specifyweb/frontend/js_src/lib/components/Router/OverlayRoutes.tsx index 08769efeb6f..6516db38c46 100644 --- a/specifyweb/frontend/js_src/lib/components/Router/OverlayRoutes.tsx +++ b/specifyweb/frontend/js_src/lib/components/Router/OverlayRoutes.tsx @@ -15,6 +15,7 @@ import { wbText } from '../../localization/workbench'; import type { RA } from '../../utils/types'; import { Redirect } from './Redirect'; import type { EnhancedRoute } from './RouterUtils'; +import { batchEditText } from '../../localization/batchEdit'; /* eslint-disable @typescript-eslint/promise-function-async */ /** @@ -239,6 +240,15 @@ export const overlayRoutes: RA = [ ({ TableUniquenessRules }) => TableUniquenessRules ), }, + { + // There's no physical difference between a workbench and batch-edit dataset, but separating them out helps UI. + path: 'batch-edit', + title: batchEditText.batchEdit(), + element: () => + import('../Toolbar/WbsDialog').then( + ({ BatchEditDataSetsOverlay }) => BatchEditDataSetsOverlay + ), + }, ], }, ]; From d1aab76df3ea0295069bb1bdfd481fb25f350cf9 Mon Sep 17 00:00:00 2001 From: Sharad S Date: Tue, 26 Nov 2024 16:46:39 -0500 Subject: [PATCH 10/79] Add batch edit localizations --- .../js_src/lib/localization/batchEdit.ts | 100 ++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 specifyweb/frontend/js_src/lib/localization/batchEdit.ts diff --git a/specifyweb/frontend/js_src/lib/localization/batchEdit.ts b/specifyweb/frontend/js_src/lib/localization/batchEdit.ts new file mode 100644 index 00000000000..6c090d6a5a1 --- /dev/null +++ b/specifyweb/frontend/js_src/lib/localization/batchEdit.ts @@ -0,0 +1,100 @@ +/** + * Localization strings used for displaying Attachments + * + * @module + */ + +import { createDictionary } from './utils'; + +export const batchEditText = createDictionary({ + batchEdit: { + 'en-us': 'Batch Edit', + }, + numberOfRecords: { + 'en-us': 'Number of records selected from the query', + }, + removeField: { + 'en-us': + 'Field not supported for batch edit. Either remove the field, or make it hidden.', + }, + addTreeRank: { + 'en-us': 'Please add the following missing rank to the query', + }, + datasetName: { + 'en-us': '{queryName:string} {datePart:string}', + }, + errorInQuery: { + 'en-us': 'Following errors were found in the query', + }, + createUpdateDataSetInstructions: { + 'en-us': 'Use the query builder to make a new batch edit dataset', + }, + showRollback: { + 'en-us': 'Show revert button', + }, + showRollbackDescription: { + 'en-us': + 'Revert is currently an experimental feature. This preference will hide the button', + }, + commit: { + 'en-us': 'Commit', + }, + startCommitDescription: { + 'en-us': + 'Commiting the Data Set will update, add, and delete the data from the spreadsheet to the Specify database.', + }, + startRevertDescription: { + 'en-us': + 'Rolling back the dataset will re-update the values, delete created records, and create new records', + }, + commitSuccessfulDescription: { + 'en-us': `Click on the "Results" button to see the number of records affected in each database table`, + }, + dateSetRevertDescription: { + 'en-us': `This Rolledback Data Set is saved, however, it cannot be edit. 
Please re-run the query`, + }, + committing: { + 'en-us': 'Committing', + }, + beStatusCommit: { + 'en-us': 'Data Set Commit Status', + }, + startCommit: { + 'en-us': 'Begin Data Set Commit?', + }, + commitErrors: { + 'en-us': 'Commit Failed due to Error Cells', + }, + commitErrorsDescription: { + 'en-us': 'The Commit failed due to one or more cell value errors.', + }, + commitCancelled: { + 'en-us': 'Commit Cancelled', + }, + commitCancelledDescription: { + 'en-us': 'Commit Cancelled Description', + }, + commitSuccessful: { + 'en-us': 'Commit Completed with No Errors', + }, + batchEditRecordSetName: { + 'en-us': 'BE commit of "{dataSet:string}"', + }, + deferForMatch: { + 'en-us': 'Use only visible fields for match', + }, + deferForMatchDescription: { + 'en-us': + 'If true, invisible database fields will not be used for matching. Default value is {default:boolean}', + }, + deferForNullCheck: { + 'en-us': 'Use only visible fields for empty record check', + }, + deferForNullCheckDescription: { + 'en-us': + 'If true, invisible database fields will not be used for determining whether the record is empty or not. Default value is {default: boolean}', + }, + batchEditPrefs: { + 'en-us': 'Batch Edit Prefs', + }, +} as const); From 1e6abff29ca47446a8e1ae2aacd33fb5dfb868fd Mon Sep 17 00:00:00 2001 From: Sharad S Date: Tue, 26 Nov 2024 16:46:54 -0500 Subject: [PATCH 11/79] Workbench localizations for batch edit --- .../js_src/lib/localization/workbench.ts | 133 +++++------------- 1 file changed, 37 insertions(+), 96 deletions(-) diff --git a/specifyweb/frontend/js_src/lib/localization/workbench.ts b/specifyweb/frontend/js_src/lib/localization/workbench.ts index 133aaac36c6..2f1f7d438ff 100644 --- a/specifyweb/frontend/js_src/lib/localization/workbench.ts +++ b/specifyweb/frontend/js_src/lib/localization/workbench.ts @@ -384,11 +384,6 @@ export const wbText = createDictionary({ }, startUpload: { 'en-us': 'Begin Data Set Upload?', - 'ru-ru': 'Начать загрузку набора данных?', - 'es-es': '¿Comenzar carga de conjunto de datos?', - 'fr-fr': "Commencer le téléchargement de l'ensemble de données ?", - 'uk-ua': 'Почати завантаження набору даних?', - 'de-ch': 'Hochladen des Datensatzes beginnen?', }, startUploadDescription: { 'en-us': @@ -644,11 +639,6 @@ export const wbText = createDictionary({ }, uploadSuccessful: { 'en-us': 'Upload Completed with No Errors', - 'ru-ru': 'Загрузка завершена без ошибок', - 'es-es': 'Carga completada sin errores', - 'fr-fr': 'Téléchargement terminé sans erreur', - 'uk-ua': 'Завантаження завершено без помилок', - 'de-ch': 'Upload ohne Fehler abgeschlossen', }, uploadSuccessfulDescription: { 'en-us': ` @@ -678,57 +668,14 @@ export const wbText = createDictionary({ }, uploadErrors: { 'en-us': 'Upload Failed due to Error Cells', - 'ru-ru': 'Ошибка загрузки из-за ошибок', - 'es-es': 'Carga fallida debido a celdas de error', - 'fr-fr': "Échec du téléchargement en raison de cellules d'erreur", - 'uk-ua': 'Помилка завантаження через клітинки помилок', - 'de-ch': 'Der Upload ist aufgrund fehlerhafter Zellen fehlgeschlagen', }, uploadErrorsDescription: { - 'en-us': 'The upload failed due to one or more cell value errors.', - 'ru-ru': - 'Загрузка не удалась из-за одной или нескольких ошибок значений ячеек.', - 'es-es': 'La carga falló debido a uno o más errores de valor de celda.', - 'fr-fr': ` - Le téléchargement a échoué en raison d'une ou plusieurs erreurs de valeur - de cellule. 
- `, - 'uk-ua': - 'Помилка завантаження через одну або кілька помилок значення клітинки.', - 'de-ch': ` - Der Upload ist aufgrund eines oder mehrerer Zellenwertfehler - fehlgeschlagen. - `, + 'en-us': 'The Upload failed due to one or more cell value errors.', }, uploadErrorsSecondDescription: { 'en-us': ` Validate the Data Set and review the mouseover hints for each error cell, - then make the appropriate corrections. Save and retry the Upload. - `, - 'ru-ru': ` - Проверте набор данных и наведите указатель мыши на каждую ячейку с - ошибкой, затем сделайте соответствующие исправления, сохраните и повторите - попытку. - `, - 'es-es': ` - Valide el conjunto de datos y revise las sugerencias del mouseover para - cada celda de error, luego haga las correcciones apropiadas. Guarde y - vuelva a intentar la carga. - `, - 'fr-fr': ` - Validez l'ensemble de données et examinez les conseils de passage de la - souris pour chaque cellule d'erreur, puis apportez les corrections - appropriées. Enregistrez et réessayez le téléchargement. - `, - 'uk-ua': ` - Перевірте набір даних і перегляньте підказки для кожної клітинки помилки, - а потім внесіть відповідні виправлення. Збережіть і повторіть спробу - завантаження. - `, - 'de-ch': ` - Validieren Sie den Datensatz und überprüfen Sie die Mouseover-Hinweise für - jede Fehlerzelle. Nehmen Sie dann die entsprechenden Korrekturen vor. - Speichern Sie und versuchen Sie den Upload erneut. + then make the appropriate corrections. Save and retry the {type:string}. `, }, dataSetRollback: { @@ -797,19 +744,9 @@ export const wbText = createDictionary({ }, uploadCanceled: { 'en-us': 'Upload Cancelled', - 'ru-ru': 'Загрузка отменена', - 'es-es': 'Subida cancelada', - 'de-ch': 'Datensatzvalidierung abgebrochen.', - 'fr-fr': 'Téléchargement annulé', - 'uk-ua': 'Завантаження скасовано', }, uploadCanceledDescription: { 'en-us': 'Data Set Upload cancelled.', - 'ru-ru': 'Загрузка набора данных отменена.', - 'es-es': 'Carga de conjunto de datos cancelada.', - 'fr-fr': "Téléchargement de l'ensemble de données annulé.", - 'uk-ua': 'Завантаження набору даних скасовано.', - 'de-ch': 'Der Upload des Datensatzes wurde abgebrochen.', }, coordinateConverter: { 'en-us': 'Geocoordinate Format', @@ -971,14 +908,6 @@ export const wbText = createDictionary({ 'fr-fr': 'Télécharger le forfait', 'uk-ua': 'План завантаження', }, - potentialUploadResults: { - 'en-us': 'Potential Upload Results', - 'ru-ru': 'Возможные результаты загрузки', - 'es-es': 'Resultados potenciales de la carga', - 'fr-fr': 'Résultats potentiels du téléchargement', - 'uk-ua': 'Потенційні результати завантаження', - 'de-ch': 'Mögliche Upload-Ergebnisse', - }, noUploadResultsAvailable: { 'en-us': 'No upload results are available for this cell', 'ru-ru': 'Для этой ячейки нет результатов загрузки', @@ -986,28 +915,7 @@ export const wbText = createDictionary({ 'fr-fr': "Aucun résultat de téléchargement n'est disponible pour cette cellule", 'uk-ua': 'Для цієї клітинки немає результатів завантаження', - 'de-ch': 'Für diese Zelle sind keine Upload-Ergebnisse verfügbar', - }, - wbUploadedDescription: { - 'en-us': 'Number of new records created in each table:', - 'ru-ru': 'Количество новых записей, созданных в каждой таблице:', - 'es-es': 'Número de registros nuevos creados en cada tabla:', - 'fr-fr': 'Nombre de nouveaux enregistrements créés dans chaque table :', - 'uk-ua': 'Кількість нових записів, створених у кожній таблиці:', - 'de-ch': 'Anzahl der in jeder Tabelle erstellten neuen Datensätze:', - }, - 
wbUploadedPotentialDescription: { - 'en-us': 'Number of new records that would be created in each table:', - 'ru-ru': - 'Количество новых записей, которые будут созданы в каждой таблице:', - 'es-es': 'Número de registros nuevos que se crearían en cada tabla:', - 'fr-fr': ` - Nombre de nouveaux enregistrements qui seraient créés dans chaque table : - `, - 'uk-ua': 'Кількість нових записів, які будуть створені в кожній таблиці:', - 'de-ch': ` - Anzahl der neuen Datensätze, die in jeder Tabelle erstellt werden würden: - `, + 'de-ch': 'Für diese Zelle sind keine Uploasd-Ergebnisse verfügbar', }, navigationOptions: { 'en-us': 'Navigation Options', @@ -1263,7 +1171,7 @@ export const wbText = createDictionary({ 'de-ch': 'Neuer Datensatz {date}', }, dataSets: { - 'en-us': 'WorkBench Data Sets', + 'en-us': '{variant:string} Data Sets', 'ru-ru': 'Наборы данных', 'es-es': 'Conjuntos de datos de WorkBench', 'fr-fr': 'Ensembles de données WorkBench', @@ -1584,4 +1492,37 @@ export const wbText = createDictionary({ 'fr-fr': '{node:string} (dans {parent:string})', 'uk-ua': '{node:string} (у {parent:string})', }, + updatedCells: { + 'en-us': 'Updated Cells', + }, + deletedCells: { + 'en-us': 'Deleted Cells', + }, + affectedResults: { + 'en-us': 'Records affected', + }, + potentialAffectedResults: { + 'en-us': 'Potential records affected', + }, + wbAffectedDescription: { + 'en-us': 'Number of new records affected in each table:', + }, + wbAffectedPotentialDescription: { + 'en-us': 'Number of new records that would be affected in each table:', + }, + recordsCreated: { + 'en-us': 'Records created', + }, + recordsUpdated: { + 'en-us': 'Records updated', + }, + recordsDeleted: { + 'en-us': 'Records deleted (not including dependents)', + }, + recordsMatchedAndChanged: { + 'en-us': 'Records matched, different from current related', + }, + matchAndChanged: { + 'en-us': 'Matched and changed cells', + }, } as const); From 9fa7373ab4ad0cf256e0fcfe2691542fe7ed2201 Mon Sep 17 00:00:00 2001 From: Sharad S Date: Tue, 26 Nov 2024 16:47:15 -0500 Subject: [PATCH 12/79] Add variants for wb and batch edit --- .../components/WbUtils/datasetVariants.tsx | 154 ++++++++++++++++++ 1 file changed, 154 insertions(+) create mode 100644 specifyweb/frontend/js_src/lib/components/WbUtils/datasetVariants.tsx diff --git a/specifyweb/frontend/js_src/lib/components/WbUtils/datasetVariants.tsx b/specifyweb/frontend/js_src/lib/components/WbUtils/datasetVariants.tsx new file mode 100644 index 00000000000..3ce2f05ec72 --- /dev/null +++ b/specifyweb/frontend/js_src/lib/components/WbUtils/datasetVariants.tsx @@ -0,0 +1,154 @@ +import { batchEditText } from '../../localization/batchEdit'; +import { commonText } from '../../localization/common'; +import { wbPlanText } from '../../localization/wbPlan'; +import { wbText } from '../../localization/workbench'; +import { f } from '../../utils/functools'; +import { hasPermission } from '../Permissions/helpers'; +import { userPreferences } from '../Preferences/userPreferences'; +import type { Dataset } from '../WbPlanView/Wrapped'; + +const baseWbVariant = { + fetchUrl: '/api/workbench/dataset/', + sortConfig: { + key: 'listOfDataSets', + field: 'name', + }, + canImport: () => hasPermission('/workbench/dataset', 'create'), + canEdit: () => hasPermission('/workbench/dataset', 'update'), + route: (id: number) => `/specify/workbench/${id}`, + metaRoute: (id: number) => `/specify/overlay/workbench/${id}/meta/`, + canCreate: () => hasPermission('/workbench/dataset', 'create'), + canTransfer: () => 
hasPermission('/workbench/dataset', 'transfer'), + canUpdate: () => hasPermission('/workbench/dataset', 'update'), + canDo: () => hasPermission('/workbench/dataset', 'upload'), + canUndo: () => hasPermission('/workbench/dataset', 'unupload'), + canValidate: () => hasPermission('/workbench/dataset', 'validate'), + localization: { + datasetsDialog: { + header: (count: number) => + commonText.countLine({ + resource: wbText.dataSets({ variant: wbText.workBench() }), + count, + }), + empty: () => + `${wbText.wbsDialogEmpty()} ${ + hasPermission('/workbench/dataset', 'create') + ? wbText.createDataSetInstructions() + : '' + }`, + }, + viewer: { + do: wbText.upload(), + doStart: wbText.startUpload(), + doErrors: wbText.uploadErrors(), + doCancelled: wbText.uploadCanceled(), + doCancelledDescription: wbText.uploadCanceledDescription(), + doStartDescription: wbText.startUploadDescription(), + doErrorsDescription: wbText.uploadErrorsDescription(), + undo: wbText.rollback(), + undoConfirm: wbText.beginRollback(), + undoStartDescription: wbText.beginRollbackDescription(), + doSuccessfulDescription: wbText.uploadSuccessfulDescription(), + undoFinishedDescription: wbText.dataSetRollbackDescription(), + doing: wbText.uploading(), + doStatus: wbText.wbStatusUpload(), + doSuccessful: wbText.uploadSuccessful(), + }, + }, + documentationUrl: + 'https://discourse.specifysoftware.org/t/the-specify-7-workbench/540', +} as const; + +// Defines a shared interface to access dataset variants +export const datasetVariants = { + workbench: baseWbVariant, + workbenchChoosePlan: { + ...baseWbVariant, + fetchUrl: '/api/workbench/dataset/?with_plan', + sortConfig: baseWbVariant.sortConfig, + canImport: () => false, + canEdit: () => false, + localization: { + datasetsDialog: { + header: () => wbPlanText.copyPlan(), + empty: () => wbPlanText.noPlansToCopyFrom(), + }, + }, + }, + batchEdit: { + ...baseWbVariant, + fetchUrl: '/api/workbench/dataset/?isupdate=1', + sortConfig: { + key: 'listOfBatchEditDataSets', + field: 'name', + }, + // Cannot import via the header + canImport: () => false, + header: (count: number) => + commonText.countLine({ + resource: wbText.dataSets({ variant: batchEditText.batchEdit() }), + count, + }), + canEdit: () => hasPermission('/batch_edit/dataset', 'update'), + canCreate: () => hasPermission('/batch_edit/dataset', 'create'), + canTransfer: () => hasPermission('/batch_edit/dataset', 'transfer'), + canDo: () => hasPermission('/batch_edit/dataset', 'commit'), + canUndo: () => + userPreferences.get('batchEdit', 'editor', 'showRollback') && + hasPermission('/batch_edit/dataset', 'rollback'), + canValidate: () => hasPermission('/batch_edit/dataset', 'validate'), + localization: { + datasetsDialog: { + header: (count: number) => + commonText.countLine({ + resource: wbText.dataSets({ variant: batchEditText.batchEdit() }), + count, + }), + empty: () => + `${wbText.wbsDialogEmpty()} ${ + hasPermission('/batch_edit/dataset', 'create') + ? 
batchEditText.createUpdateDataSetInstructions() + : '' + }`, + }, + viewer: { + do: batchEditText.commit(), + doStart: batchEditText.startCommit(), + doErrors: batchEditText.commitErrors(), + doCancelled: batchEditText.commitCancelled(), + doErrorsDescription: batchEditText.commitErrorsDescription(), + doStartDescription: batchEditText.startCommitDescription(), + doCancelledDescription: batchEditText.commitCancelledDescription(), + undo: wbText.rollback(), + undoConfirm: wbText.beginRollback(), + undoStartDescription: batchEditText.startRevertDescription(), + doSuccessfulDescription: batchEditText.commitSuccessfulDescription(), + undoFinishedDescription: batchEditText.dateSetRevertDescription(), + doing: batchEditText.committing(), + doStatus: batchEditText.beStatusCommit(), + doSuccessful: batchEditText.commitSuccessful(), + }, + }, + // TODO: Change this + documentationUrl: 'https://www.youtube.com/watch?v=dQw4w9WgXcQ', + }, + bulkAttachment: { + fetchUrl: '/attachment_gw/dataset/', + sortConfig: { + key: 'attachmentDatasets', + field: 'name', + }, + canImport: () => hasPermission('/attachment_import/dataset', 'create'), + header: f.never, + onEmpty: f.never, + canEdit: () => hasPermission('/attachment_import/dataset', 'update'), + route: (id: number) => `/specify/attachments/import/${id}`, + // Actually, in retrorespect, this would be a nice feature + metaRoute: f.never, + documentationUrl: + 'https://discourse.specifysoftware.org/t/batch-attachment-uploader/1374', + }, +} as const; + +export const resolveVariantFromDataset = (dataset: Dataset) => + datasetVariants[dataset.isupdate ? 'batchEdit' : 'workbench']; From bcad1576b7157d75f3184c400c5ae50166719ac8 Mon Sep 17 00:00:00 2001 From: Sharad S Date: Tue, 26 Nov 2024 17:03:30 -0500 Subject: [PATCH 13/79] Add back tectonic tree permission --- .../js_src/lib/components/Permissions/definitions.ts | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/specifyweb/frontend/js_src/lib/components/Permissions/definitions.ts b/specifyweb/frontend/js_src/lib/components/Permissions/definitions.ts index ce9481a9cc0..f2ec817812f 100644 --- a/specifyweb/frontend/js_src/lib/components/Permissions/definitions.ts +++ b/specifyweb/frontend/js_src/lib/components/Permissions/definitions.ts @@ -53,6 +53,13 @@ export const operationPolicies = { 'desynonymize', 'repair', ], + '/tree/edit/tectonicunit': [ + 'merge', + 'move', + 'synonymize', + 'desynonymize', + 'repair', + ], '/querybuilder/query': [ 'execute', 'export_csv', From db53d4394824b46e6faf010727e9bbe602869471 Mon Sep 17 00:00:00 2001 From: Sharad S Date: Tue, 26 Nov 2024 17:05:34 -0500 Subject: [PATCH 14/79] keep isStrict --- .../frontend/js_src/lib/components/QueryBuilder/Wrapped.tsx | 1 + 1 file changed, 1 insertion(+) diff --git a/specifyweb/frontend/js_src/lib/components/QueryBuilder/Wrapped.tsx b/specifyweb/frontend/js_src/lib/components/QueryBuilder/Wrapped.tsx index c9566c14906..8fa63514b2b 100644 --- a/specifyweb/frontend/js_src/lib/components/QueryBuilder/Wrapped.tsx +++ b/specifyweb/frontend/js_src/lib/components/QueryBuilder/Wrapped.tsx @@ -198,6 +198,7 @@ function Wrapped({ type: 'any', startValue: '', isNot: false, + isStrict: false, }, ], isDisplay: true, From e0fea3c91407ba336e822d5b08a45fcb23120d49 Mon Sep 17 00:00:00 2001 From: Sharad S Date: Tue, 26 Nov 2024 17:08:08 -0500 Subject: [PATCH 15/79] Fix lost translations --- .../js_src/lib/localization/workbench.ts | 100 +++++++++++++++++- 1 file changed, 96 insertions(+), 4 deletions(-) diff --git 
a/specifyweb/frontend/js_src/lib/localization/workbench.ts b/specifyweb/frontend/js_src/lib/localization/workbench.ts index 2f1f7d438ff..01d672921c1 100644 --- a/specifyweb/frontend/js_src/lib/localization/workbench.ts +++ b/specifyweb/frontend/js_src/lib/localization/workbench.ts @@ -384,6 +384,11 @@ export const wbText = createDictionary({ }, startUpload: { 'en-us': 'Begin Data Set Upload?', + 'ru-ru': 'Начать загрузку набора данных?', + 'es-es': '¿Comenzar carga de conjunto de datos?', + 'fr-fr': "Commencer le téléchargement de l'ensemble de données ?", + 'uk-ua': 'Почати завантаження набору даних?', + 'de-ch': 'Hochladen des Datensatzes beginnen?', }, startUploadDescription: { 'en-us': @@ -639,6 +644,11 @@ export const wbText = createDictionary({ }, uploadSuccessful: { 'en-us': 'Upload Completed with No Errors', + 'ru-ru': 'Загрузка завершена без ошибок', + 'es-es': 'Carga completada sin errores', + 'fr-fr': 'Téléchargement terminé sans erreur', + 'uk-ua': 'Завантаження завершено без помилок', + 'de-ch': 'Upload ohne Fehler abgeschlossen', }, uploadSuccessfulDescription: { 'en-us': ` @@ -668,14 +678,57 @@ export const wbText = createDictionary({ }, uploadErrors: { 'en-us': 'Upload Failed due to Error Cells', + 'ru-ru': 'Ошибка загрузки из-за ошибок', + 'es-es': 'Carga fallida debido a celdas de error', + 'fr-fr': "Échec du téléchargement en raison de cellules d'erreur", + 'uk-ua': 'Помилка завантаження через клітинки помилок', + 'de-ch': 'Der Upload ist aufgrund fehlerhafter Zellen fehlgeschlagen', }, uploadErrorsDescription: { - 'en-us': 'The Upload failed due to one or more cell value errors.', + 'en-us': 'The upload failed due to one or more cell value errors.', + 'ru-ru': + 'Загрузка не удалась из-за одной или нескольких ошибок значений ячеек.', + 'es-es': 'La carga falló debido a uno o más errores de valor de celda.', + 'fr-fr': ` + Le téléchargement a échoué en raison d'une ou plusieurs erreurs de valeur + de cellule. + `, + 'uk-ua': + 'Помилка завантаження через одну або кілька помилок значення клітинки.', + 'de-ch': ` + Der Upload ist aufgrund eines oder mehrerer Zellenwertfehler + fehlgeschlagen. + `, }, uploadErrorsSecondDescription: { 'en-us': ` Validate the Data Set and review the mouseover hints for each error cell, - then make the appropriate corrections. Save and retry the {type:string}. + then make the appropriate corrections. Save and retry the Upload. + `, + 'ru-ru': ` + Проверте набор данных и наведите указатель мыши на каждую ячейку с + ошибкой, затем сделайте соответствующие исправления, сохраните и повторите + попытку. + `, + 'es-es': ` + Valide el conjunto de datos y revise las sugerencias del mouseover para + cada celda de error, luego haga las correcciones apropiadas. Guarde y + vuelva a intentar la carga. + `, + 'fr-fr': ` + Validez l'ensemble de données et examinez les conseils de passage de la + souris pour chaque cellule d'erreur, puis apportez les corrections + appropriées. Enregistrez et réessayez le téléchargement. + `, + 'uk-ua': ` + Перевірте набір даних і перегляньте підказки для кожної клітинки помилки, + а потім внесіть відповідні виправлення. Збережіть і повторіть спробу + завантаження. + `, + 'de-ch': ` + Validieren Sie den Datensatz und überprüfen Sie die Mouseover-Hinweise für + jede Fehlerzelle. Nehmen Sie dann die entsprechenden Korrekturen vor. + Speichern Sie und versuchen Sie den Upload erneut. 
`, }, dataSetRollback: { @@ -744,9 +797,19 @@ export const wbText = createDictionary({ }, uploadCanceled: { 'en-us': 'Upload Cancelled', + 'ru-ru': 'Загрузка отменена', + 'es-es': 'Subida cancelada', + 'de-ch': 'Datensatzvalidierung abgebrochen.', + 'fr-fr': 'Téléchargement annulé', + 'uk-ua': 'Завантаження скасовано', }, uploadCanceledDescription: { 'en-us': 'Data Set Upload cancelled.', + 'ru-ru': 'Загрузка набора данных отменена.', + 'es-es': 'Carga de conjunto de datos cancelada.', + 'fr-fr': "Téléchargement de l'ensemble de données annulé.", + 'uk-ua': 'Завантаження набору даних скасовано.', + 'de-ch': 'Der Upload des Datensatzes wurde abgebrochen.', }, coordinateConverter: { 'en-us': 'Geocoordinate Format', @@ -908,6 +971,14 @@ export const wbText = createDictionary({ 'fr-fr': 'Télécharger le forfait', 'uk-ua': 'План завантаження', }, + potentialUploadResults: { + 'en-us': 'Potential Upload Results', + 'ru-ru': 'Возможные результаты загрузки', + 'es-es': 'Resultados potenciales de la carga', + 'fr-fr': 'Résultats potentiels du téléchargement', + 'uk-ua': 'Потенційні результати завантаження', + 'de-ch': 'Mögliche Upload-Ergebnisse', + }, noUploadResultsAvailable: { 'en-us': 'No upload results are available for this cell', 'ru-ru': 'Для этой ячейки нет результатов загрузки', @@ -915,7 +986,28 @@ export const wbText = createDictionary({ 'fr-fr': "Aucun résultat de téléchargement n'est disponible pour cette cellule", 'uk-ua': 'Для цієї клітинки немає результатів завантаження', - 'de-ch': 'Für diese Zelle sind keine Uploasd-Ergebnisse verfügbar', + 'de-ch': 'Für diese Zelle sind keine Upload-Ergebnisse verfügbar', + }, + wbUploadedDescription: { + 'en-us': 'Number of new records created in each table:', + 'ru-ru': 'Количество новых записей, созданных в каждой таблице:', + 'es-es': 'Número de registros nuevos creados en cada tabla:', + 'fr-fr': 'Nombre de nouveaux enregistrements créés dans chaque table :', + 'uk-ua': 'Кількість нових записів, створених у кожній таблиці:', + 'de-ch': 'Anzahl der in jeder Tabelle erstellten neuen Datensätze:', + }, + wbUploadedPotentialDescription: { + 'en-us': 'Number of new records that would be created in each table:', + 'ru-ru': + 'Количество новых записей, которые будут созданы в каждой таблице:', + 'es-es': 'Número de registros nuevos que se crearían en cada tabla:', + 'fr-fr': ` + Nombre de nouveaux enregistrements qui seraient créés dans chaque table : + `, + 'uk-ua': 'Кількість нових записів, які будуть створені в кожній таблиці:', + 'de-ch': ` + Anzahl der neuen Datensätze, die in jeder Tabelle erstellt werden würden: + `, }, navigationOptions: { 'en-us': 'Navigation Options', @@ -1171,7 +1263,7 @@ export const wbText = createDictionary({ 'de-ch': 'Neuer Datensatz {date}', }, dataSets: { - 'en-us': '{variant:string} Data Sets', + 'en-us': 'WorkBench Data Sets', 'ru-ru': 'Наборы данных', 'es-es': 'Conjuntos de datos de WorkBench', 'fr-fr': 'Ensembles de données WorkBench', From 19f743bc6032fac1df146b7d9479af0515f80251 Mon Sep 17 00:00:00 2001 From: Sharad S Date: Wed, 27 Nov 2024 14:32:21 -0500 Subject: [PATCH 16/79] Add batch edit route --- specifyweb/stored_queries/batch_edit.py | 1178 +++++++++++++++++++++++ specifyweb/stored_queries/urls.py | 1 + specifyweb/stored_queries/views.py | 22 +- 3 files changed, 1197 insertions(+), 4 deletions(-) create mode 100644 specifyweb/stored_queries/batch_edit.py diff --git a/specifyweb/stored_queries/batch_edit.py b/specifyweb/stored_queries/batch_edit.py new file mode 100644 index 00000000000..e2d383288e8 
--- /dev/null +++ b/specifyweb/stored_queries/batch_edit.py @@ -0,0 +1,1178 @@ +# type: ignore + +# ^^ The above is because we etensively use recursive typedefs of named tuple in this file not supported on our MyPy 0.97 version. +# When typechecked in MyPy 1.11 (supports recursive typedefs), there is no type issue in the file. +# However, using 1.11 makes things slower in other files. + +from functools import reduce +from typing import ( + Any, + Callable, + Dict, + List, + NamedTuple, + Optional, + Tuple, + TypedDict, + Union, + Literal, +) +from specifyweb.permissions.permissions import has_target_permission +from specifyweb.specify.filter_by_col import CONCRETE_HIERARCHY +from specifyweb.specify.models import datamodel +from specifyweb.specify.load_datamodel import Field, Relationship, Table +from specifyweb.specify.datamodel import is_tree_table +from specifyweb.stored_queries.execution import execute +from specifyweb.stored_queries.queryfield import QueryField, fields_from_json +from specifyweb.stored_queries.queryfieldspec import ( + QueryFieldSpec, + QueryNode, + TreeRankQuery, +) +from specifyweb.workbench.models import Spdataset +from specifyweb.workbench.permissions import BatchEditDataSetPT +from specifyweb.workbench.upload.treerecord import TreeRecord +from specifyweb.workbench.upload.upload_plan_schema import parse_column_options +from specifyweb.workbench.upload.upload_table import UploadTable +from specifyweb.workbench.upload.uploadable import NULL_RECORD, Uploadable +from specifyweb.workbench.views import regularize_rows +from specifyweb.specify.func import Func +from . import models +import json + +from specifyweb.workbench.upload.upload_plan_schema import schema +from jsonschema import validate + +from django.db import transaction +from decimal import Decimal + +MaybeField = Callable[[QueryFieldSpec], Optional[Field]] + +# TODO: +# Investigate if any/some/most of the logic for making an upload plan could be moved to frontend and reused. +# - does generation of upload plan in the backend bc upload plan is not known (we don't know count of to-many). +# - seemed complicated to merge upload plan from the frontend +# - need to place id markers at correct level, so need to follow upload plan anyways. 
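# Illustrative sketch (not part of the patch): the rough end-to-end flow this
# module implements, using only names defined in this file (BatchEditProps,
# run_batch_edit_query, make_dataset, regularize_rows, fields_from_json). The
# wrapper name `build_batch_edit_dataset` is hypothetical.
#
# def build_batch_edit_dataset(collection, user, agent, spquery):
#     props = BatchEditProps(
#         collection=collection,
#         user=user,
#         contexttableid=int(spquery["contexttableid"]),
#         captions=spquery.get("captions", None),
#         limit=spquery.get("limit", 0),
#         recordsetid=spquery.get("recordsetid", None),
#         fields=fields_from_json(spquery["fields"]),
#         session_maker=models.session_context,
#     )
#     headers, rows, packs, plan, visual_order = run_batch_edit_query(props)
#     # Each row carries its id/version/ordernumber bookkeeping as a trailing JSON column.
#     mapped = [[*row, json.dumps({"batch_edit": pack})] for row, pack in zip(rows, packs)]
#     return make_dataset(
#         user=user,
#         collection=collection,
#         name=spquery["name"],
#         headers=headers,
#         regularized_rows=regularize_rows(len(headers), mapped, skip_empty=False),
#         agent=agent,
#         json_upload_plan=plan,
#         visual_order=visual_order,
#     )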
+ +# TODO: Play-around with localizing +NULL_RECORD_DESCRIPTION = "(Not included in the query results)" + +# TODO: add backend support for making system tables readonly +READONLY_TABLES = [*CONCRETE_HIERARCHY] + +SHARED_READONLY_FIELDS = [ + "timestampcreated", + "timestampmodified", + "version", + "nodenumber", + "highestchildnodenumber", + "rankid", + "fullname", +] + +SHARED_READONLY_RELATIONSHIPS = ["createdbyagent", "modifiedbyagent"] + + +def get_readonly_fields(table: Table): + fields = [*SHARED_READONLY_FIELDS, table.idFieldName.lower()] + relationships = [ + rel.name + for rel in table.relationships + if rel.relatedModelName.lower() in READONLY_TABLES + ] + if table.name.lower() == "determination": + relationships = ["preferredtaxon"] + elif is_tree_table(table): + relationships = ["definitionitem"] + + return fields, [*SHARED_READONLY_RELATIONSHIPS, *relationships] + + +FLOAT_FIELDS = ["java.lang.Float", "java.lang.Double", "java.math.BigDecimal"] + + +def parse(value: Optional[Any], query_field: QueryField) -> Any: + field = query_field.fieldspec.get_field() + if field is None or value is None: + return value + if field.type in FLOAT_FIELDS: + return float(value) + return value + + +def _get_nested_order(field_spec: QueryFieldSpec): + # don't care about ordernumber if it ain't nested + # won't affect logic, just data being saved. + if len(field_spec.join_path) == 0: + return None + return field_spec.table.get_field("ordernumber") + + +batch_edit_fields: Dict[str, Tuple[MaybeField, int]] = { + # technically, if version updates are correct, this is useless beyond base tables + # and to-manys. TODO: Do just that. remove it. sorts asc. using sort, the optimized + # dataset construction takes place. + "id": (lambda field_spec: field_spec.table.idField, 1), + # version control gets added here. no sort. + "version": (lambda field_spec: field_spec.table.get_field("version"), 0), + # ordernumber. no sort (actually adding a sort here is useless) + "order": (_get_nested_order, 1), +} + + +class BatchEditFieldPack(NamedTuple): + field: Optional[QueryField] = None + idx: Optional[int] = None # default value not there, for type safety + value: Any = None # stricten this? + + +class BatchEditPack(NamedTuple): + id: BatchEditFieldPack + order: BatchEditFieldPack + version: BatchEditFieldPack + + # extends a path to contain the last field + for a defined fields + @staticmethod + def from_field_spec(field_spec: QueryFieldSpec) -> "BatchEditPack": + # don't care about which way. bad things will happen if not sorted. + # not using assert () since it can be optimised out. 
+ if batch_edit_fields["id"][1] == 0 or batch_edit_fields["order"][1] == 0: + raise Exception("the ID field should always be sorted!") + + def extend_callback(sort_type): + def _callback(field): + return BatchEditPack._query_field( + field_spec._replace( + join_path=(*field_spec.join_path, field), date_part=None + ), + sort_type, + ) + + return _callback + + new_field_specs = { + key: BatchEditFieldPack( + idx=None, + field=Func.maybe(callback(field_spec), extend_callback(sort_type)), + value=None, + ) + for key, (callback, sort_type) in batch_edit_fields.items() + } + return BatchEditPack(**new_field_specs) + + def merge(self, other: "BatchEditPack") -> "BatchEditPack": + return BatchEditPack( + id=self.id if self.id.field is not None else other.id, + version=self.version if self.version.field is not None else other.version, + order=self.order if self.order.field is not None else other.order, + ) + + # a basic query field spec to field + @staticmethod + def _query_field(field_spec: QueryFieldSpec, sort_type: int): + return QueryField( + fieldspec=field_spec, + op_num=8, + value=None, + negate=False, + display=True, + format_name=None, + sort_type=sort_type, + ) + + def _index( + self, + start_idx: int, + current: Tuple[Dict[str, BatchEditFieldPack], List[QueryField]], + next: Tuple[int, Tuple[str, Tuple[MaybeField, int]]], + ): + current_dict, fields = current + field_idx, (field_name, _) = next + value: BatchEditFieldPack = getattr(self, field_name) + new_dict = { + **current_dict, + field_name: value._replace( + field=None, idx=((field_idx + start_idx) if value.field else None) + ), + } + new_fields = fields if value.field is None else [*fields, value.field] + return new_dict, new_fields + + def index_plan(self, start_index=0) -> Tuple["BatchEditPack", List[QueryField]]: + init: Tuple[Dict[str, BatchEditFieldPack], List[QueryField]] = ( + {}, + [], + ) + _dict, fields = reduce( + lambda accum, next: self._index( + start_idx=start_index, current=accum, next=next + ), + enumerate(batch_edit_fields.items()), + init, + ) + return BatchEditPack(**_dict), fields + + def bind(self, row: Tuple[Any]): + return BatchEditPack( + id=self.id._replace( + value=row[self.id.idx] if self.id.idx is not None else None, + ), + order=self.order._replace( + value=row[self.order.idx] if self.order.idx is not None else None + ), + version=self.version._replace( + value=row[self.version.idx] if self.version.idx is not None else None + ), + ) + + def to_json(self) -> Dict[str, Any]: + return { + "id": self.id.value, + "ordernumber": self.order.value, + "version": self.version.value, + } + + # we not only care that it is part of tree, but also care that there is rank to tree + def is_part_of_tree(self, query_fields: List[QueryField]) -> bool: + if self.id.idx is None: + return False + id_field = self.id.idx + field = query_fields[id_field - 1] + join_path = field.fieldspec.join_path + if len(join_path) < 2: + return False + return isinstance(join_path[-2], TreeRankQuery) + + +# These constants are purely for memory optimization, no code depends and/or cares if this is constant. +EMPTY_FIELD = BatchEditFieldPack() +EMPTY_PACK = BatchEditPack(id=EMPTY_FIELD, order=EMPTY_FIELD, version=EMPTY_FIELD) + + +# FUTURE: this already supports nested-to-many for most part +# wb plan, but contains query fields along with indexes to look-up in a result row. +# TODO: see if it can be moved + combined with front-end logic. 
I kept all parsing on backend, but there might be possible beneft in doing this +# on the frontend (it already has code from mapping path -> upload plan) +class RowPlanMap(NamedTuple): + batch_edit_pack: BatchEditPack + columns: List[BatchEditFieldPack] = [] + to_one: Dict[str, "RowPlanMap"] = {} + to_many: Dict[str, "RowPlanMap"] = {} + is_naive: bool = True + + @staticmethod + def _merge( + current: Dict[str, "RowPlanMap"], other: Tuple[str, "RowPlanMap"] + ) -> Dict[str, "RowPlanMap"]: + key, other_plan = other + return { + **current, + # merge if other is also found in ours + key: (other_plan if key not in current else current[key].merge(other_plan)), + } + + # takes two row plans, combines them together. Adjusts is_naive. + def merge(self: "RowPlanMap", other: "RowPlanMap") -> "RowPlanMap": + new_columns = [*self.columns, *other.columns] + batch_edit_pack = other.batch_edit_pack.merge(self.batch_edit_pack) + is_self_naive = self.is_naive and other.is_naive + # BUG: Handle this more gracefully for to-ones. + # That is, we'll currently incorrectly disallow making new ones. Fine for now. + to_one = reduce(RowPlanMap._merge, other.to_one.items(), self.to_one) + to_many = reduce(RowPlanMap._merge, other.to_many.items(), self.to_many) + return RowPlanMap( + batch_edit_pack, + new_columns, + to_one, + to_many, + is_naive=is_self_naive, + ) + + @staticmethod + def _index( + current: Tuple[int, Dict[str, "RowPlanMap"], List[QueryField]], + other: Tuple[str, "RowPlanMap"], + ): + next_start_index = current[0] + other_indexed, fields = other[1].index_plan(start_index=next_start_index) + to_return = ( + (next_start_index + len(fields)), + {**current[1], other[0]: other_indexed}, + [*current[2], *fields], + ) + return to_return + + # to make things simpler, returns the QueryFields along with indexed plan, which are expected to be used together + def index_plan(self, start_index=1) -> Tuple["RowPlanMap", List[QueryField]]: + next_index = len(self.columns) + start_index + # For optimization, and sanity, we remove the field from columns, as they are now completely redundant (we always know what they are using the id) + _columns = [ + column._replace(idx=index, field=None) + for index, column in zip(range(start_index, next_index), self.columns) + ] + _batch_indexed, _batch_fields = ( + self.batch_edit_pack.index_plan(start_index=next_index) + if self.batch_edit_pack + else (None, []) + ) + next_index += len(_batch_fields) + init: Callable[[int], Tuple[int, Dict[str, RowPlanMap], List[QueryField]]] = ( + lambda _start: (_start, {}, []) + ) + next_index, _to_one, to_one_fields = reduce( + RowPlanMap._index, + # makes the order deterministic, would be funny otherwise + Func.sort_by_key(self.to_one), + init(next_index), + ) + next_index, _to_many, to_many_fields = reduce( + RowPlanMap._index, Func.sort_by_key(self.to_many), (init(next_index)) + ) + column_fields = [column.field for column in self.columns if column.field] + return ( + RowPlanMap( + columns=_columns, + to_one=_to_one, + to_many=_to_many, + batch_edit_pack=_batch_indexed, + is_naive=self.is_naive, + ), + [*column_fields, *_batch_fields, *to_one_fields, *to_many_fields], + ) + + # helper for generating an row plan for a single query field + # handles formatted/aggregated self or relationships correctly (places them in upload-plan at correct level) + # it's complicated to place aggregated within the to-many table. but, since we don't map it to anything, we equivalently place it + # on the penultimate table's column. 
that is, say collectingevent -> collectors (aggregated). Semantically, (aggregated) should be on + # on the colletors table (as a column). Instead, we put it as a column in collectingevent. This has no visual difference (it is unmapped) anyways. + @staticmethod + def _recur_row_plan( + running_path: List[QueryNode], # using tuple causes typing issue + next_path: List[QueryNode], + next_table: Table, # bc queryfieldspecs will be terminated early on + original_field: QueryField, + ) -> "RowPlanMap": + + original_field_spec = original_field.fieldspec + + # contains partial path + partial_field_spec = original_field_spec._replace( + join_path=tuple(running_path), table=next_table + ) + + # to handle CO->(formatted), that's it. this function will never be called with empty path other than top-level formatted/aggregated + rest: List[QueryNode] = [] + + if len(next_path) == 0: + node = None + rest = [] + else: + node = next_path[0] + rest = next_path[1:] + + # Meh, simplifies other stuff going on in other places + # that is, we'll include the pack of CO if query is like CO -> (formatted) or CO -> CE (formatted). + # No, this doesn't mean IDs of the formatted/aggregated are including (that is impossible) + batch_edit_pack = BatchEditPack.from_field_spec(partial_field_spec) + + if len(rest) == 0: + # we are at the end + return RowPlanMap( + columns=[BatchEditFieldPack(field=original_field)], + batch_edit_pack=batch_edit_pack, + is_naive=(original_field.op_num == 8), + ) + + assert isinstance(node, TreeRankQuery) or isinstance( + node, Relationship + ), "using a non-relationship as a pass through!" + + rel_type = ( + "to_many" + if node.type.endswith("to-many") or node.type == "zero-to-one" + else "to_one" + ) + + rel_name = ( + node.name.lower() if not isinstance(node, TreeRankQuery) else node.name + ) + + remaining_map = RowPlanMap._recur_row_plan( + [*running_path, node], + rest, + datamodel.get_table_strict(node.relatedModelName), + original_field, + ) + + boiler = RowPlanMap(columns=[], batch_edit_pack=batch_edit_pack) + + def _augment_is_naive(rel_type: Union[Literal["to_one"], Literal["to_many"]]): + + rest_plan = {rel_name: remaining_map} + if rel_type == "to_one": + # Propagate is_naive up + return boiler._replace( + is_naive=remaining_map.is_naive, to_one=rest_plan + ) + + # bc the user eperience guys want to be able to make new dets/preps one hop away + # but, we can't allow it for ordernumber when filtering. pretty annoying. + # and definitely not naive for any tree, well, technically it is possible, but for user's sake. + is_naive = not is_tree_table(next_table) and ( + ( + len(running_path) == 0 + and (remaining_map.batch_edit_pack.order.field is None) + ) + or remaining_map.is_naive + ) + return boiler._replace( + to_many={ + # to force-naiveness + rel_name: remaining_map._replace(is_naive=is_naive) + } + ) + + return _augment_is_naive(rel_type) + + # generates multiple row plan maps, and merges them into one + # this doesn't index the row plan, bc that is complicated. + # instead, see usage of index_plan() which indexes the plan in one go. 
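# Illustrative sketch (not part of the patch): how the plan below is typically
# driven (see run_batch_edit_query further down). get_row_plan, index_plan and
# bind are the methods defined in this class; the variable names are assumptions.
#
# visible_fields = [field for field in fields if field.display]
# plan = RowPlanMap.get_row_plan(visible_fields)      # merged, un-indexed plan
# indexed, query_fields = plan.index_plan()           # assign result-row indexes, collect QueryFields
# canonical = indexed.bind(row, query_fields)         # one query result row -> RowPlanCanonical tree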
+ @staticmethod + def get_row_plan(fields: List[QueryField]) -> "RowPlanMap": + start: List[QueryNode] = [] + iter = [ + RowPlanMap._recur_row_plan( + start, + list(field.fieldspec.join_path), + field.fieldspec.root_table, + field, + ) + for field in fields + ] + + plan = reduce( + lambda current, other: current.merge(other), + iter, + RowPlanMap(batch_edit_pack=EMPTY_PACK), + ) + return plan + + # def skim_plan(self: "RowPlanMap", parent_is_naive=True) -> "RowPlanMap": + # is_current_naive = parent_is_naive and self.is_naive + # to_one = { + # key: value.skim_plan(is_current_naive) + # for (key, value) in self.to_one.items() + # } + # to_many = { + # key: value.skim_plan(is_current_naive) + # for (key, value) in self.to_many.items() + # } + # return self._replace(to_one=to_one, to_many=to_many, is_naive=is_current_naive) + + @staticmethod + def _bind_null(value: "RowPlanCanonical") -> List["RowPlanCanonical"]: + if value.batch_edit_pack.id.value is None: + return [] + return [value] + + def bind( + self, row: Tuple[Any], query_fields: List[QueryField] + ) -> "RowPlanCanonical": + columns = [ + column._replace( + # accounting for id below + value=parse(row[column.idx], query_fields[column.idx - 1]), + field=None, + ) + for column in self.columns + # Careful: this can be 0, so not doing "if not column.idx" + if column.idx is not None + ] + to_ones = { + key: value.bind(row, query_fields) for (key, value) in self.to_one.items() + } + to_many = { + key: RowPlanMap._bind_null(value.bind(row, query_fields)) + for (key, value) in self.to_many.items() + } + pack = self.batch_edit_pack.bind(row) + return RowPlanCanonical(pack, columns, to_ones, to_many) + + # gets a null record to fill-out empty space + # doesn't support nested-to-many's yet - complicated + def nullify(self, parent_is_phantom=False) -> "RowPlanCanonical": + # since is_naive is set, + is_phantom = parent_is_phantom or not self.is_naive + columns = [ + pack._replace(value=NULL_RECORD_DESCRIPTION if is_phantom else None) + for pack in self.columns + ] + to_ones = { + key: value.nullify(is_phantom) for (key, value) in self.to_one.items() + } + batch_edit_pack = self.batch_edit_pack._replace( + id=self.batch_edit_pack.id._replace( + value=(NULL_RECORD if is_phantom else None) + ) + ) + return RowPlanCanonical(batch_edit_pack, columns, to_ones) + + # a fake upload plan that keeps track of the maximum ids / order numbrs seen in to-manys + def to_many_planner(self) -> "RowPlanMap": + to_one = {key: value.to_many_planner() for (key, value) in self.to_one.items()} + to_many = { + key: RowPlanMap( + batch_edit_pack=( + BatchEditPack( + order=BatchEditFieldPack(value=0), + id=EMPTY_FIELD, + version=EMPTY_FIELD, + ) + if value.batch_edit_pack.order.idx is not None + # only use id if order field is not present + else BatchEditPack( + id=BatchEditFieldPack(value=0), + order=EMPTY_FIELD, + version=EMPTY_FIELD, + ) + ) + ) + for (key, value) in self.to_many.items() + } + return RowPlanMap( + batch_edit_pack=EMPTY_PACK, + columns=[], + to_one=to_one, + to_many=to_many, + ) + + +# the main data-structure which stores the data +# RowPlanMap is just a map, this stores actual data (to many is a dict of list, rather than just a dict) +# maybe unify that with RowPlanMap? 
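# Illustrative sketch (not part of the patch): how RowPlanCanonical instances are
# accumulated from flat query rows, mirroring the loop in run_batch_edit_query
# below. Variable names are assumptions.
#
# to_many_planner = indexed.to_many_planner()
# visited: list = []
# previous_id, previous = None, RowPlanCanonical(EMPTY_PACK)
# for row in rows["results"]:
#     _, merged = previous.merge(row, indexed, query_fields)
#     to_many_planner = merged.update_to_manys(to_many_planner)   # track max to-many counts
#     if previous_id != merged.batch_edit_pack.id.value:
#         visited.append(previous)   # a new base-table id closes the previous record
#         previous_id = merged.batch_edit_pack.id.value
#     previous = merged
# visited.append(previous)           # the last record has no successor to close it
# visited = visited[1:]              # drop the initial empty placeholder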
+ + +class RowPlanCanonical(NamedTuple): + batch_edit_pack: BatchEditPack + columns: List[BatchEditFieldPack] = [] + to_one: Dict[str, "RowPlanCanonical"] = {} + to_many: Dict[str, List["RowPlanCanonical"]] = {} + + @staticmethod + def _maybe_extend( + values: List["RowPlanCanonical"], + result: Tuple[bool, "RowPlanCanonical"], + ): + is_new = result[0] + new_values = (is_new, [*values, result[1]] if is_new else values) + return new_values + + # FUTURE: already handles nested to-many. + def merge( + self, row: Tuple[Any], indexed_plan: RowPlanMap, query_fields: List[QueryField] + ) -> Tuple[bool, "RowPlanCanonical"]: + # nothing to compare against. useful for recursion + handing default null as default value for reduce + if self.batch_edit_pack.id.value is None: + return False, indexed_plan.bind(row, query_fields) + + # trying to defer actual bind to later + batch_fields = indexed_plan.batch_edit_pack.bind(row) + if batch_fields.id.value != self.batch_edit_pack.id.value: + # if the id itself is different, we are on a different record. just bind and return + return True, indexed_plan.bind(row, query_fields) + + # now, ids are the same. no reason to bind other's to one. + # however, still need to handle to-manys inside to-ones (this will happen when a row gets duplicated due to to-many) + def _reduce_to_one( + accum: Tuple[bool, Dict[str, "RowPlanCanonical"]], + current: Tuple[str, RowPlanCanonical], + ): + key, value = current + is_stalled, previous_chain = accum + new_stalled, result = ( + (True, value) + if is_stalled + else value.merge(row, indexed_plan.to_one[key], query_fields) + ) + return (is_stalled or new_stalled, {**previous_chain, key: result}) + + init: Tuple[bool, Dict[str, RowPlanCanonical]] = (False, {}) + to_one_stalled, to_one = reduce( + _reduce_to_one, Func.sort_by_key(self.to_one), init + ) + + # the most tricky lines in this file + def _reduce_to_many( + accum: Tuple[int, List[Tuple[str, bool, List["RowPlanCanonical"]]]], + current: Tuple[str, List[RowPlanCanonical]], + ): + key, values = current + previous_length, previous_chain = accum + is_stalled = previous_length > 1 + if len(values) == 0: + new_values = [] + new_stalled = False + else: + new_stalled, new_values = ( + (True, values) + if is_stalled + else RowPlanCanonical._maybe_extend( + values, + values[-1].merge(row, indexed_plan.to_many[key], query_fields), + ) + ) + return ( + max(len(new_values), previous_length), + [*previous_chain, (key, is_stalled or new_stalled, new_values)], + ) + + if to_one_stalled: + to_many = self.to_many + to_many_stalled = True + else: + # We got stalled early on. 
+ init_to_many: Tuple[ + int, List[Tuple[str, bool, List["RowPlanCanonical"]]] + ] = (0, []) + most_length, to_many_result = reduce( + _reduce_to_many, Func.sort_by_key(self.to_many), init_to_many + ) + + to_many_stalled = ( + any(results[1] for results in to_many_result) or most_length > 1 + ) + to_many = {key: values for (key, _, values) in to_many_result} + + # TODO: explain why those arguments + stalled = to_one_stalled or to_many_stalled + return stalled, RowPlanCanonical( + self.batch_edit_pack, + self.columns, + to_one, + to_many, + ) + + @staticmethod + def _update_id_order(values: List["RowPlanCanonical"], plan: RowPlanMap): + is_id = plan.batch_edit_pack.order.value is None + new_value = ( + len(values) + if is_id + else ( + 0 + if len(values) == 0 + else max([value.batch_edit_pack.order.value for value in values]) + ) + ) + current_value = ( + plan.batch_edit_pack.order.value + if not is_id + else plan.batch_edit_pack.id.value + ) + new_pack = BatchEditFieldPack(field=None, value=max(new_value, current_value)) + return RowPlanMap( + batch_edit_pack=( + plan.batch_edit_pack._replace(id=new_pack) + if is_id + else plan.batch_edit_pack._replace(order=new_pack) + ) + ) + + # as we iterate through rows, need to update the to-many stats (number of ids or maximum order we saw) + # this is done to expand the rows at the end + def update_to_manys(self, to_many_planner: RowPlanMap) -> RowPlanMap: + to_one = { + key: value.update_to_manys(to_many_planner.to_one[key]) + for (key, value) in self.to_one.items() + } + to_many = { + key: RowPlanCanonical._update_id_order(values, to_many_planner.to_many[key]) + for key, values in self.to_many.items() + } + return RowPlanMap(batch_edit_pack=EMPTY_PACK, to_one=to_one, to_many=to_many) + + @staticmethod + def _extend_id_order( + values: List["RowPlanCanonical"], + to_many_planner: RowPlanMap, + indexed_plan: RowPlanMap, + ) -> List["RowPlanCanonical"]: + is_id = to_many_planner.batch_edit_pack.order.value is None + fill_out = None + # minor memoization, hehe + null_record = indexed_plan.nullify() + if not is_id: # if order is present, things are more complex + max_order = ( + 0 + if len(values) == 0 + else max([value.batch_edit_pack.order.value for value in values]) + ) + # this might be useless + assert len(values) == 0 or ( + len(set([value.batch_edit_pack.order.value for value in values])) + == len(values) + ) + # fill-in before, out happens later anyways + fill_in_range = range( + min(max_order, to_many_planner.batch_edit_pack.order.value) + 1 + ) + # TODO: this is generic and doesn't assume items aren't sorted by order. maybe we can optimize, knowing that. + filled_in = [ + next( + filter( + lambda pack: pack.batch_edit_pack.order.value == fill_in, values + ), + null_record, + ) + for fill_in in fill_in_range + ] + values = filled_in + fill_out = to_many_planner.batch_edit_pack.order.value - max_order + + if fill_out is None: + fill_out = to_many_planner.batch_edit_pack.id.value - len(values) + + assert fill_out >= 0, "filling out in opposite directon!" 
+ rest = range(fill_out) + values = [*values, *(null_record for _ in rest)] + _ids = [ + value.batch_edit_pack.id.value + for value in values + if isinstance(value.batch_edit_pack.id.value, int) + ] + if len(_ids) != len(set(_ids)): + raise Exception("Inserted duplicate ids") + return values + + def extend( + self, to_many_planner: RowPlanMap, plan: RowPlanMap + ) -> "RowPlanCanonical": + to_ones = { + key: value.extend(to_many_planner.to_one[key], plan.to_one[key]) + for (key, value) in self.to_one.items() + } + to_many = { + key: RowPlanCanonical._extend_id_order( + values, to_many_planner.to_many[key], plan.to_many[key] + ) + for (key, values) in self.to_many.items() + } + return self._replace(to_one=to_ones, to_many=to_many) + + @staticmethod + def _make_to_one_flat( + callback: Callable[[str, Func.I], Tuple[List[Any], Dict[str, Func.O]]] + ): + def _flat( + accum: Tuple[List[Any], Dict[str, Func.O]], current: Tuple[str, Func.I] + ): + to_one_fields, to_one_pack = callback(*current) + return [*accum[0], *to_one_fields], {**accum[1], current[0]: to_one_pack} + + return _flat + + @staticmethod + def _make_to_many_flat( + callback: Callable[[str, Func.I], Tuple[List[Any], Dict[str, Func.O]]] + ): + def _flat( + accum: Tuple[List[Any], Dict[str, Func.O]], + current: Tuple[str, List[Func.I]], + ): + rel_name, to_many = current + to_many_flattened = [callback(rel_name, canonical) for canonical in to_many] + row_data = [cell for row in to_many_flattened for cell in row[0]] + to_many_pack = [cell[1] for cell in to_many_flattened] + return [*accum[0], *row_data], {**accum[1], rel_name: to_many_pack} + + return _flat + + def flatten(self) -> Tuple[List[Any], Optional[Dict[str, Any]]]: + cols = [col.value for col in self.columns] + base_pack = ( + self.batch_edit_pack.to_json() + if self.batch_edit_pack.id.value is not None + else None + ) + + def _flatten(_: str, _self: "RowPlanCanonical"): + return _self.flatten() + + _to_one_reducer = RowPlanCanonical._make_to_one_flat(_flatten) + _to_many_reducer = RowPlanCanonical._make_to_many_flat(_flatten) + + to_one_init: Tuple[List[Any], Dict[str, Any]] = ([], {}) + to_many_init: Tuple[List[Any], Dict[str, List[Any]]] = ([], {}) + + to_ones = reduce(_to_one_reducer, Func.sort_by_key(self.to_one), to_one_init) + to_many = reduce(_to_many_reducer, Func.sort_by_key(self.to_many), to_many_init) + all_data = [*cols, *to_ones[0], *to_many[0]] + + # Removing all the unnecceary keys to save up on the size of the dataset + return all_data, ( + Func.remove_keys( + { + "self": base_pack, + "to_one": Func.remove_keys(to_ones[1], Func.is_not_empty), + "to_many": Func.remove_keys( + to_many[1], + lambda key, records: any( + Func.is_not_empty(key, record) for record in records + ), + ), + }, + Func.is_not_empty, + ) + if base_pack + else None + ) + + def to_upload_plan( + self, + base_table: Table, + localization_dump: Dict[str, str], + query_fields: List[QueryField], + fields_added: Dict[str, int], + get_column_id: Callable[[str], int], + omit_relationships: bool, + ) -> Tuple[List[Tuple[Tuple[int, int], str]], Uploadable]: + # Yuk, finally. + + # Whether we are something like [det-> (T -- what we are) -> tree]. + # Set break points in handle_tree_field in query_construct.py to figure out what this means. 
+ intermediary_to_tree = any( + canonical.batch_edit_pack is not None + and canonical.batch_edit_pack.is_part_of_tree(query_fields) + for canonical in self.to_one.values() + ) + + def _lookup_in_fields(_id: Optional[int], readonly_fields: List[str]): + assert _id is not None, "invalid lookup used!" + field = query_fields[ + _id - 1 + ] # Need to go off by 1, bc we added 1 to account for id fields + string_id = field.fieldspec.to_stringid() + localized_label = localization_dump.get( + string_id, naive_field_format(field.fieldspec) + ) + fields_added[localized_label] = fields_added.get(localized_label, 0) + 1 + _count = fields_added[localized_label] + if _count > 1: + localized_label += f" #{_count}" + fieldspec = field.fieldspec + + is_null = ( + fieldspec.needs_formatted() + or intermediary_to_tree + or (fieldspec.is_temporal() and fieldspec.date_part != "Full Date") + or fieldspec.get_field().name.lower() in readonly_fields + or fieldspec.table.name.lower() in READONLY_TABLES + ) + id_in_original_fields = get_column_id(string_id) + return ( + (id_in_original_fields, _count), + (None if is_null else fieldspec.get_field().name.lower()), + localized_label, + ) + + readonly_fields, readonly_rels = get_readonly_fields(base_table) + key_and_fields_and_headers = [ + _lookup_in_fields(column.idx, readonly_fields) for column in self.columns + ] + + wb_cols = { + key: parse_column_options(value) + for _, key, value in key_and_fields_and_headers + if key is not None # will happen for not-editable fields. + } + + def _to_upload_plan(rel_name: str, _self: "RowPlanCanonical"): + related_model = ( + base_table + if intermediary_to_tree + else datamodel.get_table_strict( + base_table.get_relationship(rel_name).relatedModelName + ) + ) + return _self.to_upload_plan( + related_model, + localization_dump, + query_fields, + fields_added, + get_column_id, + omit_relationships, + ) + + _to_one_reducer = RowPlanCanonical._make_to_one_flat(_to_upload_plan) + _to_many_reducer = RowPlanCanonical._make_to_many_flat(_to_upload_plan) + + # will don't modify the list directly, so we can use it for both to-one and to-many + headers_init: List[Tuple[Tuple[int, int], str]] = [] + _to_one_table: Dict[str, Uploadable] = {} + + to_one_headers, to_one_upload_tables = reduce( + _to_one_reducer, + Func.sort_by_key(self.to_one), + (headers_init, _to_one_table), + ) + + _to_many_table: Dict[str, List[Uploadable]] = {} + to_many_headers, to_many_upload_tables = reduce( + _to_many_reducer, + Func.sort_by_key(self.to_many), + (headers_init, _to_many_table), + ) + + raw_headers = [ + (key, header) for (key, __, header) in key_and_fields_and_headers + ] + all_headers = [*raw_headers, *to_one_headers, *to_many_headers] + + def _relationship_is_editable(name, value): + return ( + Func.is_not_empty(name, value) + and name not in readonly_rels + and not omit_relationships + ) + + if intermediary_to_tree: + assert len(to_many_upload_tables) == 0, "Found to-many for tree!" + upload_plan: Uploadable = TreeRecord( + name=base_table.django_name, + ranks={ + key: upload_table.wbcols # type: ignore + for (key, upload_table) in to_one_upload_tables.items() + }, + ) + else: + upload_plan = UploadTable( + name=base_table.django_name, + overrideScope=None, + wbcols=wb_cols, + static={}, + # FEAT: Remove this restriction to allow adding brand new data anywhere + # that's about the best we can do, to make relationships readonly. 
we can't really omit them during headers finding, because they are "still" there + toOne=Func.remove_keys(to_one_upload_tables, _relationship_is_editable), + toMany=Func.remove_keys( + to_many_upload_tables, _relationship_is_editable + ), + ) + + return all_headers, upload_plan + + +# TODO: This really only belongs on the front-end. +# Using this as a last resort to show fields, for unit tests +def naive_field_format(fieldspec: QueryFieldSpec): + field = fieldspec.get_field() + if field is None: + return f"{fieldspec.table.name} (formatted)" + if field.is_relationship: + return f"{fieldspec.table.name} ({'formatted' if field.type.endswith('to-one') else 'aggregatd'})" + return f"{fieldspec.table.name} {field.name}" + + +def run_batch_edit(collection, user, spquery, agent): + props = BatchEditProps( + collection=collection, + user=user, + contexttableid=int(spquery["contexttableid"]), + captions=spquery.get("captions", None), + limit=spquery.get("limit", 0), + recordsetid=spquery.get("recordsetid", None), + fields=fields_from_json(spquery["fields"]), + session_maker=models.session_context, + ) + (headers, rows, packs, json_upload_plan, visual_order) = run_batch_edit_query(props) + mapped_raws = [ + [*row, json.dumps({"batch_edit": pack})] for (row, pack) in zip(rows, packs) + ] + # Skipping empty because we can have a funny case where all the query fields don't contain any data + regularized_rows = regularize_rows(len(headers), mapped_raws, skip_empty=False) + return make_dataset( + user=user, + collection=collection, + name=spquery["name"], + headers=headers, + regularized_rows=regularized_rows, + agent=agent, + json_upload_plan=json_upload_plan, + visual_order=visual_order, + ) + + +# @transaction.atomic <--- we DONT do this because the query logic could take up possibly multiple minutes +class BatchEditProps(TypedDict): + collection: Any + user: Any + contexttableid: int + captions: Any + limit: Optional[int] + recordsetid: Optional[int] + session_maker: Any + fields: List[QueryField] + + +def run_batch_edit_query(props: BatchEditProps): + + offset = 0 + tableid = int(props["contexttableid"]) + captions = props["captions"] + limit = props["limit"] + + recordsetid = props["recordsetid"] + fields = props["fields"] + + visible_fields = [field for field in fields if field.display] + + assert captions is None or ( + len(visible_fields) == len(captions) + ), "Got misaligned captions!" + + localization_dump: Dict[str, str] = ( + { + # we cannot use numbers since they can very off + field.fieldspec.to_stringid(): caption + for field, caption in zip(visible_fields, captions) + } + if captions is not None + else {} + ) + + row_plan = RowPlanMap.get_row_plan(visible_fields) + + indexed, query_fields = row_plan.index_plan() + # we don't really care about these fields, since we'have already done the numbering (and it won't break with + # more fields). 
We also don't caree about their sort, since their sort is guaranteed to be after ours + query_with_hidden = [ + *query_fields, + *[field for field in fields if not field.display], + ] + + with props["session_maker"]() as session: + rows = execute( + session, + props["collection"], + props["user"], + tableid, + True, + False, + query_with_hidden, + limit, + offset, + True, + recordsetid, + False, + ) + + to_many_planner = indexed.to_many_planner() + + visited_rows: List[RowPlanCanonical] = [] + previous_id = None + previous_row = RowPlanCanonical(EMPTY_PACK) + for row in rows["results"]: + _, new_row = previous_row.merge(row, indexed, query_with_hidden) + to_many_planner = new_row.update_to_manys(to_many_planner) + if previous_id != new_row.batch_edit_pack.id.value: + visited_rows.append(previous_row) + previous_id = new_row.batch_edit_pack.id.value + previous_row = new_row + + # The very last row will not have anybody to commit by, so we need to add it. + # At this point though, we _know_ we need to commit it + visited_rows.append(previous_row) + + visited_rows = visited_rows[1:] + assert len(visited_rows) > 0, "nothing to return!" + + raw_rows: List[Tuple[List[Any], Optional[Dict[str, Any]]]] = [] + for visited_row in visited_rows: + extend_row = visited_row.extend(to_many_planner, indexed) + row_data, row_batch_edit_pack = extend_row.flatten() + raw_rows.append((row_data, row_batch_edit_pack)) + + assert ( + len(set([len(raw_row[0]) for raw_row in raw_rows])) == 1 + ), "Made irregular rows somewhere!" + + def _get_orig_column(string_id: str): + return next( + filter( + lambda field: field[1].fieldspec.to_stringid() == string_id, + enumerate(visible_fields), + ) + )[0] + + # Consider optimizing when relationships are not-editable? May not benefit actually + # This permission just gets enforced here + omit_relationships = not has_target_permission( + props["collection"].id, + props["user"].id, + [BatchEditDataSetPT.edit_multiple_tables], + ) + + # The keys are lookups into original query field (not modified by us). Used to get ids in the original one. + key_and_headers, upload_plan = extend_row.to_upload_plan( + datamodel.get_table_by_id_strict(tableid, strict=True), + localization_dump, + query_fields, + {}, + _get_orig_column, + omit_relationships, + ) + + headers_enumerated = enumerate(key_and_headers) + + # We would have arbitarily sorted the columns, so our columns will not be correct. + # Rather than sifting the data, we just add a default visual order. + visual_order = Func.first(sorted(headers_enumerated, key=lambda tup: tup[1][0])) + + headers = Func.second(key_and_headers) + + json_upload_plan = upload_plan.unparse() + validate(json_upload_plan, schema) + + return ( + headers, + Func.first(raw_rows), + Func.second(raw_rows), + json_upload_plan, + visual_order, + ) + + +def make_dataset( + user, + collection, + name, + headers, + regularized_rows, + agent, + json_upload_plan, + visual_order, +): + # We are _finally_ ready to make a new dataset + + with transaction.atomic(): + ds = Spdataset.objects.create( + specifyuser=user, + collection=collection, + name=name, + columns=headers, + data=regularized_rows, + importedfilename=name, + createdbyagent=agent, + modifiedbyagent=agent, + uploadplan=json.dumps(json_upload_plan), + visualorder=visual_order, + isupdate=True, + ) + + ds_id, ds_name = (ds.id, ds.name) + ds.id = None + ds.name = f"Backs - {ds.name}" + ds.parent_id = ds_id + # Create the backer. 
+ ds.save() + + return (ds_id, ds_name) diff --git a/specifyweb/stored_queries/urls.py b/specifyweb/stored_queries/urls.py index d448991deb6..a06135814ec 100644 --- a/specifyweb/stored_queries/urls.py +++ b/specifyweb/stored_queries/urls.py @@ -9,4 +9,5 @@ url(r'^exportkml/$', views.export_kml), url(r'^make_recordset/$', views.make_recordset), url(r'^return_loan_preps/$', views.return_loan_preps), + url(r'^batch_edit/$', views.batch_edit) ] diff --git a/specifyweb/stored_queries/views.py b/specifyweb/stored_queries/views.py index 30499a48d36..f36b7c3ac9a 100644 --- a/specifyweb/stored_queries/views.py +++ b/specifyweb/stored_queries/views.py @@ -10,6 +10,7 @@ from django.views.decorators.http import require_POST from specifyweb.middleware.general import require_GET +from specifyweb.stored_queries.batch_edit import run_batch_edit from . import models from .execution import execute, run_ephemeral_query, do_export, recordset, \ return_loan_preps as rlp @@ -96,22 +97,35 @@ def query(request, id): @never_cache def ephemeral(request): """Executes and returns the results of the query provided as JSON in the POST body.""" + + spquery, collection = get_query(request) + data = run_ephemeral_query(collection, request.specify_user, spquery) + + return HttpResponse(toJson(data), content_type='application/json') + +def get_query(request): try: spquery = json.load(request) except ValueError as e: return HttpResponseBadRequest(e) - if 'collectionid' in spquery: collection = Collection.objects.get(pk=spquery['collectionid']) logger.debug('forcing collection to %s', collection.collectionname) else: collection = request.specify_collection - + check_permission_targets(collection.id, request.specify_user.id, [QueryBuilderPt.execute]) - data = run_ephemeral_query(collection, request.specify_user, spquery) - return HttpResponse(toJson(data), content_type='application/json') + return spquery, collection +@require_POST +@login_maybe_required +@never_cache +def batch_edit(request): + """Executes and returns the results of the query provided as JSON in the POST body.""" + spquery, collection = get_query(request) + ds_id, ds_name = run_batch_edit(collection, request.specify_user, spquery, request.specify_user_agent) + return HttpResponse(toJson({"id": ds_id, "name": ds_name}), status=201, content_type='application/json') @require_POST @login_maybe_required From c8c7f0e970b73b39b23a4b583d767f37c267c8d8 Mon Sep 17 00:00:00 2001 From: Sharad S Date: Wed, 27 Nov 2024 15:02:32 -0500 Subject: [PATCH 17/79] Use variant localization in workbench --- .../lib/components/Toolbar/WbsDialog.tsx | 89 ++++++++++--------- .../components/WbActions/WbNoUploadPlan.tsx | 1 + .../lib/components/WbActions/WbRollback.tsx | 15 ++-- .../lib/components/WbActions/WbUpload.tsx | 11 ++- .../js_src/lib/components/WbActions/index.tsx | 43 +++++---- .../js_src/lib/components/WbImport/helpers.ts | 5 +- 6 files changed, 98 insertions(+), 66 deletions(-) diff --git a/specifyweb/frontend/js_src/lib/components/Toolbar/WbsDialog.tsx b/specifyweb/frontend/js_src/lib/components/Toolbar/WbsDialog.tsx index e6de4ab12d9..f87bb4083f4 100644 --- a/specifyweb/frontend/js_src/lib/components/Toolbar/WbsDialog.tsx +++ b/specifyweb/frontend/js_src/lib/components/Toolbar/WbsDialog.tsx @@ -10,7 +10,7 @@ import type { LocalizedString } from 'typesafe-i18n'; import { useAsyncState } from '../../hooks/useAsyncState'; import { commonText } from '../../localization/common'; -import { wbPlanText } from '../../localization/wbPlan'; +import { headerText } from 
'../../localization/header'; import { wbText } from '../../localization/workbench'; import { ajax } from '../../utils/ajax'; import type { RA } from '../../utils/types'; @@ -27,15 +27,16 @@ import { Dialog, dialogClassNames } from '../Molecules/Dialog'; import type { SortConfig } from '../Molecules/Sorting'; import { SortIndicator, useSortConfig } from '../Molecules/Sorting'; import { TableIcon } from '../Molecules/TableIcon'; -import { hasPermission } from '../Permissions/helpers'; +import { formatUrl } from '../Router/queryString'; import { OverlayContext } from '../Router/Router'; import { uniquifyDataSetName } from '../WbImport/helpers'; import type { Dataset, DatasetBriefPlan } from '../WbPlanView/Wrapped'; +import { datasetVariants } from '../WbUtils/datasetVariants'; import { WbDataSetMeta } from '../WorkBench/DataSetMeta'; const createWorkbenchDataSet = async () => createEmptyDataSet( - '/api/workbench/dataset/', + 'workbench', wbText.newDataSetName({ date: new Date().toDateString() }), { importedfilename: '', @@ -46,14 +47,14 @@ const createWorkbenchDataSet = async () => export const createEmptyDataSet = async < DATASET extends AttachmentDataSet | Dataset >( - datasetUrl: string, + datasetVariant: keyof typeof datasetVariants, name: LocalizedString, props?: Partial ): Promise => - ajax(datasetUrl, { + ajax(datasetVariants[datasetVariant].fetchUrl, { method: 'POST', body: { - name: await uniquifyDataSetName(name, undefined, datasetUrl), + name: await uniquifyDataSetName(name, undefined, datasetVariant), rows: [], ...props, }, @@ -129,31 +130,43 @@ function TableHeader({ ); } -/** Render a dialog for choosing a data set */ -export function DataSetsDialog({ +type WB_VARIANT = keyof Omit; + +export type WbVariantLocalization = + typeof datasetVariants.workbench.localization.viewer; + +export function GenericDataSetsDialog({ onClose: handleClose, - showTemplates, onDataSetSelect: handleDataSetSelect, + wbVariant, }: { - readonly showTemplates: boolean; + readonly wbVariant: WB_VARIANT; readonly onClose: () => void; readonly onDataSetSelect?: (id: number) => void; }): JSX.Element | null { + const { + fetchUrl, + sortConfig: sortConfigSpec, + canEdit, + localization, + route, + metaRoute, + canImport, + documentationUrl, + } = datasetVariants[wbVariant]; const [unsortedDatasets] = useAsyncState( React.useCallback( async () => - ajax>( - `/api/workbench/dataset/${showTemplates ? '?with_plan' : ''}`, - { headers: { Accept: 'application/json' } } - ).then(({ data }) => data), - [showTemplates] + ajax>(formatUrl(fetchUrl, {}), { + headers: { Accept: 'application/json' }, + }).then(({ data }) => data), + [wbVariant] ), true ); - const [sortConfig, handleSort, applySortConfig] = useSortConfig( - 'listOfDataSets', - 'dateCreated', + sortConfigSpec.key, + sortConfigSpec.field, false ); @@ -169,16 +182,15 @@ export function DataSetsDialog({ ) : undefined; - const canImport = - hasPermission('/workbench/dataset', 'create') && !showTemplates; const navigate = useNavigate(); const loading = React.useContext(LoadingContext); + return Array.isArray(datasets) ? ( {commonText.cancel()} - {canImport && ( + {canImport() && ( <> {wbText.importFile()} @@ -202,25 +214,17 @@ export function DataSetsDialog({ container: dialogClassNames.wideContainer, }} dimensionsKey="DataSetsDialog" - header={ - showTemplates - ? 
wbPlanText.copyPlan() - : commonText.countLine({ - resource: wbText.dataSets(), - count: datasets.length, - }) - } + header={localization.datasetsDialog.header(datasets.length)} icon={icons.table} onClose={handleClose} > {datasets.length === 0 ? ( -
          <p>
-            {showTemplates
-              ? wbPlanText.noPlansToCopyFrom()
-              : `${wbText.wbsDialogEmpty()} ${
-                  canImport ? wbText.createDataSetInstructions() : ''
-                }`}
-          </p>
+          <div>
+            <p>{localization.datasetsDialog.empty()}</p>
+            <Link.NewTab href={documentationUrl}>
+              {headerText.documentation()}
+            </Link.NewTab>
+          </div>