Commit 046267b8 authored by David Sikter's avatar David Sikter Committed by Markus Scheidgen
Browse files

Resolve "Possible to add and remove list items at the same time in the edit api"

parent fd7e312d
......@@ -1082,10 +1082,14 @@ class MetadataEditListAction(BaseModel):
'''
Defines an action to perform on a list quantity. This enables users to add and remove values.
'''
op: str = Field(description=strip('''
Defines the type of operation (either `set`, `add` or `remove`)'''))
values: Union[str, List[str]] = Field(description=strip('''
The value or values to set/add/remove (string or list of strings)'''))
set: Optional[Union[str, List[str]]] = Field(description=strip('''
Value(s) to set. Note, a set-operation overwrites the old list with the provided list.
If a set-operation is specified, it is therefore not possible to also specify an
add- or remove-operation.'''))
add: Optional[Union[str, List[str]]] = Field(description=strip('''
Value(s) to add to the list'''))
remove: Optional[Union[str, List[str]]] = Field(description=strip('''
Value(s) to remove from the list'''))
# Generate model for MetadataEditActions
......
......@@ -415,29 +415,42 @@ class MetadataEditRequestHandler:
else:
# We have a non-scalar quantity
if type(raw_value) == dict:
# The raw value is a dict - expected to contain op and values
assert raw_value.keys() == {'op', 'values'}, 'Expected keys `op` and `values`'
op = raw_value['op']
values = raw_value['values']
assert op in ('set', 'add', 'remove'), 'op should be `set`, `add` or `remove`'
if quantity_name == 'datasets' and op == 'set':
# The raw value is a dict - expected to contain keys add/remove/set
assert raw_value, 'No operation specified'
for key in raw_value:
assert key in ('set', 'add', 'remove'), (
f'Allowed operations are `set`, `add`, and `remove`, got {key}')
assert 'set' not in raw_value or ('add' not in raw_value and 'remove' not in raw_value), (
'Cannot specify both `set` and `add`/`remove` operations')
if quantity_name == 'datasets' and 'set' in raw_value:
self._error(
'Only `add` and `remove` operations permitted for datasets', loc)
return False, None
raw_ops = raw_value
else:
op = 'set'
values = raw_value
# The raw value is not a dict, but a value or list of values
# -> interpret as a set-operation (or, for datasets: add-operation)
if quantity_name == 'datasets':
op = 'add' # Just specifying a list will be interpreted as add, rather than fail.
values = values if type(values) == list else [values]
verified_values = [self._verified_value_single(definition, v) for v in values]
return True, dict(op=op, values=verified_values)
raw_ops = {'add': raw_value}
else:
raw_ops = {'set': raw_value}
verified_ops = {}
for op, values in raw_ops.items():
values = values if type(values) == list else [values]
verified_values = [self._verified_value_single(definition, v, op) for v in values]
verified_ops[op] = verified_values
assert set(verified_ops.get('add', [])).isdisjoint(verified_ops.get('remove', [])), (
'The same value cannot be specified for both `add` and `remove`')
return True, verified_ops
except Exception as e:
self._error(str(e), loc)
return False, None
def _verified_value_single(
self, definition: metainfo.Definition, value: Any) -> Any:
self, definition: metainfo.Definition, value: Any, op: str = None) -> Any:
'''
Verifies a *singular* raw value (i.e. for list quantities we should run this method
for each value in the list, not with the list itself as input). Returns the verified
......@@ -491,7 +504,7 @@ class MetadataEditRequestHandler:
assert dataset is not None, f'Dataset reference not found: `{value}`'
assert self.user.is_admin or dataset.user_id == self.user.user_id, (
f'Dataset `{value}` does not belong to you')
assert not dataset.doi, f'Dataset `{value}` has a doi and cannot be changed'
assert op == 'add' or not dataset.doi, f'Dataset `{value}` has a doi, can only add entries to it'
return dataset.dataset_id
else:
assert False, 'Unhandled value type' # Should not happen
......@@ -518,19 +531,19 @@ class MetadataEditRequestHandler:
if definition.type == metainfo.Datetime and verified_value:
return datetime.fromisoformat(verified_value)
return verified_value
# Non-scalar property. The verified value should be a dict with op and values
op, values = verified_value['op'], verified_value['values']
# Non-scalar property. The verified value should be a dict with operations
old_list = getattr(mongo_doc, quantity_name, [])
new_list = [] if op == 'set' else old_list.copy()
for v in values:
if op == 'add' or op == 'set':
if v not in new_list:
if quantity_name in ('coauthors', 'reviewers') and v == mongo_doc.main_author:
continue # Prevent adding the main author to coauthors or reviewers
new_list.append(v)
elif op == 'remove':
if v in new_list:
new_list.remove(v)
new_list = [] if 'set' in verified_value else old_list.copy()
for op, values in verified_value.items():
for v in values:
if op == 'add' or op == 'set':
if v not in new_list:
if quantity_name in ('coauthors', 'reviewers') and v == mongo_doc.main_author:
continue # Prevent adding the main author to coauthors or reviewers
new_list.append(v)
elif op == 'remove':
if v in new_list:
new_list.remove(v)
return new_list
def _get_entry_key(self, entry: 'Entry', entries_key: str) -> str:
......
......@@ -386,7 +386,7 @@ class TestEditRepo():
expected_error_loc=('query',)),
id='query-no-results')])
def test_post_entries_edit(
client, proc_infra, example_data_writeable, a_dataset, test_auth_dict, test_users_dict,
client, proc_infra, example_data_writeable, example_datasets, test_auth_dict, test_users_dict,
user, kwargs):
'''
Note, since the endpoint basically just forwards the request to
......
......@@ -1097,7 +1097,7 @@ def test_delete_upload_raw_path(
expected_error_loc=('query',)),
id='query-no-results')])
def test_post_upload_edit(
client, proc_infra, example_data_writeable, a_dataset, test_auth_dict, test_users_dict,
client, proc_infra, example_data_writeable, example_datasets, test_auth_dict, test_users_dict,
user, upload_id, kwargs):
'''
Note, since the endpoint basically just forwards the request to
......
......@@ -889,18 +889,30 @@ def example_data_writeable(mongo, test_user, normalized):
@pytest.fixture(scope='function')
def a_dataset(mongo, test_user):
    '''Fixture: creates one owned dataset for test_user; removed again on teardown.'''
    creation_time = datetime.utcnow()
    created = datamodel.Dataset(
        dataset_id=utils.create_uuid(),
        dataset_name='a dataset',
        user_id=test_user.user_id,
        dataset_create_time=creation_time,
        dataset_modified_time=creation_time,
        dataset_type='owned')
    created.a_mongo.create()
    yield created
    created.a_mongo.delete()
def example_datasets(mongo, test_user, other_test_user):
    '''
    Fixture: creates three datasets — two owned by test_user (one of them with a doi)
    and one owned by other_test_user. All are deleted again on teardown.
    '''
    specs = (
        ('test_dataset_1', test_user, None),
        ('test_dataset_2', test_user, 'test_doi_2'),
        ('test_dataset_3', other_test_user, None)
    )
    created = []
    for name, owner, doi in specs:
        timestamp = datetime.utcnow()
        new_dataset = datamodel.Dataset(
            dataset_id=utils.create_uuid(),
            dataset_name=name,
            doi=doi,
            user_id=owner.user_id,
            dataset_create_time=timestamp,
            dataset_modified_time=timestamp,
            dataset_type='owned')
        new_dataset.a_mongo.create()
        created.append(new_dataset)
    yield created
    # Teardown: pop-and-delete so the list is emptied as records are removed.
    while created:
        created.pop().a_mongo.delete()
@pytest.fixture
......
......@@ -36,7 +36,7 @@ all_coauthor_metadata = dict(
references=['a reference', 'another reference'],
external_db='AFLOW',
reviewers=['lhofstadter'],
datasets=['a dataset'])
datasets=['test_dataset_1'])
all_coauthor_upload_metadata = {
k: v for k, v in all_coauthor_metadata.items() if k in _mongo_upload_metadata}
......@@ -240,12 +240,12 @@ def convert_to_comparable_value_single(quantity, value, format, user):
metadata=dict(comment='new comment'),
affected_upload_ids=['id_unpublished_w']),
id='query-contains-published')])
def test_edit_metadata(proc_infra, purged_app, example_data_writeable, a_dataset, test_users_dict, kwargs):
def test_edit_metadata(proc_infra, purged_app, example_data_writeable, example_datasets, test_users_dict, kwargs):
kwargs['user'] = test_users_dict[kwargs.get('user', 'test_user')]
assert_edit_request(**kwargs)
def test_set_and_clear_all(proc_infra, example_data_writeable, a_dataset, test_user):
def test_set_and_clear_all(proc_infra, example_data_writeable, example_datasets, test_user):
# Set all fields a coauthor can set
assert_edit_request(
user=test_user,
......@@ -264,6 +264,150 @@ def test_set_and_clear_all(proc_infra, example_data_writeable, a_dataset, test_u
reviewers=[]))
@pytest.mark.parametrize('kwargs', [
pytest.param(
dict(
metadata_1=dict(datasets={'add': 'test_dataset_1'}),
expected_metadata_1=dict(datasets=['test_dataset_1']),
metadata_2=dict(datasets={'add': 'test_dataset_2'}),
expected_metadata_2=dict(datasets=['test_dataset_1', 'test_dataset_2'])),
id='datasets-add'),
pytest.param(
dict(
metadata_1=dict(datasets=['test_dataset_1']),
metadata_2=dict(datasets=['ref:test_dataset_2']),
expected_metadata_2=dict(datasets=['test_dataset_1', 'test_dataset_2'])),
id='datasets-implicit-add'),
pytest.param(
dict(
metadata_1=dict(datasets=['test_dataset_1']),
metadata_2=dict(datasets=['test_dataset_3']),
expected_error_loc_2=('metadata', 'datasets')),
id='datasets-implicit-add-not-owner'),
pytest.param(
dict(
metadata_1=dict(datasets=['test_dataset_1']),
metadata_2=dict(datasets=['ref:test_dataset_3']),
expected_error_loc_2=('metadata', 'datasets')),
id='datasets-implicit-add-not-owner-ref'),
pytest.param(
dict(
metadata_1=dict(datasets=['test_dataset_1', 'test_dataset_2']),
metadata_2=dict(datasets={'remove': 'test_dataset_1'}),
expected_metadata_2=dict(datasets=['test_dataset_2'])),
id='datasets-remove'),
pytest.param(
dict(
metadata_1=dict(datasets=['test_dataset_1', 'ref:test_dataset_2']),
metadata_2=dict(datasets={'remove': 'test_dataset_2'}),
expected_error_loc_2=('metadata', 'datasets')),
id='datasets-remove-with-doi'),
pytest.param(
dict(
metadata_1=dict(datasets='test_dataset_1'),
metadata_2=dict(datasets={'add': ['test_dataset_2'], 'remove': 'ref:test_dataset_1'}),
expected_metadata_2=dict(datasets=['test_dataset_2'])),
id='datasets-add+remove'),
pytest.param(
dict(
metadata_1=dict(datasets={'set': ['test_dataset_2']}),
expected_error_loc_1=('metadata', 'datasets'),
metadata_2=dict(datasets={'add': 'test_dataset_1', 'set': ['test_dataset_2']}),
expected_error_loc_2=('metadata', 'datasets')),
id='datasets-set'),
pytest.param(
dict(
metadata_1=dict(coauthors='scooper'),
expected_metadata_1=dict(coauthors=[]),
metadata_2=dict(coauthors={'add': ['lhofstadter']}),
expected_metadata_2=dict(coauthors=['lhofstadter'])),
id='coauthors-add'),
pytest.param(
dict(
metadata_1=dict(coauthors='lhofstadter'),
expected_metadata_1=dict(coauthors=['lhofstadter']),
metadata_2=dict(coauthors={'add': ['admin'], 'remove': 'lhofstadter'}),
expected_metadata_2=dict(coauthors=['admin'])),
id='coauthors-add+remove'),
pytest.param(
dict(
metadata_1=dict(coauthors='lhofstadter'),
expected_metadata_1=dict(coauthors=['lhofstadter']),
metadata_2=dict(coauthors={'add': ['admin'], 'set': 'lhofstadter'}),
expected_error_loc_2=('metadata', 'coauthors')),
id='coauthors-add+set'),
pytest.param(
dict(
metadata_1=dict(reviewers='scooper'),
expected_metadata_1=dict(reviewers=[]),
metadata_2=dict(reviewers={'add': ['lhofstadter']}),
expected_metadata_2=dict(reviewers=['lhofstadter'])),
id='reviewers-add'),
pytest.param(
dict(
metadata_1=dict(reviewers='lhofstadter'),
expected_metadata_1=dict(reviewers=['lhofstadter']),
metadata_2=dict(reviewers={'add': ['admin'], 'remove': 'lhofstadter'}),
expected_metadata_2=dict(reviewers=['admin'])),
id='reviewers-add+remove'),
pytest.param(
dict(
metadata_1=dict(reviewers='lhofstadter'),
expected_metadata_1=dict(reviewers=['lhofstadter']),
metadata_2=dict(reviewers={'remove': ['admin'], 'set': 'lhofstadter'}),
expected_error_loc_2=('metadata', 'reviewers')),
id='reviewers-remove+set'),
pytest.param(
dict(
metadata_1=dict(references='ref1'),
expected_metadata_1=dict(references=['ref1']),
metadata_2=dict(references={'add': ['ref2']}),
expected_metadata_2=dict(references=['ref1', 'ref2'])),
id='references-add'),
pytest.param(
dict(
metadata_1=dict(references=['ref1', 'ref2']),
expected_metadata_1=dict(references=['ref1', 'ref2']),
metadata_2=dict(references={'add': 'ref3', 'remove': ['ref1']}),
expected_metadata_2=dict(references=['ref2', 'ref3'])),
id='references-add+remove'),
pytest.param(
dict(
metadata_1=dict(references='ref1'),
expected_metadata_1=dict(references=['ref1']),
metadata_2=dict(references={'remove': 'ref4', 'add': ['ref2', 'ref3', 'ref4']}),
expected_error_loc_2=('metadata', 'references')),
id='references-add+remove-incoherent')])
def test_list_quantities(proc_infra, purged_app, example_data_writeable, example_datasets, test_users_dict, kwargs):
    '''
    Runs two consecutive edit requests (suffix `_1` then `_2`) against the same data,
    so that list operations (`set`/`add`/`remove`) can be tested on top of a previous
    state. Dataset values of the form `ref:<name>` are replaced with the corresponding
    dataset_id from the `example_datasets` fixture before the request is made.
    '''
    def replace_dataset_ref(dataset_ref):
        # `ref:<dataset_name>` -> dataset_id; anything else is passed through unchanged.
        if dataset_ref.startswith('ref:'):
            for dataset in example_datasets:
                if dataset.dataset_name == dataset_ref[4:]:
                    return dataset.dataset_id
        return dataset_ref

    def replace_dataset_ref_or_reflist(ref_or_reflist):
        # Accepts either a single ref or a list of refs (mirrors the API's value typing).
        if isinstance(ref_or_reflist, list):
            return [replace_dataset_ref(ref) for ref in ref_or_reflist]
        return replace_dataset_ref(ref_or_reflist)

    kwargs['user'] = test_users_dict[kwargs.get('user', 'test_user')]
    for suffix in ('_1', '_2'):
        # Promote the suffixed arguments for this step; drop leftovers from the
        # previous step so e.g. `expected_error_loc_1` does not leak into step 2.
        for arg in ('metadata', 'expected_metadata', 'expected_error_loc'):
            if arg in kwargs:
                kwargs.pop(arg)
            if arg + suffix in kwargs:
                kwargs[arg] = kwargs.pop(arg + suffix)
        datasets = kwargs['metadata'].get('datasets')
        if datasets is not None:
            if isinstance(datasets, dict):
                # Operation-style value: {'add': ..., 'remove': ..., 'set': ...}
                datasets = {op: replace_dataset_ref_or_reflist(v) for op, v in datasets.items()}
            else:
                datasets = replace_dataset_ref_or_reflist(datasets)
            kwargs['metadata']['datasets'] = datasets
        assert_edit_request(**kwargs)
def test_admin_quantities(proc_infra, example_data_writeable, test_user, other_test_user, admin_user):
assert_edit_request(
user=admin_user, upload_id='id_published_w', metadata=all_admin_metadata)
......@@ -273,7 +417,7 @@ def test_admin_quantities(proc_infra, example_data_writeable, test_user, other_t
user=test_user, upload_id='id_unpublished_w', metadata={k: v}, expected_error_loc=('metadata', k))
def test_query_cannot_set_upload_attributes(proc_infra, example_data_writeable, a_dataset, test_user):
def test_query_cannot_set_upload_attributes(proc_infra, example_data_writeable, example_datasets, test_user):
query = {'and': [{'upload_create_time:gt': '2021-01-01'}, {'published': False}]}
for k, v in all_coauthor_upload_metadata.items():
# Attempting to edit an upload level attribute with query should always fail,
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment