# Copyright 2018 Markus Scheidgen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
The archive API of the nomad@FAIRDI APIs. This API is about serving processed
(parsed and normalized) calculation data in nomad's *meta-info* format.
"""

from typing import Dict, Any
from io import BytesIO
import os.path
from flask import send_file, request
from flask_restplus import abort, Resource, fields
import json
import importlib
import urllib.parse

import nomad_meta_info

from nomad.files import UploadFiles, Restricted
from nomad import utils, search, config
from nomad.archive_library.filedb import ArchiveFileDB

from .auth import authenticate, create_authorization_predicate
from .api import api
from .common import calc_route, streamed_zipfile, search_model, add_pagination_parameters,\
    add_scroll_parameters, add_search_parameters, apply_search_parameters,\
    query_api_python, query_api_curl

ns = api.namespace(
    'archive',
    description='Access archive data and archive processing logs.')
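
# The resources below serve, under this namespace: per-calculation archive data and
# processing logs (routed via .common.calc_route with upload_id/calc_id), a streamed
# zip download for whole search queries, archive data for posted query schemas, and
# metainfo definition files.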


@calc_route(ns, '/logs')
class ArchiveCalcLogResource(Resource):
    @api.doc('get_archive_logs')
    @api.response(404, 'The upload or calculation does not exist')
    @api.response(401, 'Not authorized to access the data.')
    @api.response(200, 'Archive data sent', headers={'Content-Type': 'text/plain'})
    @authenticate(signature_token=True)
    def get(self, upload_id, calc_id):
        """
        Get the calculation processing log.

        Calcs are referenced via *upload_id*, *calc_id* pairs.
        """
        archive_id = '%s/%s' % (upload_id, calc_id)

        upload_files = UploadFiles.get(
            upload_id, is_authorized=create_authorization_predicate(upload_id, calc_id))

        if upload_files is None:
            abort(404, message='Upload %s does not exist.' % upload_id)

        try:
            return send_file(
                upload_files.archive_log_file(calc_id, 'rb'),
                mimetype='text/plain',
                as_attachment=True,
                cache_timeout=0,
                attachment_filename='%s.log' % archive_id)
        except Restricted:
            abort(401, message='Not authorized to access %s/%s.' % (upload_id, calc_id))
        except KeyError:
            abort(404, message='Calculation %s does not exist.' % archive_id)


@calc_route(ns)
class ArchiveCalcResource(Resource):
    @api.doc('get_archive_calc')
    @api.response(404, 'The upload or calculation does not exist')
    @api.response(401, 'Not authorized to access the data.')
    @api.response(200, 'Archive data sent')
    @authenticate(signature_token=True)
    def get(self, upload_id, calc_id):
        """
        Get calculation data in archive form.

        Calcs are referenced via *upload_id*, *calc_id* pairs.
        """
        archive_id = '%s/%s' % (upload_id, calc_id)

        upload_file = UploadFiles.get(
            upload_id, is_authorized=create_authorization_predicate(upload_id, calc_id))

        if upload_file is None:
            abort(404, message='Archive %s does not exist.' % upload_id)

        try:
            return send_file(
                upload_file.archive_file(calc_id, 'rb'),
                mimetype='application/json',
                as_attachment=True,
                cache_timeout=0,
                attachment_filename='%s.json' % archive_id)
        except Restricted:
            abort(401, message='Not authorized to access %s/%s.' % (upload_id, calc_id))
        except KeyError:
            abort(404, message='Calculation %s does not exist.' % archive_id)


_archive_download_parser = api.parser()
add_search_parameters(_archive_download_parser)
_archive_download_parser.add_argument(
    name='compress', type=bool, help='Use compression on .zip files, default is no compression.',
    location='args')


@ns.route('/download')
class ArchiveDownloadResource(Resource):
    manifest_quantities = ['upload_id', 'calc_id', 'external_id', 'raw_id', 'pid', 'calc_hash']

    @api.doc('archive_download')
    @api.response(400, 'Invalid requests, e.g. wrong owner type or bad search parameters')
    @api.expect(_archive_download_parser, validate=True)
    @api.response(200, 'File(s) sent', headers={'Content-Type': 'application/zip'})
    @authenticate(signature_token=True)
    def get(self):
        """
        Get calculation data in archive form from all query results.

        See the ``/repo`` endpoint for documentation on the search parameters.

        Zip files are streamed; instead of 401 errors, the zip file will simply not contain
        any files that the user is not authorized to access.

        The zip file will contain a ``manifest.json`` with the repository metadata.
        """
        try:
            args = _archive_download_parser.parse_args()
            compress = args.get('compress', False)
        except Exception:
            abort(400, message='bad parameter types')

        search_request = search.SearchRequest()
        apply_search_parameters(search_request, args)

        calcs = search_request.execute_scan(
            order_by='upload_id',
            size=config.services.download_scan_size,
            scroll=config.services.download_scan_timeout)

        def generator():
            try:
                manifest = {}
                upload_files = None

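                # calcs are ordered by upload_id, so each upload's zipfile cache is
                # opened and closed only once; for every calc a (zip entry name, calc_id,
                # open function, size function) tuple is yielded to streamed_zipfile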
                for entry in calcs:
                    upload_id = entry['upload_id']
                    calc_id = entry['calc_id']
                    if upload_files is None or upload_files.upload_id != upload_id:
                        if upload_files is not None:
                            upload_files.close_zipfile_cache()

                        upload_files = UploadFiles.get(
                            upload_id, create_authorization_predicate(upload_id))

                        if upload_files is None:
                            utils.get_logger(__name__).error('upload files do not exist', upload_id=upload_id)
                            continue

                        upload_files.open_zipfile_cache()

                    yield (
                        '%s.%s' % (calc_id, upload_files._archive_ext), calc_id,
                        lambda calc_id: upload_files.archive_file(calc_id, 'rb'),
                        lambda calc_id: upload_files.archive_file_size(calc_id))

                    manifest[calc_id] = {
                        key: entry[key]
                        for key in ArchiveDownloadResource.manifest_quantities
                        if entry.get(key) is not None
                    }

                if upload_files is not None:
                    upload_files.close_zipfile_cache()

                try:
                    manifest_contents = json.dumps(manifest).encode('utf-8')
                except Exception as e:
                    manifest_contents = json.dumps(
                        dict(error='Could not create the manifest: %s' % (e))).encode('utf-8')
                    utils.get_logger(__name__).error(
                        'could not create archive query manifest', exc_info=e)

                yield (
                    'manifest.json', 'manifest',
                    lambda *args: BytesIO(manifest_contents),
                    lambda *args: len(manifest_contents))

            except Exception as e:
                utils.get_logger(__name__).warning(
                    'unexpected error while streaming archive data from query',
                    exc_info=e,
                    query=urllib.parse.urlencode(request.args, doseq=True))

        return streamed_zipfile(
            generator(), zipfile_name='nomad_archive.zip', compress=compress)


_archive_query_parser = api.parser()
add_pagination_parameters(_archive_query_parser)
add_scroll_parameters(_archive_query_parser)
add_search_parameters(_archive_query_parser)
_archive_query_parser.add_argument(
    'db', type=str, help='Database to use, zip or msg', default='zip', location='args')
_archive_query_parser.add_argument(
    'qschema', type=str, help='Serialized archive dict with null values as placeholders for data.')

_archive_query_model_fields = {
    'python': fields.String(allow_null=True, skip_none=True, description=(
        'A python code snippet that can be executed to reproduce the api result.')),
    'curl': fields.String(allow_null=True, skip_none=True, description=(
        'A curl command that can be executed to reproduce the api result.')),
}
_archive_query_model = api.clone('ArchiveCalculations', search_model, _archive_query_model_fields)
# scroll and pagination models are capitalized to prevent ambiguity with the scroll flag
_archive_query_model['Scroll'] = _archive_query_model.pop('scroll')
_archive_query_model['Pagination'] = _archive_query_model.pop('pagination')


@ns.route('/query')
class ArchiveQueryResource(Resource):
    @api.doc('post_archive_query')
    @api.response(400, 'Invalid requests, e.g. wrong owner type or bad search parameters')
    @api.response(401, 'Not authorized to access the data.')
    @api.response(404, 'The upload or calculation does not exist')
    @api.response(200, 'Archive data sent')
    @api.expect(_archive_query_model)
    @api.marshal_with(_archive_query_model, skip_none=True, code=200, description='Search results sent')
    @authenticate()
    def post(self):
        """
        Post an query schema and return it filled with archive data in json format from
        all query results.
249

250
251
        See ``/repo`` endpoint for documentation on the search
        parameters.
252

253
254
255
        The actual data are in results and a supplementary python code (curl) to
        execute search is in python (curl).
        """
        try:
            data_in = request.get_json()
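            # scroll parameters may be given either as flat keys or nested inside the
            # capitalized 'Scroll' object of the archive query model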
            scroll = data_in.get('scroll', None)
            scroll_id = data_in.get('scroll_id', None)
            Scroll = data_in.get('Scroll', None)
            if Scroll:
                scroll = Scroll.get('scroll', scroll)
                scroll_id = Scroll.get('scroll_id', scroll_id)
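            # pagination parameters may likewise be given flat or nested inside 'Pagination'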
            pagination = data_in.get('Pagination', None)
            page = data_in.get('page', 1)
            per_page = data_in.get('per_page', 10 if not scroll else 1000)
            order = data_in.get('order', -1)
            order_by = data_in.get('order_by', 'upload_id')
            if pagination:
                page = pagination.get('page', page)
                per_page = pagination.get('per_page', per_page)
                order = pagination.get('order', order)
                order_by = pagination.get('order_by', order_by)
            db = data_in.get('db')
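            # the archive query schema (a dict with null values as placeholders) is posted
            # under 'results'; only its last entry is used as the query template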
            qschema = data_in.get('results', None)
            if qschema is not None:
                qschema = qschema[-1]
        except Exception:
            abort(400, message='bad parameter types')

        try:
            assert page >= 1
            assert per_page > 0
        except AssertionError:
            abort(400, message='invalid pagination')

        if order not in [-1, 1]:
            abort(400, message='invalid pagination')

        search_request = search.SearchRequest()
        apply_search_parameters(search_request, data_in)

        try:
            if scroll:
                results = search_request.execute_scrolled(scroll_id=scroll_id, size=per_page)
                results['scroll']['scroll'] = True

            else:
                results = search_request.execute_paginated(
                    per_page=per_page, page=page, order=order, order_by=order_by)

        except search.ScrollIdNotFound:
            abort(400, 'The given scroll_id does not exist.')
        except KeyError as e:
            import traceback
            traceback.print_exc()
            abort(400, str(e))

        data = []
        calcs = results['results']
        try:
            upload_files = None
            for entry in calcs:
                upload_id = entry['upload_id']
                calc_id = entry['calc_id']
                if upload_files is None or upload_files.upload_id != upload_id:
                    if upload_files is not None:
                        upload_files.close_zipfile_cache()

                    upload_files = UploadFiles.get(
                        upload_id, create_authorization_predicate(upload_id))

                    if upload_files is None:
                        raise KeyError

                    upload_files.open_zipfile_cache()

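                    # for the msgpack-based archive (db == 'msg') open the upload's
                    # ArchiveFileDB files once and reuse them for all of its calcs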
                    if db == 'msg':
                        fos = upload_files.archive_file_msg(calc_id)
                        msgdbs = [ArchiveFileDB(fo) for fo in fos if fo is not None]

                if db == 'zip':
                    fo = upload_files.archive_file(calc_id, 'rb')
                    data.append({calc_id: json.loads(fo.read())})

                elif db == 'msg':
                    for msgdb in msgdbs:
                        data.append(msgdb.query({calc_id: qschema}))

            if upload_files is not None:
                upload_files.close_zipfile_cache()

        except Restricted:
            abort(401, message='Not authorized to access %s/%s.' % (upload_id, calc_id))

        except KeyError:
            abort(404, message='Calculation %s/%s does not exist.' % (upload_id, calc_id))

        # assign archive data to results
        results['results'] = data

        # build python code and curl snippet
        if 'python' in data_in:
            results['python'] = query_api_python('archive', 'query', query_string=request.args)
        if 'curl' in data_in:
            results['curl'] = query_api_curl('archive', 'query', query_string=request.args)

        # for compatibility with archive model
        # TODO should be changed in search
        results['Scroll'] = results.pop('scroll', None)
        results['Pagination'] = results.pop('pagination', None)

        return results, 200


@ns.route('/metainfo/<string:metainfo_package_name>')
@api.doc(params=dict(metainfo_package_name='The name of the metainfo package.'))
class MetainfoResource(Resource):
    @api.doc('get_metainfo')
    @api.response(404, 'The metainfo does not exist')
    @api.response(200, 'Metainfo data sent')
    def get(self, metainfo_package_name):
        """
        Get a metainfo definition file.
        """
        try:
            return load_metainfo(metainfo_package_name), 200
        except FileNotFoundError:
            parser_prefix = metainfo_package_name[:-len('.nomadmetainfo.json')]

            try:
                return load_metainfo(dict(
                    parser='%sparser' % parser_prefix,
                    path='%s.nomadmetainfo.json' % parser_prefix)), 200
            except FileNotFoundError:
                abort(404, message='The metainfo %s does not exist.' % metainfo_package_name)


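# directory of the installed nomad_meta_info package, used to resolve metainfo files by name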
metainfo_main_path = os.path.dirname(os.path.abspath(nomad_meta_info.__file__))


def load_metainfo(
        package_name_or_dependency: str, dependency_source: str = None,
        loaded_packages: Dict[str, Any] = None) -> Dict[str, Any]:
    """
    Loads the given metainfo package and all its dependencies. Returns a dict with
    all loaded package_names and respective packages.

    Arguments:
400
401
        package_name_or_dependency: The name of the package, or a nomadmetainfo dependency object.
        dependency_source: The path of the metainfo that uses this function to load a relative dependency.
402
403
404
405
406
407
        loaded_packages: Give a dict and the function will added freshly loaded packages
            to it and return it.
    """
    if loaded_packages is None:
        loaded_packages = {}

    if isinstance(package_name_or_dependency, str):
        package_name = package_name_or_dependency
        metainfo_path = os.path.join(metainfo_main_path, package_name)
    else:
        dependency = package_name_or_dependency
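        # a dependency may reference its metainfo file via a relative path, a path within
        # the main metainfo directory, or through an installed parser package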
        if 'relativePath' in dependency:
            if dependency_source is None:
                raise Exception(
                    'Can only load relative dependency from within another metainfo package')

            metainfo_path = os.path.join(
                os.path.dirname(dependency_source), dependency['relativePath'])

        elif 'metainfoPath' in dependency:
            metainfo_path = os.path.join(metainfo_main_path, dependency['metainfoPath'])

        elif 'parser' in dependency:
            parser = dependency['parser']
            path = dependency['path']
            try:
                parser_module = importlib.import_module(parser).__file__
            except Exception:
                raise Exception('Parser %s not installed for metainfo path %s' % (parser, path))

            parser_directory = os.path.dirname(parser_module)
            metainfo_path = os.path.join(parser_directory, path)

        else:
            raise Exception('Invalid dependency type in metainfo package %s' % dependency_source)

        package_name = os.path.basename(metainfo_path)

    package_name = os.path.basename(package_name)

    if package_name in loaded_packages:
        return loaded_packages

    with open(metainfo_path, 'rt') as f:
        metainfo_json = json.load(f)

    loaded_packages[package_name] = metainfo_json

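    # recursively load all declared dependencies; packages that are already in
    # loaded_packages are skipped by the check above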
    for dependency in metainfo_json.get('dependencies', []):
        load_metainfo(dependency, dependency_source=metainfo_path, loaded_packages=loaded_packages)

    return loaded_packages