# Copyright 2018 Markus Scheidgen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This module comprises a set of persistent document classes that hold all
user-related data: information about users, their uploads and datasets, the
associated calculations, and files.


.. autoclass:: Calc

.. autoclass:: Upload

"""

from typing import cast, List, Any, ContextManager, Tuple, Generator, Dict
from mongoengine import StringField, DateTimeField, DictField, BooleanField, IntField
import logging
from structlog import wrap_logger
from contextlib import contextmanager
import os.path
from datetime import datetime
from pymongo import UpdateOne
import hashlib
from structlog.processors import StackInfoRenderer, format_exc_info, TimeStamper
import json

from nomad import utils, config, infrastructure, search, datamodel
from nomad.files import PathObject, UploadFiles, ExtractError, ArchiveBasedStagingUploadFiles, PublicUploadFiles, StagingUploadFiles
from nomad.processing.base import Proc, process, task, PENDING, SUCCESS, FAILURE
from nomad.parsing import parser_dict, match_parser, LocalBackend
from nomad.normalizing import normalizers
from nomad.datamodel import UploadWithMetadata, Domain

def _pack_log_event(logger, method_name, event_dict):
    try:
        log_data = dict(event_dict)
        log_data.update(**{
            key: value
            for key, value in getattr(logger, '_context', {}).items()
            if key not in ['service', 'release', 'upload_id', 'calc_id', 'mainfile', 'process_status']})
        log_data.update(logger=logger.name)

        return log_data
    except Exception:
        # raising an exception would cause an indefinite loop
        return event_dict


_log_processors = [
    StackInfoRenderer(),
    _pack_log_event,
    format_exc_info,
    TimeStamper(fmt="%Y-%m-%d %H:%M.%S", utc=False)]
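
# Illustrative sketch (an assumption, not from the original source): this processor
# chain is passed to structlog's ``wrap_logger``, as done in ``Calc.get_logger``
# below. With a plain stdlib logger it would look like:
#
#     example_logger = wrap_logger(
#         logging.getLogger('nomad.processing'), processors=_log_processors)
#     example_logger.info('event happened', upload_id='<upload_id>')
#
# Each processor transforms the event dict in order; ``_pack_log_event`` merges the
# logger's bound context into the event before ``TimeStamper`` adds a timestamp.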


class Calc(Proc):
    """
    Instances of this class represent calculations. This class manages the elastic
    search index entry, files, and archive for the respective calculation.

    It also contains the calculation's processing and its state.

    The attribute list does not include the various metadata properties generated
    while parsing, including ``code_name``, ``code_version``, etc.

    Attributes:
        calc_id: the calc_id of this calc
        parser: the name of the parser used to process this calc
        upload_id: the id of the upload used to create this calculation
        mainfile: the mainfile (including path in upload) that was used to create this calc

        metadata: the metadata record with calc and user metadata, see :class:`datamodel.CalcWithMetadata`
    """
    calc_id = StringField(primary_key=True)
    upload_id = StringField()
    mainfile = StringField()
    parser = StringField()

    metadata = DictField()

    meta: Any = {
        'indexes': [
            'upload_id',
            ('upload_id', 'mainfile'),
            ('upload_id', 'parser'),
            ('upload_id', 'tasks_status'),
            ('upload_id', 'process_status'),
            ('upload_id', 'metadata.nomad_version'),
            'metadata.published',
            'metadata.datasets',
            'metadata.pid'
        ]
    }
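
    # Illustrative sketch (hypothetical queries, not from the original source): the
    # compound indexes above presumably support per-upload lookups such as:
    #
    #     Calc.objects(upload_id='<upload_id>', parser='<parser_name>')
    #     Calc.objects(upload_id='<upload_id>', tasks_status=SUCCESS).count()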

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._parser_backend: LocalBackend = None
        self._upload: Upload = None
        self._upload_files: ArchiveBasedStagingUploadFiles = None
        self._calc_proc_logwriter = None
        self._calc_proc_logwriter_ctx: ContextManager = None

    @classmethod
    def from_calc_with_metadata(cls, calc_with_metadata):
        calc = Calc.create(
            calc_id=calc_with_metadata.calc_id,
            upload_id=calc_with_metadata.upload_id,
            mainfile=calc_with_metadata.mainfile,
            metadata=calc_with_metadata.to_dict())

        return calc
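
    # Illustrative usage (an assumption, not from the original source): turning an
    # existing ``CalcWithMetadata`` record, e.g. one read from a published upload,
    # back into a processing document:
    #
    #     calc = Calc.from_calc_with_metadata(calc_with_metadata)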

    @classmethod
    def get(cls, id):
        return cls.get_by_id(id, 'calc_id')

    @property
    def mainfile_file(self) -> PathObject:
        return self.upload_files.raw_file_object(self.mainfile)

    @property
    def upload(self) -> 'Upload':
        if not self._upload:
            self._upload = Upload.get(self.upload_id)
            self._upload.worker_hostname = self.worker_hostname
        return self._upload

    @property
    def upload_files(self) -> ArchiveBasedStagingUploadFiles:
        if not self._upload_files:
            self._upload_files = ArchiveBasedStagingUploadFiles(
                self.upload_id, is_authorized=lambda: True, upload_path=self.upload.upload_path)
        return self._upload_files

    def get_logger(self, **kwargs):
        """
        Returns a wrapped logger that additionally saves all entries to the calculation
        processing log in the archive.
        """
        logger = super().get_logger()
        logger = logger.bind(
            upload_id=self.upload_id, mainfile=self.mainfile, calc_id=self.calc_id, **kwargs)

        if self._calc_proc_logwriter_ctx is None:
            try:
                self._calc_proc_logwriter_ctx = self.upload_files.archive_log_file(self.calc_id, 'wt')
                self._calc_proc_logwriter = self._calc_proc_logwriter_ctx.__enter__()  # pylint: disable=E1101
            except KeyError:
                # cannot open log file
                pass

        if self._calc_proc_logwriter_ctx is None:
            return logger
        else:
            def save_to_calc_log(logger, method_name, event_dict):
                if self._calc_proc_logwriter is not None:
                    try:
                        dump_dict = dict(event_dict)
                        dump_dict.update(level=method_name.upper())
                        json.dump(dump_dict, self._calc_proc_logwriter, sort_keys=True)
                        self._calc_proc_logwriter.write('\n')
                    except Exception:
                        # exceptions here would cause an indefinite loop
                        pass

                return event_dict

            return wrap_logger(logger, processors=_log_processors + [save_to_calc_log])
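
    # Illustrative sketch (an assumption, not from the original source): entries
    # logged through this logger go to the central log and, via ``save_to_calc_log``,
    # are also appended as JSON lines to the calc's archive log file:
    #
    #     logger = calc.get_logger(parser=calc.parser)
    #     logger.info('some processing event')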

    @process
    def re_process_calc(self):
        """
        Processes a calculation again. This means there is already metadata and
        instead of creating it initially, we are just updating the existing
        records.
        """
        parser = match_parser(self.mainfile, self.upload_files, strict=False)
        if parser is None:
            # Remove the log file and set a fake logwriter to avoid creating a log file,
            # because we will probably remove this calc and don't want ghost log files.
            if self._calc_proc_logwriter_ctx is not None:
                self._calc_proc_logwriter_ctx.__exit__(None, None, None)
                self.upload_files.archive_log_file_object(self.calc_id).delete()

            self._calc_proc_logwriter_ctx = open('/dev/null', 'wt')
            self._calc_proc_logwriter = self._calc_proc_logwriter_ctx.__enter__()
            self.get_logger().error(
                'no parser matches during re-process, use the old parser')

            # mock the steps of actual processing
            self._continue_with('parsing')
            self._continue_with('normalizing')
            self._continue_with('archiving')
            self._complete()
            return

        logger = self.get_logger(parser=parser.name)
        if self.parser != parser.name:
            self.parser = parser.name
            logger.info(
                'different parser matches during re-process, use new parser')

        try:
            calc_with_metadata = datamodel.CalcWithMetadata(**self.metadata)
            calc_with_metadata.upload_id = self.upload_id
            calc_with_metadata.calc_id = self.calc_id
            calc_with_metadata.calc_hash = self.upload_files.calc_hash(self.mainfile)
            calc_with_metadata.mainfile = self.mainfile
            calc_with_metadata.nomad_version = config.version
            calc_with_metadata.nomad_commit = config.commit
            calc_with_metadata.last_processing = datetime.utcnow()
            calc_with_metadata.files = self.upload_files.calc_files(self.mainfile)
            self.metadata = calc_with_metadata.to_dict()

            self.parsing()
            self.normalizing()
            self.archiving()
        finally:
            # close loghandler that was not closed due to failures
            try:
                if self._calc_proc_logwriter is not None:
                    self._calc_proc_logwriter.close()
                    self._calc_proc_logwriter = None
            except Exception as e:
                logger.error('could not close calculation proc log', exc_info=e)

    @process
    def process_calc(self):
        """
        Processes a new calculation that has no prior records in the mongo, elastic,
        or filesystem storage. It will create an initial set of (user) metadata.
        """
        logger = self.get_logger()
        if self.upload is None:
            logger.error('calculation upload does not exist')

        try:
            # save preliminary minimum calc metadata in case processing fails
            # successful processing will replace it with the actual metadata
            calc_with_metadata = datamodel.CalcWithMetadata(
                upload_id=self.upload_id,
                calc_id=self.calc_id,
                calc_hash=self.upload_files.calc_hash(self.mainfile),
                mainfile=self.mainfile)
            calc_with_metadata.published = False
            calc_with_metadata.uploader = self.upload.user_id
            calc_with_metadata.upload_time = self.upload.upload_time
            calc_with_metadata.upload_name = self.upload.name
            calc_with_metadata.nomad_version = config.version
            calc_with_metadata.nomad_commit = config.commit
            calc_with_metadata.last_processing = datetime.utcnow()
            calc_with_metadata.files = self.upload_files.calc_files(self.mainfile)
            self.metadata = calc_with_metadata.to_dict()

            if len(calc_with_metadata.files) >= config.auxfile_cutoff:
                self.warning(
                    'This calc has many aux files in its directory. '
                    'Have you placed many calculations in the same directory?')

            self.parsing()
            self.normalizing()
            self.archiving()
        finally:
            # close loghandler that was not closed due to failures
            try:
                if self._calc_proc_logwriter is not None:
                    self._calc_proc_logwriter.close()
                    self._calc_proc_logwriter = None
            except Exception as e:
                logger.error('could not close calculation proc log', exc_info=e)

    def fail(self, *errors, log_level=logging.ERROR, **kwargs):
        # in case of failure, index a minimum set of metadata and mark
        # processing failure
        try:
            calc_with_metadata = datamodel.CalcWithMetadata(**self.metadata)
            calc_with_metadata.formula = config.services.not_processed_value
            calc_with_metadata.basis_set = config.services.not_processed_value
            calc_with_metadata.xc_functional = config.services.not_processed_value
            calc_with_metadata.system = config.services.not_processed_value
            calc_with_metadata.crystal_system = config.services.not_processed_value
            calc_with_metadata.spacegroup = config.services.not_processed_value
            calc_with_metadata.spacegroup_symbol = config.services.not_processed_value
            calc_with_metadata.code_version = config.services.not_processed_value

            calc_with_metadata.code_name = config.services.not_processed_value
            if self.parser is not None:
                parser = parser_dict[self.parser]
                if hasattr(parser, 'code_name'):
                    calc_with_metadata.code_name = parser.code_name

            calc_with_metadata.processed = False
            self.metadata = calc_with_metadata.to_dict()
            search.Entry.from_calc_with_metadata(calc_with_metadata).save()
        except Exception as e:
            self.get_logger().error('could not index after processing failure', exc_info=e)

        super().fail(*errors, log_level=log_level, **kwargs)

    def on_process_complete(self, process_name):
        # the save might be necessary to correctly read the join condition from the db
        self.save()
        # in case of error, the process_name might be unknown
        if process_name == 'process_calc' or process_name == 're_process_calc' or process_name is None:
            self.upload.reload()
            self.upload.check_join()

    @task
    def parsing(self):
        """ The *task* that encapsulates all parsing-related actions. """
        context = dict(parser=self.parser, step=self.parser)
        logger = self.get_logger(**context)
        parser = parser_dict[self.parser]
        self.metadata['parser_name'] = self.parser

        with utils.timer(logger, 'parser executed', input_size=self.mainfile_file.size):
            try:
                self._parser_backend = parser.run(
                    self.upload_files.raw_file_object(self.mainfile).os_path, logger=logger)
            except Exception as e:
                self.fail('parser failed with exception', exc_info=e, error=str(e), **context)
                return
            except SystemExit:
                self.fail('parser raised system exit', error='system exit', **context)
                return

        # add the non code specific calc metadata to the backend
        # all other quantities have been determined by parsers/normalizers
        self._parser_backend.openNonOverlappingSection('section_entry_info')
        self._parser_backend.addValue('upload_id', self.upload_id)
        self._parser_backend.addValue('calc_id', self.calc_id)
        self._parser_backend.addValue('calc_hash', self.metadata['calc_hash'])
        self._parser_backend.addValue('mainfile', self.mainfile)
        self._parser_backend.addValue('parser_name', self.parser)
        filepaths = self.metadata['files']
        self._parser_backend.addValue('number_of_files', len(filepaths))
        self._parser_backend.addValue('filepaths', filepaths)
        uploader = self.upload.uploader
        self._parser_backend.addValue(
            'entry_uploader_name', '%s, %s' % (uploader.first_name, uploader.last_name))
        self._parser_backend.addValue(
            'entry_uploader_id', str(uploader.user_id))
        self._parser_backend.addValue('entry_upload_time', int(self.upload.upload_time.timestamp()))
        self._parser_backend.closeNonOverlappingSection('section_entry_info')

        self.add_processor_info(self.parser)

        if self._parser_backend.status[0] != 'ParseSuccess':
            error = self._parser_backend.status[1]
            self.fail('parser failed', error=error, **context)

    @contextmanager
    def use_parser_backend(self, processor_name):
        self._parser_backend.reset_status()
        yield self._parser_backend
        self.add_processor_info(processor_name)

    def add_processor_info(self, processor_name: str) -> None:
        self._parser_backend.openContext('/section_entry_info/0')
        self._parser_backend.openNonOverlappingSection('section_archive_processing_info')
        self._parser_backend.addValue('archive_processor_name', processor_name)

        if self._parser_backend.status[0] == 'ParseSuccess':
            warnings = getattr(self._parser_backend, '_warnings', [])
            if len(warnings) > 0:
                self._parser_backend.addValue('archive_processor_status', 'WithWarnings')
                self._parser_backend.addValue('number_of_archive_processor_warnings', len(warnings))
                self._parser_backend.addArrayValues('archive_processor_warnings', [str(warning) for warning in warnings])
            else:
                self._parser_backend.addValue('archive_processor_status', 'Success')
        else:
            errors = self._parser_backend.status[1]
            self._parser_backend.addValue('archive_processor_error', str(errors))

        self._parser_backend.closeNonOverlappingSection('section_archive_processing_info')
        self._parser_backend.closeContext('/section_entry_info/0')

    @task
    def normalizing(self):
        """ The *task* that encapsulates all normalizing-related actions. """
        for normalizer in normalizers:
            if normalizer.domain != config.domain:
                continue

            normalizer_name = normalizer.__name__
            context = dict(normalizer=normalizer_name, step=normalizer_name)
            logger = self.get_logger(**context)

            with utils.timer(
                    logger, 'normalizer executed', input_size=self.mainfile_file.size):
                with self.use_parser_backend(normalizer_name) as backend:
                    try:
                        normalizer(backend).normalize(logger=logger)
                    except Exception as e:
                        self._parser_backend.finishedParsingSession('ParseFailure', [str(e)])
                        self.fail(
                            'normalizer failed with exception', exc_info=e, error=str(e), **context)
                        break
                    else:
                        if self._parser_backend.status[0] != 'ParseSuccess':
                            error = self._parser_backend.status[1]
                            self.fail('normalizer failed', error=error, **context)
                            break
                        else:
                            logger.debug(
                                'completed normalizer successfully', normalizer=normalizer_name)

    @task
    def archiving(self):
        """ The *task* that encapsulates all archival-related actions. """
        logger = self.get_logger()

        calc_with_metadata = datamodel.CalcWithMetadata(**self.metadata)
        calc_with_metadata.apply_domain_metadata(self._parser_backend)
        calc_with_metadata.processed = True

        # persist the calc metadata
        with utils.timer(logger, 'saved calc metadata', step='metadata'):
            self.metadata = calc_with_metadata.to_dict()

        # index in search
        with utils.timer(logger, 'indexed', step='index'):
            search.Entry.from_calc_with_metadata(calc_with_metadata).save()

        # persist the archive
        with utils.timer(
                logger, 'archived', step='archive',
                input_size=self.mainfile_file.size) as log_data:
            with self.upload_files.archive_file(self.calc_id, 'wt') as out:
                self._parser_backend.write_json(out, pretty=True, root_sections=Domain.instance.root_sections)

            log_data.update(archive_size=self.upload_files.archive_file_object(self.calc_id).size)

        # close loghandler
        if self._calc_proc_logwriter is not None:
            with utils.timer(
                    logger, 'archived log', step='logs',
                    input_size=self.mainfile_file.size) as log_data:
                self._calc_proc_logwriter_ctx.__exit__(None, None, None)  # pylint: disable=E1101
                self._calc_proc_logwriter = None

                log_data.update(log_size=self.upload_files.archive_log_file_object(self.calc_id).size)

    def __str__(self):
        return 'calc %s calc_id=%s upload_id=%s' % (super().__str__(), self.calc_id, self.upload_id)


class Upload(Proc):
    """
    Represents uploads in the database. Provides persistent access to the file
    storage and the processing state.

    Attributes:
        name: optional user-provided upload name
        upload_path: the path where the uploaded file was stored
        temporary: True if the uploaded file should be removed after extraction
        upload_id: the upload id generated by the database
        upload_time: the timestamp when the system realised the upload
        user_id: the id of the user that created this upload
        published: Boolean that indicates the publish status
        publish_time: Date when the upload was initially published
        last_update: Date of the last publishing/re-processing
        joined: Boolean that indicates if the running processing has joined (:func:`check_join`)
    """
    id_field = 'upload_id'

    upload_id = StringField(primary_key=True)
    upload_path = StringField(default=None)
    temporary = BooleanField(default=False)
    embargo_length = IntField(default=36)

    name = StringField(default=None)
    upload_time = DateTimeField()
    user_id = StringField(required=True)
    published = BooleanField(default=False)
    publish_time = DateTimeField()
    last_update = DateTimeField()

    joined = BooleanField(default=False)

    meta: Any = {
        'indexes': [
            'user_id', 'tasks_status', 'process_status', 'published', 'upload_time'
        ]
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._upload_files: ArchiveBasedStagingUploadFiles = None

    @property
    def metadata(self) -> dict:
        """
        Getter, setter for user metadata. Metadata is pickled to and from the public
        bucket to allow sharing among all processes. Usually uploads do not have (much)
        user defined metadata, but users provide all metadata per upload as part of
        the publish process. This will change when we introduce editing functionality
        and metadata will be provided through different means.
        """
        try:
            upload_files = PublicUploadFiles(self.upload_id, is_authorized=lambda: True)
        except KeyError:
            return None
        return upload_files.user_metadata

    @metadata.setter
    def metadata(self, data: dict) -> None:
        upload_files = PublicUploadFiles(self.upload_id, is_authorized=lambda: True, create=True)
        upload_files.user_metadata = data
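
    # Illustrative usage (an assumption, not from the original source; the metadata
    # keys are hypothetical): user metadata reads and writes go through the public
    # upload files:
    #
    #     upload.metadata = {'comment': 'example user metadata'}
    #     assert upload.metadata['comment'] == 'example user metadata'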

    @classmethod
    def get(cls, id: str, include_published: bool = True) -> 'Upload':
        return cls.get_by_id(id, 'upload_id')

    @classmethod
    def user_uploads(cls, user: datamodel.User, **kwargs) -> List['Upload']:
        """ Returns all uploads for the given user. Kwargs are passed to mongo query. """
        return cls.objects(user_id=str(user.user_id), **kwargs)
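
    # Illustrative usage (an assumption, not from the original source): fetching a
    # user's unpublished uploads by passing a mongo filter as kwargs:
    #
    #     unpublished = Upload.user_uploads(user, published=False)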

    @property
    def uploader(self):
        return datamodel.User.get(self.user_id)

    def get_logger(self, **kwargs):
        logger = super().get_logger()
        user = self.uploader
        user_name = '%s %s' % (user.first_name, user.last_name)
        # We are not using 'user_id' because logstash (?) will filter these entries ?!
        logger = logger.bind(
            upload_id=self.upload_id, upload_name=self.name, user_name=user_name,
            user=user.user_id, **kwargs)
        return logger

    @classmethod
    def create(cls, **kwargs) -> 'Upload':
        """
        Creates a new upload for the given user; a user-given name is optional.
        It will populate the record with a signed url and a pending :class:`UploadProc`.
        The upload is already saved to the database.

        Arguments:
            user: The user that created the upload.
        """
        # use kwargs to keep compatibility with super method
        user: datamodel.User = kwargs['user']
        del kwargs['user']

        if 'upload_id' not in kwargs:
            kwargs.update(upload_id=utils.create_uuid())
        kwargs.update(user_id=user.user_id)
        self = super().create(**kwargs)

        self._continue_with('uploading')

        return self
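
    # Illustrative usage (an assumption, not from the original source; names and
    # paths are hypothetical): creating an upload record and starting its processing:
    #
    #     upload = Upload.create(
    #         user=current_user, name='example upload', upload_path='/path/to/upload.zip')
    #     upload.process_upload()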

    def delete(self):
        """ Deletes this upload process state entry and its calcs. """
        Calc.objects(upload_id=self.upload_id).delete()
        super().delete()

    def delete_upload_local(self):
        """
        Deletes the upload, including its processing state and
        staging files. Local version without celery processing.
        """
        logger = self.get_logger()

        with utils.lnr(logger, 'staged upload delete failed'):
            with utils.timer(
                    logger, 'upload deleted from index', step='index',
                    upload_size=self.upload_files.size):
                search.delete_upload(self.upload_id)

            with utils.timer(
                    logger, 'staged upload deleted', step='files',
                    upload_size=self.upload_files.size):
                self.upload_files.delete()

            self.delete()

    @process
    def delete_upload(self):
        """
        Deletes the upload, including its processing state and
        staging files. This starts the celery process of deleting the upload.
        """
        self.delete_upload_local()

        return True  # do not save the process status on the delete upload

    @process
    def publish_upload(self):
        """
        Moves the upload out of staging to the public area. It will
        pack the staging upload files into public upload files.
        """
        assert self.processed_calcs > 0

        logger = self.get_logger()
        logger.info('started to publish')

        with utils.lnr(logger, 'publish failed'):
            upload_with_metadata = self.to_upload_with_metadata(self.metadata)
            calcs = upload_with_metadata.calcs

            with utils.timer(
                    logger, 'upload metadata updated', step='metadata',
                    upload_size=self.upload_files.size):

                def create_update(calc):
                    calc.published = True
                    calc.with_embargo = calc.with_embargo if calc.with_embargo is not None else False
                    return UpdateOne(
                        {'_id': calc.calc_id},
                        {'$set': {'metadata': calc.to_dict()}})

                Calc._get_collection().bulk_write([create_update(calc) for calc in calcs])
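
                # A single pymongo ``bulk_write`` updates all calcs of the upload in
                # one round-trip, instead of one mongoengine save per calc.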

            if isinstance(self.upload_files, StagingUploadFiles):
                with utils.timer(
                        logger, 'staged upload files packed', step='pack',
                        upload_size=self.upload_files.size):
                    self.upload_files.pack(upload_with_metadata)

            with utils.timer(
                    logger, 'index updated', step='index',
                    upload_size=self.upload_files.size):
                search.publish(calcs)

            if isinstance(self.upload_files, StagingUploadFiles):
                with utils.timer(
                        logger, 'staged upload deleted', step='delete staged',
                        upload_size=self.upload_files.size):
                    self.upload_files.delete()
                    self.published = True
                    self.publish_time = datetime.utcnow()
                    self.last_update = datetime.utcnow()
                    self.save()
            else:
                self.last_update = datetime.utcnow()
                self.save()

    @process
    def re_process_upload(self):
        """
        A *process* that performs the re-processing of an earlier processed
        upload.

        Runs the distributed process of fully reparsing/renormalizing an existing and
        already published upload. Will renew the archive part of the upload and update
        mongo and elastic search entries.

        TODO this implementation does not do any re-matching. This will be more complex
        due to handling of new or missing matches.
        """
        assert self.published

        logger = self.get_logger()
        logger.info('started to re-process')

        # mock the steps of actual processing
        self._continue_with('uploading')

        # extract the published raw files into a staging upload files instance
        self._continue_with('extracting')
        public_upload_files = cast(PublicUploadFiles, self.upload_files)
        staging_upload_files = public_upload_files.to_staging_upload_files(create=True)

        self._continue_with('parse_all')
        try:
            # check if a calc is already/still processing
            processing = Calc.objects(
                upload_id=self.upload_id,
                **Calc.process_running_mongoengine_query()).count()

            if processing > 0:
                logger.warn(
                    'processes are still/already running on calc, they will be reset',
                    count=processing)

            # reset all calcs
            Calc._get_collection().update_many(
                dict(upload_id=self.upload_id),
                {'$set': Calc.reset_pymongo_update(worker_hostname=self.worker_hostname)})

            # process all calcs
            Calc.process_all(Calc.re_process_calc, dict(upload_id=self.upload_id), exclude=['metadata'])

            logger.info('completed to trigger re-process of all calcs')
        except Exception as e:
            # try to remove the staging copy in failure case
            logger.error('failed to trigger re-process of all calcs', exc_info=e)

            if staging_upload_files is not None and staging_upload_files.exists():
                staging_upload_files.delete()

            raise e

        # the packing and removing of the staging upload files will be triggered by
        # the 'cleanup' task after processing all calcs

    @process
    def re_pack(self):
        """ A *process* that repacks the raw and archive data based on the current embargo data. """
        assert self.published

        # mock the steps of actual processing
        self._continue_with('uploading')
        self._continue_with('extracting')
        self._continue_with('parse_all')
        self._continue_with('cleanup')

        self.upload_files.re_pack(self.to_upload_with_metadata())
        self.joined = True
        self._complete()

    @process
    def process_upload(self):
        """ A *process* that performs the initial upload processing. """
        self.extracting()
        self.parse_all()

    @task
    def uploading(self):
        """ A no-op *task* as a stand-in for receiving upload data. """
        pass

    @property
    def upload_files(self) -> UploadFiles:
        upload_files_class = ArchiveBasedStagingUploadFiles if not self.published else PublicUploadFiles
        kwargs = dict(upload_path=self.upload_path) if not self.published else {}

        if not self._upload_files or not isinstance(self._upload_files, upload_files_class):
            self._upload_files = upload_files_class(
                self.upload_id, is_authorized=lambda: True, **kwargs)

        return self._upload_files

    @property
    def staging_upload_files(self) -> ArchiveBasedStagingUploadFiles:
        assert not self.published
        return cast(ArchiveBasedStagingUploadFiles, self.upload_files)

    @task
    def extracting(self):
        """
        The *task* performed before the actual parsing/normalizing: extracting
        the uploaded files.
        """
        # extract the uploaded file
        self._upload_files = ArchiveBasedStagingUploadFiles(
            upload_id=self.upload_id, is_authorized=lambda: True, create=True,
            upload_path=self.upload_path)

        logger = self.get_logger()
        try:
            with utils.timer(
                    logger, 'upload extracted', step='extracting',
                    upload_size=self.upload_files.size):
                self.upload_files.extract()

            if self.temporary:
                os.remove(self.upload_path)
                self.upload_path = None

        except KeyError:
            self.fail('processing requested for non existing upload', log_level=logging.ERROR)
            return
        except ExtractError:
            self.fail('bad .zip/.tar file', log_level=logging.INFO)
            return

    def _preprocess_files(self, path):
        """
        Some files need preprocessing. Currently we need to add a stripped POTCAR version
        and always restrict/embargo the original.
        """
        if os.path.basename(path).startswith('POTCAR'):
            # create checksum
            hash = hashlib.sha224()
            with open(self.staging_upload_files.raw_file_object(path).os_path, 'rb') as orig_f:
                for line in orig_f.readlines():
                    hash.update(line)

            checksum = hash.hexdigest()

            # create stripped POTCAR
            stripped_path = path + '.stripped'
            with open(self.staging_upload_files.raw_file_object(stripped_path).os_path, 'wt') as stripped_f:
                stripped_f.write('Stripped POTCAR file. Checksum of original file (sha224): %s\n' % checksum)
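
            # The awk program below keeps the POTCAR header up to and including the
            # 'END of PSCTR' line and everything from the next 'End of Dataset' line
            # on, dropping the potential dataset in between.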
            os.system(
                '''
                    awk < %s >> %s '
                    BEGIN { dump=1 }
                    /End of Dataset/ { dump=1 }
                    dump==1 { print }
                    /END of PSCTR/ { dump=0 }'
                ''' % (
                    self.staging_upload_files.raw_file_object(path).os_path,
                    self.staging_upload_files.raw_file_object(stripped_path).os_path))

    def match_mainfiles(self) -> Generator[Tuple[str, object], None, None]:
        """
        Generator function that matches all files in the upload to all parsers to
        determine the upload's mainfiles.

        Returns:
            Tuples of mainfile (the matched filename) and parser
        """
        directories_with_match: Dict[str, str] = dict()
        upload_files = self.staging_upload_files
        for filename in upload_files.raw_file_manifest():
            self._preprocess_files(filename)
            try:
                parser = match_parser(filename, upload_files)
                if parser is not None:
                    directory = os.path.dirname(filename)
                    if directory in directories_with_match:
                        # TODO this might give us the chance to store directory based relationship
                        # between calcs for the future?
                        pass
                    else:
                        directories_with_match[directory] = filename

                    yield filename, parser
            except Exception as e:
                self.get_logger().error(
                    'exception while matching pot. mainfile',
                    mainfile=filename, exc_info=e)

    @task
    def parse_all(self):
        """
        The *task* that identifies mainfile/parser combinations among the upload's files, creates
        the respective :class:`Calc` instances, and triggers their processing.
        """
        logger = self.get_logger()

        with utils.timer(
                logger, 'upload extracted', step='matching',
                upload_size=self.upload_files.size):
            for filename, parser in self.match_mainfiles():
                calc = Calc.create(
                    calc_id=self.upload_files.calc_id(filename),
                    mainfile=filename, parser=parser.name,
                    worker_hostname=self.worker_hostname,
                    upload_id=self.upload_id)

                calc.process_calc()

    def on_process_complete(self, process_name):
        if process_name == 'process_upload' or process_name == 're_process_upload':
            self.check_join()

    def check_join(self):
        """
        Performs an evaluation of the join condition and triggers the :func:`cleanup`
        task if necessary. The join condition allows the ``cleanup`` to run after
        all calculations have been processed. The upload processing stops after all
        calculation processings have been triggered (:func:`parse_all` or
        :func:`re_process_upload`). The cleanup task is then run within the last
        calculation process (the one that triggered the join by calling this method).
        """
        total_calcs = self.total_calcs
        processed_calcs = self.processed_calcs

        self.get_logger().debug('check join', processed_calcs=processed_calcs, total_calcs=total_calcs)
        # check if the process is not running anymore, i.e. not still spawning new processes to join
        # check the join condition, i.e. all calcs have been processed
        if not self.process_running and processed_calcs >= total_calcs:
            # this can easily be called multiple times, e.g. upload finished after all calcs finished
            modified_upload = self._get_collection().find_one_and_update(
                {'_id': self.upload_id, 'joined': {'$ne': True}},
                {'$set': {'joined': True}})
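
            # ``find_one_and_update`` is atomic: only one concurrent caller observes
            # ``joined != True`` and gets the document back; all others receive
            # ``None`` and skip the cleanup.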
            if modified_upload is not None:
                self.get_logger().debug('join')
                self.cleanup()
            else:
                # the join was already done due to a prior call
                pass

    def reset(self):
        self.joined = False
        super().reset()

    @classmethod
    def reset_pymongo_update(cls, worker_hostname: str = None):
        update = super().reset_pymongo_update()
        update.update(joined=False)
        return update

    def _cleanup_after_processing(self):
        # send email about process finish
        user = self.uploader
        name = '%s %s' % (user.first_name, user.last_name)
        message = '\n'.join([
            'Dear %s,' % name,
            '',
            'your data %suploaded at %s has completed processing.' % (
                '"%s" ' % self.name if self.name else '', self.upload_time.isoformat()),  # pylint: disable=no-member
            'You can review your data on your upload page: %s' % config.gui_url(),
            '',
            'If you encounter any issues with your upload, please let us know and reply to this email.',
            '',
            'The nomad team'
        ])
        try:
            infrastructure.send_mail(
                name=name, email=user.email, message=message, subject='Processing completed')
        except Exception as e:
            # probably due to email configuration problems
            # don't fail or present this error to clients
            self.logger.error('could not send after processing email', exc_info=e)

    def _cleanup_after_re_processing(self):
        logger = self.get_logger()
        logger.info('started to repack re-processed upload')

        staging_upload_files = self.upload_files.to_staging_upload_files()

        with utils.timer(
                logger, 'reprocessed staged upload packed', step='pack',
                upload_size=self.upload_files.size):

            staging_upload_files.pack(self.to_upload_with_metadata(), skip_raw=True)

        with utils.timer(
                logger, 'reprocessed staged upload deleted', step='delete staged',
                upload_size=self.upload_files.size):

            staging_upload_files.delete()
            self.last_update = datetime.utcnow()
            self.save()

    @task
    def cleanup(self):
        """
        The *task* that "cleans" the processing, i.e. removed obsolete files and performs
        pending archival operations. Depends on the type of processing.
        """
        search.refresh()

        if self.current_process == 're_process_upload':
            self._cleanup_after_re_processing()
        else:
            self._cleanup_after_processing()

    def get_calc(self, calc_id) -> Calc:
        """ Returns the upload calc with the given id or ``None``. """
        return Calc.objects(upload_id=self.upload_id, calc_id=calc_id).first()

    @property
    def processed_calcs(self):