# Copyright 2018 Markus Scheidgen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This module comprises a set of persistent document classes that hold all user related
data: information about users, their uploads and datasets, the associated
calculations, and files.


.. autoclass:: Calc
    :members:
.. autoclass:: Upload
    :members:
.. autoclass:: DataSet
.. autoclass:: User

"""

from typing import List, Any
import sys
import time
from datetime import datetime
from elasticsearch.exceptions import NotFoundError
from mongoengine import \
    Document, EmailField, StringField, BooleanField, DateTimeField, \
    ListField, DictField, ReferenceField, IntField, connect
import mongoengine.errors
import logging

from nomad import config, files, utils
from nomad.repo import RepoCalc
from nomad.user import User, me
from nomad.processing.base import Proc, Chord, process, task, PENDING, SUCCESS, FAILURE, RUNNING
from nomad.parsing import LocalBackend, parsers, parser_dict
from nomad.normalizing import normalizers
from nomad.utils import get_logger, lnr
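
# Rough usage sketch (illustrative only; the actual driving code lives in the API
# and worker layers, and ``some_user`` is a placeholder for an authenticated user):
#
#     upload = Upload.create(user_id=some_user.email, name='example upload')
#     # the client PUTs the raw file to ``upload.presigned_url`` (or runs
#     # ``upload.upload_command``); processing is then triggered with:
#     upload.process()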


class NotAllowedDuringProcessing(Exception): pass


class Calc(Proc):
    """
    Instances of this class represent calculations. This class manages the elastic
    search index entry, files, and archive for the respective calculation.

    It also holds the calculation's processing state.

    The attribute list does not include the various repository properties generated
    while parsing, such as ``program_name``, ``program_version``, etc.

    Attributes:
        archive_id: the hash based archive id of the calc
        parser: the name of the parser used to process this calc
        upload_id: the id of the upload used to create this calculation
        mainfile: the mainfile (including path in upload) that was used to create this calc
        mainfile_tmp_path: path to the mainfile extracted for processing
    """
    archive_id = StringField(primary_key=True)
    upload_id = StringField()
    mainfile = StringField()
    parser = StringField()
    mainfile_tmp_path = StringField()

    meta: Any = {
        'indexes': [
            'upload_id', 'mainfile', 'code', 'parser', 'status'
        ]
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._parser_backend = None
        self._upload = None

    @classmethod
    def get(cls, id):
        return cls.get_by_id(id, 'archive_id')

    def delete(self):
        """
        Delete this calculation and all associated data. This includes all files,
        the archive, and this search index entry.
        TODO is this needed? Or do we always delete whole uploads in bulk?
        """
        # delete the archive
        if self.archive_id is not None:
            files.delete_archive(self.archive_id)

        # delete the search index entry
        try:
            elastic_entry = RepoCalc.get(self.archive_id)
            if elastic_entry is not None:
                elastic_entry.delete()
        except NotFoundError:
            pass

        # delete this mongo document
        super().delete()

    def get_logger(self, **kwargs):
        upload_hash, calc_hash = self.archive_id.split('/')
        logger = super().get_logger()
        logger = logger.bind(
            upload_id=self.upload_id, mainfile=self.mainfile,
            upload_hash=upload_hash, calc_hash=calc_hash, **kwargs)
        return logger

    @property
    def json_dict(self):
        """ A json serializable dictionary representation. """
        data = {
            'archive_id': self.archive_id,
            'mainfile': self.mainfile,
            'upload_id': self.upload_id,
            'parser': self.parser
        }
        data.update(super().json_dict)
        return {key: value for key, value in data.items() if value is not None}

    @process
    def process(self):
        self._upload = Upload.get(self.upload_id)
        if self._upload is None:
            self.get_logger().error('calculation upload does not exist')

        try:
            self.parsing()
            self.normalizing()
            self.archiving()
        finally:
            self._upload.completed_child()

    @task
    def parsing(self):
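        # run the parser that was matched to this mainfile (in ``Upload.parse_all``)
        # on the locally extracted file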
        self._parser_backend = parser_dict[self.parser].run(self.mainfile_tmp_path)
        if self._parser_backend.status[0] != 'ParseSuccess':
            error = self._parser_backend.status[1]
            self.fail(error, level=logging.DEBUG)

    @task
    def normalizing(self):
        for normalizer in normalizers:
            normalizer_name = normalizer.__name__
            normalizer(self._parser_backend).normalize()
            if self._parser_backend.status[0] != 'ParseSuccess':
                error = self._parser_backend.status[1]
                self.fail(error, normalizer=normalizer_name, level=logging.WARNING)
                return
            self.get_logger().debug(
                'completed normalizer successfully', normalizer=normalizer_name)

    @task
    def archiving(self):
        upload_hash, calc_hash = self.archive_id.split('/')
        additional = dict(
            mainfile=self.mainfile,
            upload_time=self._upload.upload_time,
            staging=True,
            restricted=False,
            user_id=self._upload.user_id)
        # persist to elastic search
        RepoCalc.create_from_backend(
            self._parser_backend,
            additional=additional,
            upload_hash=upload_hash,
            calc_hash=calc_hash,
            upload_id=self.upload_id)

        # persist the archive
        with files.write_archive_json(self.archive_id) as out:
            self._parser_backend.write_json(out, pretty=True)


class Upload(Chord):
    """
    Represents uploads in the databases. Provides persistence access to the files storage,
    and processing state.

    Attributes:
        name: optional user provided upload name
        additional_metadata: optional user provided additional meta data
        upload_id: the upload id generated by the database
        in_staging: true if the upload is still in staging and can be edited by the uploader
        is_private: true if the upload and its derivatives are only visible to the uploader
        presigned_url: the presigned url for file upload
        upload_time: the timestamp when the system registered the upload
        upload_hash: the hash of the uploaded file
        user_id: the id of the user that created this upload
    """
    id_field = 'upload_id'

    upload_id = StringField(primary_key=True)

    name = StringField(default=None)
    additional_metadata = DictField(default=None)

    in_staging = BooleanField(default=True)
    is_private = BooleanField(default=False)

    presigned_url = StringField()
    upload_command = StringField()
    upload_time = DateTimeField()
    upload_hash = StringField(default=None)

    user_id = StringField(required=True)

    _initiated_parsers = IntField(default=-1)

    meta: Any = {
        'indexes': [
            'upload_hash', 'user_id', 'status'
        ]
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._upload = None

    @classmethod
    def get(cls, id):
        return cls.get_by_id(id, 'upload_id')

    @classmethod
    def user_uploads(cls, user: User) -> List['Upload']:
        """ Returns all uploads for the given user. Currently returns all uploads. """
        return cls.objects(user_id=user.email, in_staging=True)

    def get_logger(self, **kwargs):
        logger = super().get_logger()
        logger = logger.bind(upload_id=self.upload_id, **kwargs)
        return logger

    def delete(self):
        logger = self.get_logger(task='delete')

        if not (self.completed or self.is_stale or self.current_task == 'uploading'):
            raise NotAllowedDuringProcessing()

        with lnr(logger, 'delete upload file'):
            try:
                files.Upload(self.upload_id).delete()
            except KeyError:
                if self.current_task == 'uploading':
                    logger.debug(
                        'Upload exists, but file does not exist. '
                        'It was probably aborted and deleted.')
                else:
                    logger.debug('Upload exists, but uploaded file does not exist.')

        with lnr(logger, 'deleting calcs'):
            # delete archive files
            files.delete_archives(upload_hash=self.upload_hash)

            # delete repo entries
            RepoCalc.delete_upload(upload_id=self.upload_id)

            # delete calc processings
            Calc.objects(upload_id=self.upload_id).delete()

        with lnr(logger, 'deleting upload'):
            super().delete()

    @classmethod
    def _external_objects_url(cls, url):
        """ Replaces the given internal object storage url (minio) with an URL that allows
            external access.
        """
        port_with_colon = ''
        if config.services.objects_port > 0:
            port_with_colon = ':%d' % config.services.objects_port

        return url.replace(
            '%s:%s' % (config.minio.host, config.minio.port),
            '%s%s%s' % (config.services.objects_host, port_with_colon, config.services.objects_base_path))

    @classmethod
    def create(cls, **kwargs) -> 'Upload':
        """
        Creates a new upload for the given user; a user-given name is optional.
        It will populate the record with a signed url and a pending :class:`UploadProc`.
        The upload is already saved to the database.
        """
        self = super().create(**kwargs)
        self.presigned_url = cls._external_objects_url(files.get_presigned_upload_url(self.upload_id))
        self.upload_command = files.create_curl_upload_cmd(self.presigned_url, 'your_file')
        self._continue_with('uploading')
        return self

    @property
    def is_stale(self) -> bool:
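        # an upload is considered stale if the client never completed the file
        # transfer within a day of the record being created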
        if self.current_task == 'uploading' and self.upload_time is None:
            return (datetime.now() - self.create_time).days > 1
        else:
            return False

    def unstage(self):
        self.get_logger().info('unstage')
        self.in_staging = False
        RepoCalc.unstage(upload_id=self.upload_id)
        self.save()

    @property
    def json_dict(self) -> dict:
        """ A json serializable dictionary representation. """
        data = {
            'name': self.name,
            'additional_metadata': self.additional_metadata,
            'upload_id': self.upload_id,
            'presigned_url': self.presigned_url,
            'upload_command': self.upload_command,
            'upload_time': self.upload_time.isoformat() if self.upload_time is not None else None,
            'is_stale': self.is_stale,
        }
        data.update(super().json_dict)
        return {key: value for key, value in data.items() if value is not None}

    @process
    def process(self):
        self.extracting()
        self.parse_all()

    @task
    def uploading(self):
        pass

    @task
    def extracting(self):
        logger = self.get_logger()
        try:
            self._upload = files.Upload(self.upload_id)
            self._upload.open()
            logger.debug('opened upload')
        except KeyError as e:
            self.fail('process request for non-existing upload', level=logging.INFO)
            return

        try:
            self.upload_hash = self._upload.hash()
        except files.UploadError as e:
            self.fail('could not create upload hash', e)
            return

        if RepoCalc.upload_exists(self.upload_hash):
            self.fail('The same file was already uploaded and processed.', level=logging.INFO)
            return

    @task
    def parse_all(self):
        # TODO: deal with multiple possible parser specs
        total_calcs = 0
        for filename in self._upload.filelist:
            for parser in parsers:
                try:
                    if parser.is_mainfile(filename, lambda fn: self._upload.open_file(fn)):
                        tmp_mainfile = self._upload.get_path(filename)
                        calc = Calc.create(
                            archive_id='%s/%s' % (self.upload_hash, utils.hash(filename)),
                            mainfile=filename, parser=parser.name,
                            mainfile_tmp_path=tmp_mainfile,
                            upload_id=self.upload_id)

                        calc.process()
                        total_calcs += 1
                except Exception as e:
                    self.warning(
                        'exception while matching potential mainfile',
                        mainfile=filename, exc_info=e)

        # have to save the total_calcs information for chord management
        self.spwaned_childred(total_calcs)

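    # ``join`` is invoked through the :class:`Chord` machinery once all spawned child
    # calculations have reported in via ``completed_child`` (see ``Calc.process``).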
    def join(self):
        self.cleanup()

    @task
    def cleanup(self):
        try:
            upload = files.Upload(self.upload_id)
        except KeyError as e:
            self.fail('Upload does not exist', exc_info=e)
            return

        upload.close()
        self.get_logger().debug('closed upload')

    @property
    def processed_calcs(self):
        return Calc.objects(upload_id=self.upload_id, status__in=[SUCCESS, FAILURE]).count()

    @property
    def total_calcs(self):
        return Calc.objects(upload_id=self.upload_id).count()

    @property
    def failed_calcs(self):
        return Calc.objects(upload_id=self.upload_id, status=FAILURE).count()

    @property
    def pending_calcs(self):
        return Calc.objects(upload_id=self.upload_id, status=PENDING).count()

    def all_calcs(self, start, end, order_by='mainfile'):
        return Calc.objects(upload_id=self.upload_id)[start:end].order_by(order_by)

    @staticmethod
    def repair_all():
        """
        Utility function that will look for suspicious-looking conditions in
        all uncompleted uploads. It ain't a perfect world.
        """
        uploads = Upload.objects(status__in=[PENDING, RUNNING])
        for upload in uploads:
            completed = upload.processed_calcs
            total = upload.total_calcs
            pending = upload.pending_calcs

            if completed + pending == total:
                time.sleep(2)
                if pending == upload.pending_calcs:
                    Calc.objects(upload_id=upload.upload_id, status=PENDING).delete()