# Copyright 2018 Markus Scheidgen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
import click
import datetime
import elasticsearch_dsl
import elasticsearch
import elasticsearch.helpers
import sys
import io
import re
import uuid
import json
import threading

import numpy as np
import requests
import ase
import ase.io
import bs4
import matid

from nomad import processing as proc, search, datamodel, infrastructure, utils, config
from nomad import atomutils
from nomad.cli.cli import cli


def __run_parallel(
        uploads, parallel: int, callable, label: str):
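    '''Applies `callable` to each upload, running at most `parallel` of them at a time.

    A condition variable guards a counter of available thread slots: each
    worker takes a slot before it starts and releases it (with a notify) when
    it finishes, so the submitting loop blocks until a slot frees up.
    '''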
    if isinstance(uploads, (tuple, list)):
        uploads_count = len(uploads)

    else:
        uploads_count = uploads.count()
        uploads = list(uploads)  # copy the whole mongo query set to avoid cursor timeouts

    cv = threading.Condition()
    threads: typing.List[threading.Thread] = []

    state = dict(
        completed_count=0,
        skipped_count=0,
        available_threads_count=parallel)

    logger = utils.get_logger(__name__)

    print('%d uploads selected, %s ...' % (uploads_count, label))

    def process_upload(upload: proc.Upload):
        logger.info('%s started' % label, upload_id=upload.upload_id)

        completed = False
        if callable(upload, logger):
            completed = True

        with cv:
            state['completed_count'] += 1 if completed else 0
            state['skipped_count'] += 1 if not completed else 0
            state['available_threads_count'] += 1

            print(
                '   %s %s and skipped %s of %s uploads' %
                (label, state['completed_count'], state['skipped_count'], uploads_count))

            cv.notify()

    for upload in uploads:
        with cv:
            cv.wait_for(lambda: state['available_threads_count'] > 0)
            state['available_threads_count'] -= 1
            thread = threading.Thread(target=lambda: process_upload(upload))
            threads.append(thread)
            thread.start()

    for thread in threads:
        thread.join()


def __run_processing(
        uploads, parallel: int, process, label: str, reprocess_running: bool = False):
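    '''Triggers the given process on each upload in parallel and waits for completion.

    Uploads that are already processing are skipped unless `reprocess_running`
    is set.
    '''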

    def run_process(upload, logger):
        if upload.process_running and not reprocess_running:
            logger.warn(
                'cannot trigger %s, since the upload is already/still processing' % label,
                current_process=upload.current_process,
                current_task=upload.current_task, upload_id=upload.upload_id)
            return False
        else:
            upload.reset()
            process(upload)
            upload.block_until_complete(interval=.5)

            if upload.tasks_status == proc.FAILURE:
                logger.info('%s with failure' % label, upload_id=upload.upload_id)

            logger.info('%s complete' % label, upload_id=upload.upload_id)
            return True

    __run_parallel(uploads, parallel=parallel, callable=run_process, label=label)


@cli.group(help='''The nomad admin commands to do nasty stuff directly on the databases.
                     Remember: With great power comes great responsibility!''')
@click.pass_context
def admin(ctx):
    pass


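# Example invocation (hypothetical, assuming the package installs a `nomad`
# entry point for this click CLI):
#   nomad admin reset --remove --i-am-really-sure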
@admin.command(help='Reset/remove all databases.')
@click.option('--remove', is_flag=True, help='Do not just reset all dbs, but also remove them.')
@click.option('--i-am-really-sure', is_flag=True, help='Must be set for the command to do anything.')
def reset(remove, i_am_really_sure):
    if not i_am_really_sure:
        print('You do not seem to be really sure about what you are doing.')
        sys.exit(1)

    infrastructure.setup_mongo()
    infrastructure.setup_elastic()

    infrastructure.reset(remove)


@admin.command(help='Reset all uploads and calcs that are "stuck" in processing, using low-level mongodb operations.')
@click.option('--zero-complete-time', is_flag=True, help='Sets the complete time to epoch zero.')
def reset_processing(zero_complete_time):
    infrastructure.setup_mongo()

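    # Two passes per collection: first reset documents whose *process* is
    # still marked as running, then documents whose *tasks* are still
    # pending or running.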
    def reset_collection(cls):
        in_processing = cls.objects(process_status__in=[proc.PROCESS_RUNNING, proc.base.PROCESS_CALLED])
        print('%d %s processes need to be reset due to an incomplete process' % (in_processing.count(), cls.__name__))
        in_processing.update(
            process_status=None,
            current_process=None,
            worker_hostname=None,
            celery_task_id=None,
            errors=[], warnings=[],
            complete_time=datetime.datetime.fromtimestamp(0) if zero_complete_time else datetime.datetime.now(),
            current_task=None,
            tasks_status=proc.base.CREATED)

        in_tasks = cls.objects(tasks_status__in=[proc.PENDING, proc.RUNNING])
        print('%d %s processes need to be reset due to incomplete tasks' % (in_tasks.count(), cls.__name__))
        in_tasks.update(
            current_task=None,
            tasks_status=proc.base.CREATED,
            errors=[], warnings=[],
            complete_time=datetime.datetime.fromtimestamp(0) if zero_complete_time else datetime.datetime.now())

    reset_collection(proc.Calc)
    reset_collection(proc.Upload)


@admin.command(help='Check and lift embargo of data with expired embargo period.')
@click.option('--dry', is_flag=True, help='Do not lift the embargo, just show what needs to be done.')
@click.option('--parallel', default=1, type=int, help='Use the given number of parallel processes. Default is 1.')
def lift_embargo(dry, parallel):
    infrastructure.setup_mongo()
    infrastructure.setup_elastic()

    request = search.SearchRequest()
    request.q = elasticsearch_dsl.Q('term', with_embargo=True) & elasticsearch_dsl.Q('term', published=True)
    request.quantity('upload_id', 1000)
    result = request.execute()

    uploads_to_repack = []
    for upload_id in result['quantities']['upload_id']['values']:
        upload = proc.Upload.get(upload_id)
        embargo_length = upload.embargo_length
        if embargo_length is None:
            embargo_length = 36
            upload.embargo_length = 36

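        # embargo_length is stored in months; approximate the embargo period
        # in days as months * 365 / 12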
        if upload.upload_time + datetime.timedelta(days=int(embargo_length * 365 / 12)) < datetime.datetime.now():
            print('need to lift the embargo of %s (upload_time=%s, embargo=%d)' % (
                upload.upload_id, upload.upload_time, embargo_length))

            if not dry:
                proc.Calc._get_collection().update_many(
                    {'upload_id': upload_id},
                    {'$set': {'metadata.with_embargo': False}})
                uploads_to_repack.append(upload)
                upload.save()

                with upload.entries_metadata() as entries:
                    search.index_all(entries)

    if not dry:
        __run_processing(uploads_to_repack, parallel, lambda upload: upload.re_pack(), 're-packing')


@admin.command(help='(Re-)index all calcs.')
@click.option('--threads', type=int, default=1, help='Number of threads to use.')
@click.option('--dry', is_flag=True, help='Do not index, just compute entries.')
def index(threads, dry):
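    '''Streams all calc metadata from mongo as elasticsearch index actions and
    feeds them to the bulk (or, with --threads > 1, parallel_bulk) helper.
    '''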
    infrastructure.setup_mongo()
    infrastructure.setup_elastic()

    all_calcs = proc.Calc.objects().count()
    print('indexing %d ...' % all_calcs)

    def elastic_updates():
        with utils.ETA(all_calcs, '   index %10d of %10d calcs, ETA %s') as eta:
            for calc in proc.Calc.objects():
                eta.add()
                entry_metadata = datamodel.EntryMetadata.m_from_dict(calc.metadata)
                entry = entry_metadata.a_elastic.create_index_entry().to_dict(include_meta=True)
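                # mark each action as a plain 'index' operation for the
                # elasticsearch bulk helpers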
                entry['_op_type'] = 'index'
                yield entry

    if dry:
        for _ in elastic_updates():
            pass
    elif threads > 1:
        print('  use %d threads' % threads)
        for _ in elasticsearch.helpers.parallel_bulk(
                infrastructure.elastic_client, elastic_updates(), chunk_size=500,
                thread_count=threads):
            pass
    else:
        elasticsearch.helpers.bulk(
            infrastructure.elastic_client, elastic_updates())
    search.refresh()

    print('')
    print('indexing completed')


@admin.group(help='Generate scripts and commands for nomad operation.')
def ops():
    pass


@ops.command(help=('Dump the mongo (calculation metadata) db.'))
def dump():
    date_str = datetime.datetime.utcnow().strftime('%Y_%m_%d')
    print('mongodump --host {} --port {} --db {} -o /backup/fairdi/mongo/{}'.format(
        config.mongo.host, config.mongo.port, config.mongo.db_name, date_str))


@ops.command(help=('Restore the mongo (calculation metadata) db.'))
@click.argument('PATH_TO_DUMP', type=str, nargs=1)
def restore(path_to_dump):
    print('mongorestore --host {} --port {} --db {} {}'.format(
        config.mongo.host, config.mongo.port, config.mongo.db_name, path_to_dump))


@ops.command(help=('Generate an nginx.conf to serve the GUI and proxy pass to API container.'))
@click.option('--prefix', type=str, default='/example_nomad', help='Url path prefix. Default is /example_nomad, can be empty str.')
def nginx_conf(prefix):
    prefix = prefix.rstrip('/')
    prefix = '/%s' % prefix.lstrip('/')

    print('''\
server {{
    listen        80;
    server_name   www.example.com;
    proxy_set_header Host $host;

    location / {{
        proxy_pass http://app:8000;
    }}

    location ~ {0}\\/?(gui)?$ {{
        rewrite ^ {0}/gui/ permanent;
    }}

    location {0}/gui/ {{
        proxy_intercept_errors on;
        error_page 404 = @redirect_to_index;
        proxy_pass http://app:8000;
    }}

    location @redirect_to_index {{
        rewrite ^ {0}/gui/index.html break;
        proxy_pass http://app:8000;
    }}

    location ~ \\/gui\\/(service-worker\\.js|meta\\.json)$ {{
        add_header Last-Modified $date_gmt;
        add_header Cache-Control 'no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0';
        if_modified_since off;
        expires off;
        etag off;
        proxy_pass http://app:8000;
    }}

    location ~ \\/api\\/uploads\\/?$ {{
        client_max_body_size 35g;
        proxy_request_buffering off;
        proxy_pass http://app:8000;
    }}

    location ~ \\/api\\/(raw|archive) {{
        proxy_buffering off;
        proxy_pass http://app:8000;
    }}

    location ~ \\/api\\/mirror {{
        proxy_buffering off;
        proxy_read_timeout 600;
        proxy_pass http://app:8000;
    }}
}}'''.format(prefix))
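
# Example (hypothetical entry point and command spelling):
#   nomad admin ops nginx-conf --prefix /nomad > nomad.conf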


@ops.command(help=('Generate a proxy pass config for apache2 reverse proxy servers.'))
@click.option('--prefix', type=str, default='app', help='The path prefix under which everything is proxy passed.')
@click.option('--host', type=str, default='130.183.207.104', help='The host to proxy to.')
@click.option('--port', type=str, default='30001', help='The port to proxy to.')
def apache_conf(prefix, host, port):
    print('''\
ProxyPass "/{0}" "http://{1}:{2}/{0}"
ProxyPassReverse "/{0}" "http://{1}:{2}/{0}"
<Proxy http://{1}:{2}/{0}>
    ProxyPreserveHost On
    <IfModule !mod_access_compat.c>
        Require all granted
    </IfModule>
    <IfModule mod_access_compat.c>
        Order allow,deny
        Allow from all
    </IfModule>
</Proxy>

RequestHeader set "X-Forwarded-Proto" expr=%{{REQUEST_SCHEME}}
RequestHeader set "X-Forwarded-SSL" expr=%{{HTTPS}}

ProxyPass /fairdi/keycloak http://{1}:8002/fairdi/keycloak
ProxyPassReverse /fairdi/keycloak http://{1}:8002/fairdi/keycloak
<Proxy http://{1}:8002/app>
     ProxyPreserveHost On
     <IfModule !mod_access_compat.c>
         Require all granted
     </IfModule>
     <IfModule mod_access_compat.c>
         Order allow,deny
         Allow from all
     </IfModule>
</Proxy>

RewriteEngine on
RewriteCond %{{QUERY_STRING}} ^pid=([^&]+)$
RewriteRule ^/NomadRepository-1.1/views/calculation.zul$ /{0}/gui/entry/pid/%1? [R=301]

AllowEncodedSlashes On
'''.format(prefix, host, port))  # type: ignore
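
# Example (hypothetical entry point and command spelling):
#   nomad admin ops apache-conf > nomad-proxy.conf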


def write_prototype_data_file(aflow_prototypes: dict, filepath) -> None:
    '''Writes the prototype data file in a compressed format to a python
    module.

    Args:
        aflow_prototypes: Dictionary with the AFLOW prototype data to serialize.
        filepath: Path of the python module file to write.
    '''
    class NoIndent(object):
        def __init__(self, value):
            self.value = value

    class NoIndentEncoder(json.JSONEncoder):
        '''A custom JSON encoder that can pretty-print objects wrapped in the
        NoIndent class.
        '''
        def __init__(self, *args, **kwargs):
            super(NoIndentEncoder, self).__init__(*args, **kwargs)
            self.kwargs = dict(kwargs)
            del self.kwargs['indent']
            self._replacement_map = {}

        def default(self, o):  # pylint: disable=E0202
            if isinstance(o, NoIndent):
                key = uuid.uuid4().hex
                self._replacement_map[key] = json.dumps(o.value, **self.kwargs)
                return "@@%s@@" % (key,)
            else:
                return super(NoIndentEncoder, self).default(o)

        def encode(self, o):
            result = super(NoIndentEncoder, self).encode(o)
            for k, v in self._replacement_map.items():
                result = result.replace('"@@%s@@"' % (k,), v)
            return result

    prototype_dict = aflow_prototypes["prototypes_by_spacegroup"]
    for prototypes in prototype_dict.values():
        for prototype in prototypes:
            # Save the information back in a prettified form
            prototype["atom_positions"] = NoIndent(prototype["atom_positions"])
            prototype["atom_labels"] = NoIndent(prototype["atom_labels"])
            prototype["lattice_vectors"] = NoIndent(prototype["lattice_vectors"])
            try:
                prototype["normalized_wyckoff_matid"] = NoIndent(prototype["normalized_wyckoff_matid"])
            except KeyError:
                pass

    # Save the updated data
    with io.open(filepath, "w", encoding="utf8") as f:
        json_dump = json.dumps(aflow_prototypes, ensure_ascii=False, indent=4, sort_keys=True, cls=NoIndentEncoder)
        json_dump = re.sub(r"\"(-?\d+(?:[\.,]\d+)?)\"", r'\1', json_dump)  # Removes quotes around numbers
        f.write("# -*- coding: utf-8 -*-\naflow_prototypes = {}\n".format(json_dump))
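
# Sketch of the NoIndent/NoIndentEncoder pattern used above (hypothetical
# standalone usage):
#   json.dumps({'positions': NoIndent([[0, 0, 0], [0.5, 0.5, 0.5]])},
#              indent=4, cls=NoIndentEncoder)
# default() replaces each wrapped value with a unique placeholder string and
# encode() splices the compactly serialized value back into the indented output.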


@ops.command(help='Updates the AFLOW prototype information using the latest online version and writes the results to a python module in the given FILEPATH.')
@click.argument('FILEPATH', nargs=1, type=str)
@click.option('--matches-only', is_flag=True, help='Only update the match information that depends on the symmetry analysis settings. Will not perform an online update.')
@click.pass_context
def prototypes_update(ctx, filepath, matches_only):

    if matches_only:
        from nomad.aflow_prototypes import aflow_prototypes
    else:
        # The basic AFLOW prototype data is available in a Javascript file. Here we
        # retrieve it and read only the prototype list from it.
        prototypes_file_url = 'http://aflowlib.org/CrystalDatabase/js/table_sort.js'
        r = requests.get(prototypes_file_url, allow_redirects=True)
        datastring = r.content.decode("utf-8")
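        # The JS file assigns an array literal to a variable; keep only the
        # text between '= [' and '];' and parse it as JSON.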
        datastring = datastring.split('];')[0]
        datastring = datastring.split('= [')[1]
        data = json.loads('[' + datastring + ']')

        newdictarray = []
        n_prototypes = 0
        n_missing = 0
        for protodict in data:
            n_prototypes += 1
            newdict = {}

            # Make prototype plaintext
            prototype = bs4.BeautifulSoup(protodict["Prototype"], "html5lib").getText()

            # Add to new dictionary
            newdict['Notes'] = protodict['Notes']
            newdict['Prototype'] = prototype
            newdict['Space Group Symbol'] = protodict['Space Group Symbol']
            newdict['Space Group Number'] = protodict['Space Group Number']
            newdict['Pearsons Symbol'] = protodict['Pearson Symbol']
            newdict['Strukturbericht Designation'] = protodict['Strukturbericht Designation']
            newdict['aflow_prototype_id'] = protodict['AFLOW Prototype']
            newdict['aflow_prototype_url'] = 'http://www.aflowlib.org/CrystalDatabase/' + protodict['href'][2:]

            # Download the CIF (or, failing that, the POSCAR) and build an
            # ase.Atoms object from it to obtain labels, positions, and cell.
            cifurl = 'http://www.aflowlib.org/CrystalDatabase/CIF/' + protodict['href'][2:-5] + '.cif'
            r = requests.get(cifurl, allow_redirects=True)
            cif_str = r.content.decode("utf-8")
            cif_file = io.StringIO()
            cif_file.write(cif_str)
            cif_file.seek(0)
            try:
                atoms = ase.io.read(cif_file, format='cif')
            except Exception:
                print("Error in getting prototype structure from CIF: {}", format(cifurl))
                # Then try to get structure from POSCAR
                try:
                    poscarurl = 'http://www.aflowlib.org/CrystalDatabase/POSCAR/' + protodict['href'][2:-5] + '.poscar'
                    r = requests.get(poscarurl, allow_redirects=True)
                    poscar_str = r.content.decode("utf-8")
                    poscar_file = io.StringIO()
                    poscar_file.write(poscar_str)
                    poscar_file.seek(0)
                    atoms = ase.io.read(poscar_file, format='vasp')
                except Exception:
                    print("Error in getting prototype structure from POSCAR: {}".format(poscarurl))
                    print("Could not read prototype structure from CIF or POSCAR file for prototype: {}, {}, ".format(prototype, newdict['aflow_prototype_url']))
                    n_missing += 1
                    continue

            atom_positions = atoms.get_positions()
            atom_labels = atoms.get_chemical_symbols()
            cell = atoms.get_cell()

            newdict['lattice_vectors'] = cell.tolist()
            newdict['atom_positions'] = atom_positions.tolist()
            newdict['atom_labels'] = atom_labels
            newdictarray.append(newdict)

            print("Processed: {}".format(len(newdictarray)))

        # Sort prototype dictionaries by spacegroup and make dictionary
        structure_types_by_spacegroup = {}
        for i_sg in range(1, 231):
            protos_sg = []
            for newdict in newdictarray:
                if newdict['Space Group Number'] == i_sg:
                    protos_sg.append(newdict)
            structure_types_by_spacegroup[i_sg] = protos_sg

        # Wrap in a dictionary that can hold other data, e.g. the symmetry tolerance parameter.
        aflow_prototypes = {
            "prototypes_by_spacegroup": structure_types_by_spacegroup
        }
        print(
            "Extracted latest AFLOW prototypes online. Total number of "
            "successfully fetched prototypes: {}, missing: {}"
            .format(n_prototypes, n_missing)
        )

    # Update matches
    n_prototypes = 0
    n_failed = 0
    n_unmatched = 0
    prototype_dict = aflow_prototypes["prototypes_by_spacegroup"]

    for aflow_spg_number, prototypes in prototype_dict.items():
        n_prototypes += len(prototypes)
        for prototype in prototypes:

            # Read prototype structure
            pos = np.array(prototype["atom_positions"])
            labels = prototype["atom_labels"]
            cell = np.array(prototype["lattice_vectors"])
            atoms = ase.Atoms(
                symbols=labels,
                positions=pos,
                cell=cell,
                pbc=True
            )

            # Try to first see if the space group can be matched with the one in AFLOW
            try:
                symm = matid.SymmetryAnalyzer(atoms, config.normalize.prototype_symmetry_tolerance)
                spg_number = symm.get_space_group_number()
                wyckoff_matid = symm.get_wyckoff_letters_conventional()
                norm_system = symm.get_conventional_system()
            except Exception:
                n_failed += 1
            else:
                # If the space group is matched, add the MatID normalized Wyckoff
                # letters to the data.
                if spg_number == aflow_spg_number:
                    atomic_numbers = norm_system.get_atomic_numbers()
                    normalized_wyckoff_matid = atomutils.get_normalized_wyckoff(atomic_numbers, wyckoff_matid)
                    prototype["normalized_wyckoff_matid"] = normalized_wyckoff_matid
                else:
                    n_unmatched += 1
    print(
        "Updated matches in AFLOW prototype library. Total number of "
        "prototypes: {}, unmatched: {}, failed: {}"
        .format(n_prototypes, n_unmatched, n_failed)
    )

    # Write data file to the specified path
    write_prototype_data_file(aflow_prototypes, filepath)


@admin.command(help='Updates the springer database in nomad.config.springer_msg_db_path.')
@click.option('--max-n-query', default=10, type=int, help='Number of unsuccessful springer requests before returning an error. Default is 10.')
@click.option('--retry-time', default=120, type=int, help='Time in seconds to retry after unsuccessful request. Default is 120.')
def springer_update(max_n_query, retry_time):
    from nomad.cli.admin import springer
    springer.update_springer_data(max_n_query, retry_time)