# Copyright 2018 Markus Scheidgen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
The encyclopedia API of the nomad@FAIRDI APIs.
"""
import re
import math
import numpy as np

from flask_restplus import Resource, abort, fields, marshal
from flask import request, g
from elasticsearch_dsl import Search, Q, A
from elasticsearch_dsl.utils import AttrDict

from nomad import config, infrastructure, search
from nomad.files import UploadFiles
from nomad.units import ureg
from nomad.atomutils import get_hill_decomposition
from nomad.datamodel.datamodel import EntryArchive
from nomad.datamodel.material import Material
from .api import api
from .auth import authenticate, create_authorization_predicate

ns = api.namespace("encyclopedia", description="Access encyclopedia metadata.")

re_formula = re.compile(r"([A-Z][a-z]?)(\d*)")
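# Illustrative note (not from the original source): the regular expression above
# tokenizes a formula string into (symbol, count) pairs, e.g.
#   [m.groups() for m in re_formula.finditer("H2O")] == [("H", "2"), ("O", "")]
# where an empty count string is later treated as a count of one.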

material_prop_map = {
    # General
    "material_id": "encyclopedia.material.material_id",
    "formula": "encyclopedia.material.formula",
    "formula_reduced": "encyclopedia.material.formula_reduced",
    "system_type": "encyclopedia.material.material_type",
    # Bulk only
    "has_free_wyckoff_parameters": "encyclopedia.material.bulk.has_free_wyckoff_parameters",
    "strukturbericht_designation": "encyclopedia.material.bulk.strukturbericht_designation",
    "material_name": "encyclopedia.material.material_name",
    "bravais_lattice": "encyclopedia.material.bulk.bravais_lattice",
    "crystal_system": "encyclopedia.material.bulk.crystal_system",
    "point_group": "encyclopedia.material.bulk.point_group",
    "space_group_number": "encyclopedia.material.bulk.space_group_number",
    "space_group_international_short_symbol": "encyclopedia.material.bulk.space_group_international_short_symbol",
    "structure_prototype": "encyclopedia.material.bulk.structure_prototype",
    "structure_type": "encyclopedia.material.bulk.structure_type",
}


def rgetattr(obj, attr_name):
    """Used to perform attribute access based on a (possibly nested) attribute
    name given as string.
    """
    try:
        for attr in attr_name.split("."):
            obj = obj[attr]
    except KeyError:
        return None
    return obj


def get_es_doc_values(es_doc, mapping, keys=None):
    """Used to form a material definition for "materials/<material_id>" from
    the given ElasticSearch root document.
    """
    if keys is None:
        keys = mapping.keys()

    result = {}
    for key in keys:
        es_key = mapping[key]
        value = rgetattr(es_doc, es_key)
        result[key] = value

    return result


def get_enc_filter():
    """Returns a shared list of term filters that leaves out entries that are
    unpublished (by other users), embargoed, or invalid.
    """
    # Handle authentication
    s = search.SearchRequest()
    if g.user is not None:
        s.owner('visible', user_id=g.user.user_id)
    else:
        s.owner('public')
    return [
        s.q,
        Q("term", encyclopedia__status="success"),
    ]


similarity = api.model("similarity", {
    # General
    "material_id": fields.String,
    "value": fields.Float,
    "formula": fields.String,
    "space_group_number": fields.Integer,
})
material_query = api.parser()
material_query.add_argument(
    "property",
    type=str,
    choices=tuple(material_prop_map.keys()),
    help="Optional single property to retrieve for the given material. If not specified, all properties will be returned.",
    location="args"
)
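# Illustrative usage (URL prefix assumed, not taken from this file): a client
# can request a single field with something like
#   GET .../encyclopedia/materials/<material_id>?property=formula_reduced
# in which case only that property is read from Elasticsearch and returned.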
material_result = api.model("material_result", {
    # General
    "material_id": fields.String,
    "formula": fields.String,
    "formula_reduced": fields.String,
    "system_type": fields.String,
    "n_matches": fields.Integer,
    # Bulk only
    "has_free_wyckoff_parameters": fields.Boolean,
    "strukturbericht_designation": fields.String,
    "material_name": fields.String,
    "bravais_lattice": fields.String,
    "crystal_system": fields.String,
    "point_group": fields.String,
    "space_group_number": fields.Integer,
    "space_group_international_short_symbol": fields.String,
    "structure_prototype": fields.String,
    "structure_type": fields.String,
    "similarity": fields.List(fields.Nested(similarity, skip_none=True), skip_none=True),
})


@ns.route("/materials/<string:material_id>")
class EncMaterialResource(Resource):
    @api.response(404, "The material does not exist")
    @api.response(200, "Metadata sent", fields.Raw)
    @api.doc("get_material")
    @api.expect(material_query)
    @api.marshal_with(material_result, skip_none=True)
    @authenticate()
    def get(self, material_id):
        """Used to retrieve basic information related to the specified
        material.
        """
        # Parse request arguments
        args = material_query.parse_args()
        prop = args.get("property", None)
        if prop is not None:
            keys = [prop]
            es_keys = [material_prop_map[prop]]
        else:
            keys = list(material_prop_map.keys())
            es_keys = list(material_prop_map.values())

        # Find the first public entry with this material id and take
        # information from there. In principle all other entries should have
        # the same information.
        s = Search(index=config.elastic.index_name)
        query = Q(
            "bool",
            filter=get_enc_filter() + [
                Q("term", encyclopedia__material__material_id=material_id),
            ]
        )
        s = s.query(query)

        # Only one representative entry is returned by collapsing the results.
        s = s.extra(**{
            "_source": {"includes": es_keys},
            "size": 1,
            "collapse": {"field": "encyclopedia.material.material_id"},
        })
        response = s.execute()

        # No such material
        if len(response) == 0:
            abort(404, message="There is no material {}".format(material_id))

        # Add values from ES entry
        entry = response[0]
        result = get_es_doc_values(entry, material_prop_map, keys)

        # Add similarity data that is currently stored in MongoDB. In the
        # future a lot of the data will be accessed here.
        try:
            material = Material.m_def.a_mongo.get(material_id=material_id)
            dos_similarity = material.similarity.electronic_dos
        except KeyError:
            # No similarity data for this material
            pass
        else:
            # Only include similarity for materials that exist on the current
            # deployment to avoid dead links.
            similar_ids = dos_similarity.material_ids
            id_value_map = {key: value for key, value in zip(dos_similarity.material_ids, dos_similarity.values)}
            bool_query = Q(
                "bool",
                filter=get_enc_filter() + [Q("terms", encyclopedia__material__material_id=similar_ids)],
            )
            s = Search(index=config.elastic.index_name)
            s = s.query(bool_query)
            s = s.extra(**{
                "_source": {"includes": [
                    "encyclopedia.material.material_id",
                    "encyclopedia.material.formula_reduced",
                    "encyclopedia.material.bulk.space_group_number",
                ]},
                "size": 5,
                "collapse": {"field": "encyclopedia.material.material_id"},
            })
            response = s.execute()
            similarity = []
            for hit in response.hits:
                try:
                    similarity.append({
                        "material_id": hit.encyclopedia.material.material_id,
                        "value": id_value_map[hit.encyclopedia.material.material_id],
                        "formula": hit.encyclopedia.material.formula_reduced,
                        "space_group_number": hit.encyclopedia.material.bulk.space_group_number,
                    })
                except AttributeError:
                    pass
            if similarity:
                result["similarity"] = similarity

        return result, 200


range_query = api.model("range_query", {
    "max": fields.Float,
    "min": fields.Float,
})
materials_after = api.model("materials_after", {
    "materials": fields.String,
})
materials_query = api.model("materials_input", {
    "search_by": fields.Nested(api.model("search_query", {
        "exclusive": fields.Boolean(default=False),
        "formula": fields.String,
        "element": fields.String,
        "page": fields.Integer(default=1),
        "after": fields.Nested(materials_after, allow_null=True),
        "per_page": fields.Integer(default=25),
        "pagination": fields.Boolean,
    })),
    "material_name": fields.List(fields.String),
    "structure_type": fields.List(fields.String),
    "space_group_number": fields.List(fields.Integer),
    "system_type": fields.List(fields.String),
    "crystal_system": fields.List(fields.String),
    "band_gap": fields.Nested(range_query, description="Band gap range in eV."),
    "band_gap_direct": fields.Boolean,
    "has_band_structure": fields.Boolean,
    "has_dos": fields.Boolean,
    "has_fermi_surface": fields.Boolean,
    "has_thermal_properties": fields.Boolean,
    "functional_type": fields.List(fields.String),
    "basis_set_type": fields.List(fields.String),
    "code_name": fields.List(fields.String),
    "mass_density": fields.Nested(range_query, description="Mass density range in kg / m ** 3."),
})
pages_result = api.model("page_info", {
    "per_page": fields.Integer,
    "total": fields.Integer,
    "page": fields.Integer,
    "pages": fields.Integer,
    "after": fields.Nested(materials_after),
})

materials_result = api.model("materials_result", {
    "total_results": fields.Integer(allow_null=False),
    "results": fields.List(fields.Nested(material_result, skip_none=True)),
    "pages": fields.Nested(pages_result, skip_none=True),
    "es_query": fields.String(allow_null=False),
})


@ns.route("/materials")
class EncMaterialsResource(Resource):
    @api.response(404, "No materials found")
    @api.response(400, "Bad request")
    @api.response(200, "Metadata sent", fields.Raw)
    @api.expect(materials_query, validate=False)
    @api.marshal_with(materials_result, skip_none=True)
    @api.doc("materials")
    @authenticate()
    def post(self):
        """Used to query a list of materials with the given search options.
        """
        # Get query parameters as json
        try:
            data = marshal(request.get_json(), materials_query)
        except Exception as e:
            abort(400, message=str(e))

        def add_terms_filter(filters, source, target, query_type="terms"):
            """For adding terms filters
            """
            if data[source] is not None:
                filters.append(Q(query_type, **{target: data[source]}))

        def add_exists_filter(filters, must_nots, source, target):
            """For adding exists filters
            """
            param = data[source]
            if param is not None:
                query = Q("exists", field=target)
                if param is True:
                    filters.append(query)
                elif param is False:
                    must_nots.append(query)

        def add_range_filter(filters, source, target, source_unit=None, target_unit=None):
            """For adding range filters
            """
            param = data[source]
            query_dict = {}
            if param["min"] is not None:
                if source_unit is None and target_unit is None:
                    gte = param["min"]
                else:
                    gte = (param["min"] * source_unit).to(target_unit).magnitude
                query_dict["gte"] = gte
            if param["max"] is not None:
                if source_unit is None and target_unit is None:
                    lte = param["max"]
                else:
                    lte = (param["max"] * source_unit).to(target_unit).magnitude
                query_dict["lte"] = lte
            if len(query_dict) != 0:
                query = Q("range", **{target: query_dict})
                filters.append(query)

        property_map = {
            "has_thermal_properties": "encyclopedia.properties.thermodynamical_properties",
            "has_band_structure": "encyclopedia.properties.electronic_band_structure",
            "has_dos": "encyclopedia.properties.electronic_dos",
            "has_fermi_surface": "encyclopedia.properties.fermi_surface",
        }
        requested_properties = []
        filters = get_enc_filter()
        must_nots = []
        musts = []
        add_terms_filter(filters, "material_name", "encyclopedia.material.material_name")
        add_terms_filter(filters, "structure_type", "encyclopedia.material.bulk.structure_type")
        add_terms_filter(filters, "space_group_number", "encyclopedia.material.bulk.space_group_number")
        add_terms_filter(filters, "system_type", "encyclopedia.material.material_type")
        add_terms_filter(filters, "crystal_system", "encyclopedia.material.bulk.crystal_system")
        add_terms_filter(filters, "band_gap_direct", "encyclopedia.properties.band_gap_direct", query_type="term")
        add_terms_filter(filters, "functional_type", "encyclopedia.method.functional_type")
        add_terms_filter(filters, "basis_set_type", "dft.basis_set")
        add_terms_filter(filters, "code_name", "dft.code_name")
        add_range_filter(filters, "band_gap", "encyclopedia.properties.band_gap", ureg.eV, ureg.J)
        add_range_filter(filters, "mass_density", "encyclopedia.properties.mass_density")

        # Create query for elements or formula
        search_by = data["search_by"]
        formula = search_by["formula"]
        elements = search_by["element"]
        exclusive = search_by["exclusive"]

        if formula is not None:
            # Here we determine a list of atom types. The types may occur
            # multiple times and at multiple places.
            element_list = []
            matches = re_formula.finditer(formula)
            for match in matches:
                groups = match.groups()
                symbol = groups[0]
                count = groups[1]
                if symbol != "":
                    if count == "":
                        element_list.append(symbol)
                    else:
                        element_list += [symbol] * int(count)

            # The given list of species is reformatted with the Hill system
            # into a query string. The counts are reduced by the greatest
            # common divisor.
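            # For example (illustrative): "H4O2" yields the species list
            # ["H", "H", "H", "H", "O", "O"], which the Hill ordering and the
            # count reduction below turn into the query string "H2 O".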
            names, reduced_counts = get_hill_decomposition(element_list, reduced=True)
            query_string = []
            for name, count in zip(names, reduced_counts):
                if count == 1:
                    query_string.append(name)
                else:
                    query_string.append("{}{}".format(name, int(count)))
            query_string = " ".join(query_string)

            # With exclusive search we look for exact match
            if exclusive:
                filters.append(Q("term", **{"encyclopedia.material.species_and_counts.keyword": query_string}))
            # With non-exclusive search we look for match that includes at
            # least all parts of the formula, possibly even more.
            else:
                musts.append(Q(
                    "match",
                    encyclopedia__material__species_and_counts={"query": query_string, "operator": "and"}
                ))
        elif elements is not None:
            # The given list of species is reformatted with the Hill system into a query string
            species, _ = get_hill_decomposition(elements.split(","))
            query_string = " ".join(species)

            # With exclusive search we look for exact match
            if exclusive:
                filters.append(Q("term", **{"encyclopedia.material.species.keyword": query_string}))
            # With non-exclusive search we look for match that includes at
            # least all species, possibly even more.
            else:
                musts.append(Q(
                    "match",
                    encyclopedia__material__species={"query": query_string, "operator": "and"}
                ))

        # The queries that correspond to AND queries typically need to access
        # multiple calculations at once to find the material ids that
        # correspond to the query. To implement this behaviour we need to run
        # an initial aggregation that checks that the requested properties are
        # present for a material. This is a very crude solution that does not
        # scale to complex queries, but I'm not sure we can do much better
        # until we have a separate index for materials. The size is set very
        # large because all the results need to be returned. We cannot get the
        # results in a paginated way with composite aggregation, because
        # pipeline aggregations are not compatible with them.
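        # Sketch of the intent (illustrative): for e.g. has_dos=True and
        # has_band_structure=True, hits are bucketed by material_id, each
        # requested property gets an "exists" filter sub-aggregation, and a
        # bucket_selector keeps only materials where every per-property count
        # is above zero.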
        agg_parent = A("terms", field="encyclopedia.material.material_id", size=500000)
        for key, value in property_map.items():
            if data[key] is True:
                agg = A("filter", exists={"field": value})
                agg_parent.bucket(key, agg)
                requested_properties.append(key)
        if len(requested_properties) > 1:
            # First we set up a boolean filter query that requires at least one
            # of the requested properties. This reduces the size of the initial
            # set on top of which the more expensive aggregation stack is run.
            bool_query = Q(
                "bool",
                filter=filters,
                must_not=must_nots,
                must=musts,
                should=[Q("exists", field=property_map[x]) for x in requested_properties],
                minimum_should_match=1,  # At least one of the should queries must match
            )
            s = Search(index=config.elastic.index_name)
            s = s.query(bool_query)

            # The remaining requested properties have to be queried as a nested
            # aggregation.
            s.aggs.bucket("materials", agg_parent)
            buckets_path = {x: "{}._count".format(x) for x in requested_properties}
            script = " && ".join(["params.{} > 0".format(x) for x in requested_properties])
            agg_parent.pipeline("selector", A(
                "bucket_selector",
                buckets_path=buckets_path,
                script=script,
            ))
            s = s.extra(**{
                "size": 0,
            })
            response = s.execute()
            material_ids = [x["key"] for x in response.aggs.materials.buckets]
            if len(material_ids) == 0:
                abort(404, message="No materials found for the given search criteria or pagination.")

        # Add pre-selected material ids if multiple exists filters were
        # requested. These IDs are already filtered based on the user query so
        # none of the other search terms need be used.
        if len(requested_properties) > 1:
            must_nots = []
            musts = []
            filters = []
            filters.append(Q("terms", encyclopedia__material__material_id=material_ids))
        if len(requested_properties) == 1:
            prop_name = requested_properties[0]
            add_exists_filter(filters, must_nots, prop_name, property_map[prop_name])

        # The top query filters out entries based on the user query
        page = search_by["page"]
        per_page = search_by["per_page"]
        after = search_by["after"]
        bool_query = Q(
            "bool",
            filter=filters,
            must_not=must_nots,
            must=musts,
        )
        s = Search(index=config.elastic.index_name)
        s = s.query(bool_query)

        # 1: The paginated approach: No way to know the amount of materials,
        # but can return aggregation results in a quick fashion including
        # the number of calculation entries per material.
        mode = "collapse"
        if mode == "aggregation":
            # The materials are grouped by using three aggregations:
            # "Composite" to enable scrolling, "Terms" to enable selecting
            # by material_id and "Top Hits" to fetch a single
            # representative material document. Unnecessary fields are
            # filtered to reduce data transfer.
            terms_agg = A("terms", field="encyclopedia.material.material_id")
            composite_kwargs = {"sources": {"materials": terms_agg}, "size": per_page}

            # The number of matched materials is only requested on the first
            # search, not for each page.
            if after is not None:
                composite_kwargs["after"] = after
            else:
                cardinality_agg = A("cardinality", field="encyclopedia.material.material_id", precision_threshold=1000)
                s.aggs.metric("n_materials", cardinality_agg)

            composite_agg = A("composite", **composite_kwargs)
            composite_agg.metric("representative", A(
                "top_hits",
                size=1,
                _source={"includes": list(material_prop_map.values())},
            ))
            s.aggs.bucket("materials", composite_agg)

            # We ignore the top level hits and sort by reduced material formula.
            s = s.extra(**{
                "size": 0,
            })

            response = s.execute()
            materials = response.aggs.materials.buckets
            if len(materials) == 0:
                abort(404, message="No materials found for the given search criteria or pagination.")
            after_new = response.aggs.materials["after_key"]

            # Gather results from aggregations
            result_list = []
            materials = response.aggs.materials.buckets
            keys = list(material_prop_map.keys())
            for material in materials:
                representative = material["representative"][0]
                mat_dict = get_es_doc_values(representative, material_prop_map, keys)
                mat_dict["n_matches"] = material.doc_count
                result_list.append(mat_dict)

            # Page information is incomplete for aggregations
            pages = {
                "page": page,
                "per_page": per_page,
                "after": after_new,
            }
            if after is None:
                n_materials = response.aggs.n_materials.value
                pages["total"] = n_materials

        # 2. Collapse approach. Quickly provides a list of materials
        # corresponding to the query and offers full pagination; the number of
        # matches per material needs to be requested with a separate query.
        elif mode == "collapse":
            s = Search(index=config.elastic.index_name)
            s = s.query(bool_query)

            # Add cardinality aggregation that gives out the total number of materials
            cardinality_agg = A("cardinality", field="encyclopedia.material.material_id", precision_threshold=1000)
            s.aggs.metric("n_materials", cardinality_agg)

            s = s.extra(**{
                "collapse": {"field": "encyclopedia.material.material_id"},
                "size": per_page,
                "from": (page - 1) * per_page,
                "sort": [{"encyclopedia.material.formula_reduced": {"order": "asc"}}],
                "explain": True,
            })

            # Execute query
            response = s.execute()

            # No matches
            if len(response) == 0:
                abort(404, message="No materials found for the given search criteria or pagination.")

            # Gather number of entries per material with a separate query
            material_ids = [x.encyclopedia.material.material_id for x in response]
            s = Search(index=config.elastic.index_name)
            bool_query = Q(
                "bool",
                filter=Q("terms", encyclopedia__material__material_id=material_ids),
            )
            s2 = s.query(bool_query)
            s2.aggs.bucket("n_matches", A("terms", field="encyclopedia.material.material_id"))
            response2 = s2.execute()
            matmap = {x.key: x.doc_count for x in response2.aggs.n_matches}

            # Loop over materials
            result_list = []
            keys = list(material_prop_map.keys())
            for material in response:
                # Get values from the collapsed doc
                mat_result = get_es_doc_values(material, material_prop_map, keys)
                mat_id = material.encyclopedia.material.material_id
                mat_result["n_matches"] = matmap[mat_id]
                result_list.append(mat_result)

            # Full page information available for collapse
            pages = {
                "page": page,
                "per_page": per_page,
                "pages": math.ceil(response.hits.total / per_page),
                "total": response.aggs.n_materials.value,
            }

        result = {
            "results": result_list,
            "pages": pages,
        }
        return result, 200


groups_result = api.model("groups_result", {
    "groups_eos": fields.Raw,
    "groups_par": fields.Raw,
})


@ns.route("/materials/<string:material_id>/groups")
class EncGroupsResource(Resource):
    @api.response(404, "Material not found")
    @api.response(400, "Bad request")
    @api.response(200, "Metadata sent", fields.Raw)
    @api.marshal_with(groups_result)
    @api.doc("enc_materials")
    @authenticate()
    def get(self, material_id):
        """Returns a summary of the calculation groups that were identified for
        this material.
        """
        # Find entries for the given material, which have EOS or parameter
        # variation hashes set.
        bool_query = Q(
            "bool",
            filter=get_enc_filter() + [Q("term", encyclopedia__material__material_id=material_id)],
            must=[
                Q("exists", field="encyclopedia.properties.energies.energy_total"),
                Q("exists", field="encyclopedia.material.idealized_structure.cell_volume"),
            ],
            should=[
                Q("exists", field="encyclopedia.method.group_eos_id"),
                Q("exists", field="encyclopedia.method.group_parametervariation_id"),
            ],
            minimum_should_match=1,  # At least one of the should queries must match
        )

        s = Search(index=config.elastic.index_name)
        s = s.query(bool_query)

        # Bucket the calculations by the group hashes. Only create a bucket if an
        # above-minimum number of documents are found.
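        # Illustrative result shape (identifiers assumed): the aggregations below
        # are later flattened into {"<group_eos_id>": ["<calc_id>", ...], ...} and
        # {"<group_parametervariation_id>": ["<calc_id>", ...], ...}.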
        group_eos_bucket = A("terms", field="encyclopedia.method.group_eos_id", min_doc_count=4)
        group_param_bucket = A("terms", field="encyclopedia.method.group_parametervariation_id", min_doc_count=2)
        calc_aggregation = A(
            "top_hits",
            _source={"includes": ["calc_id"]},
            sort=[{"encyclopedia.properties.energies.energy_total": {"order": "asc"}}],
            size=100,
        )
        group_eos_bucket.bucket("calculations", calc_aggregation)
        group_param_bucket.bucket("calculations", calc_aggregation)
        s.aggs.bucket("groups_eos", group_eos_bucket)
        s.aggs.bucket("groups_param", group_param_bucket)

        # We ignore the top level hits
        s = s.extra(**{
            "size": 0,
        })

        # Collect information for each group from the aggregations
        response = s.execute()
        groups_eos = {group.key: [calc.calc_id for calc in group.calculations.hits] for group in response.aggs.groups_eos.buckets}
        groups_param = {group.key: [calc.calc_id for calc in group.calculations.hits] for group in response.aggs.groups_param.buckets}

        # Return results
        result = {
            "groups_eos": groups_eos,
            "groups_par": groups_param,
        }

        return result, 200


group_result = api.model("group_result", {
    "calculations": fields.List(fields.String),
    "energies": fields.List(fields.Float),
    "volumes": fields.List(fields.Float),
})
group_source = {
    "includes": [
        "calc_id",
        "encyclopedia.properties.energies.energy_total",
        "encyclopedia.material.idealized_structure.cell_volume",
    ]
}


@ns.route("/materials/<string:material_id>/groups/<string:group_type>/<string:group_id>")
class EncGroupResource(Resource):
    @api.response(404, "Group not found")
    @api.response(400, "Bad request")
    @api.response(200, "Metadata sent", fields.Raw)
    @api.marshal_with(group_result)
    @api.doc("enc_group")
    @authenticate()
    def get(self, material_id, group_type, group_id):
        """Used to query detailed information for a specific calculation group.
        """
        # Find entries for the given material, which have EOS or parameter
        # variation hashes set.
        if group_type == "eos":
            group_id_source = "encyclopedia.method.group_eos_id"
        elif group_type == "par":
            group_id_source = "encyclopedia.method.group_parametervariation_id"
        else:
            abort(400, message="Unsupported group type.")

        bool_query = Q(
            "bool",
            filter=get_enc_filter() + [
                Q("term", encyclopedia__material__material_id=material_id),
                Q("term", **{group_id_source: group_id}),
            ],
        )

        s = Search(index=config.elastic.index_name)
        s = s.query(bool_query)

        # calc_id and energy should be extracted for each matched document. The
        # documents are sorted by energy so that the minimum energy one can be
        # easily extracted. A maximum request size is set in order to limit the
        # result size. ES also has an index-level property
        # "index.max_inner_result_window" that limits the number of results
        # that an inner result can contain.
        energy_aggregation = A(
            "top_hits",
            _source=group_source,
            sort=[{"encyclopedia.properties.energies.energy_total": {"order": "asc"}}],
            size=100,
        )
        s.aggs.bucket("groups_eos", energy_aggregation)

        # We ignore the top level hits
        s = s.extra(**{
            "size": 0,
        })

        # Collect information for each group from the aggregations
        response = s.execute()

        hits = response.aggs.groups_eos.hits
        calculations = [doc.calc_id for doc in hits]
        energies = [doc.encyclopedia.properties.energies.energy_total for doc in hits]
        volumes = [doc.encyclopedia.material.idealized_structure.cell_volume for doc in hits]
        group_dict = {
            "calculations": calculations,
            "energies": energies,
            "volumes": volumes,
766
        }

        return group_dict, 200


suggestions_map = {
    "code_name": "dft.code_name",
    "structure_type": "encyclopedia.material.bulk.structure_type",
}
suggestions_query = api.parser()
suggestions_query.add_argument(
    "property",
    type=str,
    choices=("code_name", "structure_type"),
    help="The property name for which suggestions are returned.",
    location="args"
)
suggestions_result = api.model("suggestions_result", {
    "code_name": fields.List(fields.String),
    "structure_type": fields.List(fields.String),
})


@ns.route("/suggestions")
class EncSuggestionsResource(Resource):
    @api.response(404, "Suggestion not found")
    @api.response(400, "Bad request")
    @api.response(200, "Metadata sent", fields.Raw)
    @api.expect(suggestions_query, validate=False)
    @api.marshal_with(suggestions_result, skip_none=True)
    @api.doc("enc_suggestions")
    @authenticate()
    def get(self):

        # Parse request arguments
        args = suggestions_query.parse_args()
        prop = args.get("property", None)
        if prop is None:
            abort(400, message="Please provide a property name.")

        # Use aggregation to return all unique terms for the requested field.
        # Without using composite aggregations there is a size limit for the
        # number of aggregation buckets. This should, however, not be a problem
        # since the number of unique values is low for all supported properties.
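        # Illustrative example (returned values depend on the indexed data): a
        # request such as GET .../encyclopedia/suggestions?property=code_name
        # could return {"code_name": ["VASP", "FHI-aims", ...]}.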
        s = Search(index=config.elastic.index_name)
        query = Q(
            "bool",
            filter=get_enc_filter()
        )
        s = s.query(query)
        s = s.extra(**{
            "size": 0,
        })

        terms_agg = A("terms", field=suggestions_map[prop])
        s.aggs.bucket("suggestions", terms_agg)

        # Gather unique values into a list
        response = s.execute()
        suggestions = [x.key for x in response.aggs.suggestions.buckets]

        return {prop: suggestions}, 200


calc_prop_map = {
    "calc_id": "calc_id",
    "upload_id": "upload_id",
    "code_name": "dft.code_name",
    "code_version": "dft.code_version",
    "functional_type": "encyclopedia.method.functional_type",
    "basis_set_type": "dft.basis_set",
    "core_electron_treatment": "encyclopedia.method.core_electron_treatment",
    "run_type": "encyclopedia.calculation.calculation_type",
    "has_dos": "encyclopedia.properties.electronic_dos",
    "has_band_structure": "encyclopedia.properties.electronic_band_structure",
    "has_thermal_properties": "encyclopedia.properties.thermodynamical_properties",
    "has_phonon_dos": "encyclopedia.properties.phonon_dos",
    "has_phonon_band_structure": "encyclopedia.properties.phonon_band_structure",
}
calculation_result = api.model("calculation_result", {
    "calc_id": fields.String,
    "upload_id": fields.String,
    "code_name": fields.String,
    "code_version": fields.String,
    "functional_type": fields.String,
    "basis_set_type": fields.String,
    "core_electron_treatment": fields.String,
    "run_type": fields.String,
    "has_dos": fields.Boolean,
    "has_band_structure": fields.Boolean,
    "has_thermal_properties": fields.Boolean,
    "has_phonon_dos": fields.Boolean,
    "has_phonon_band_structure": fields.Boolean,
})
representatives_result = api.model("representatives_result", {
    "idealized_structure": fields.String,
    "electronic_band_structure": fields.String,
    "electronic_dos": fields.String,
    "thermodynamical_properties": fields.String,
})
calculations_result = api.model("calculations_result", {
    "total_results": fields.Integer,
    "pages": fields.Nested(pages_result),
    "results": fields.List(fields.Nested(calculation_result)),
    "representatives": fields.Nested(representatives_result, skip_none=True),
})


@ns.route("/materials/<string:material_id>/calculations")
class EncCalculationsResource(Resource):
    @api.response(404, "Material not found")
    @api.response(400, "Bad request")
    @api.response(200, "Metadata sent", fields.Raw)
    @api.doc("get_calculations")
    @authenticate()
    def get(self, material_id):
        """Used to return all calculations related to the given material. Also
        returns a representative calculation for each property shown in the
        overview page.
        """
        s = Search(index=config.elastic.index_name)
        query = Q(
            "bool",
            filter=get_enc_filter() + [
                Q("term", encyclopedia__material__material_id=material_id),
            ]
        )
        s = s.query(query)

        # The query is filtered already on the ES side so we don't need to
        # transfer so much data.
        s = s.extra(**{
            "_source": {"includes": list(calc_prop_map.values()) + ["dft.xc_functional"]},
            "size": 10000,
            "from": 0,
        })
        response = s.execute()

        # No such material
        if len(response) == 0:
            abort(404, message="There is no material {}".format(material_id))

        # Add representative properties. It might be possible to write a custom
        # ES scoring mechanism or aggregation to also perform the selection.
        representatives = {}

        def calc_score(entry):
            """Custom scoring function used to sort results by their
            "quality". Currently built to mimic the scoring that was used
            in the old Encyclopedia GUI. Primarily sorts by quality measure,
            ties are broken by alphabetic sorting of calc_id in order to
            return consistent results.
            """
            score = 0
            functional_score = {
                "GGA": 100
            }
            code_score = {
                "FHI-aims": 3,
                "VASP": 2,
                "Quantum Espresso": 1,
            }
            code_name = entry.dft.code_name
            functional = entry.dft.xc_functional
            has_dos = rgetattr(entry, "encyclopedia.properties.electronic_dos") is not None
            has_bs = rgetattr(entry, "encyclopedia.properties.electronic_band_structure") is not None
            score += functional_score.get(functional, 0)
            score += code_score.get(code_name, 0)
            if has_dos and has_bs:
                score += 10

            return (score, entry["calc_id"])

        # The calculations are first sorted by "quality"
        sorted_calc = sorted(response, key=lambda x: calc_score(x), reverse=True)

        # Get the requested representative properties
        representatives["idealized_structure"] = sorted_calc[0].calc_id
        thermo_found = False
        bs_found = False
        dos_found = False
        for calc in sorted_calc:
            if rgetattr(calc, "encyclopedia.properties.thermodynamical_properties") is not None:
                representatives["thermodynamical_properties"] = calc.calc_id
                thermo_found = True
            if rgetattr(calc, "encyclopedia.properties.electronic_band_structure") is not None:
                representatives["electronic_band_structure"] = calc.calc_id
                bs_found = True
            if rgetattr(calc, "encyclopedia.properties.electronic_dos") is not None:
                representatives["electronic_dos"] = calc.calc_id
                dos_found = True
            if thermo_found and bs_found and dos_found:
                break

        # Create result JSON
        results = []
        for entry in response:
            calc_dict = get_es_doc_values(entry, calc_prop_map)
            calc_dict["has_dos"] = calc_dict["has_dos"] is not None
            calc_dict["has_band_structure"] = calc_dict["has_band_structure"] is not None
            calc_dict["has_thermal_properties"] = calc_dict["has_thermal_properties"] is not None
            calc_dict["has_phonon_dos"] = calc_dict["has_phonon_dos"] is not None
            calc_dict["has_phonon_band_structure"] = calc_dict["has_phonon_band_structure"] is not None
            results.append(calc_dict)

        result = {
            "total_results": len(results),
            "results": results,
            "representatives": representatives,
        }

        return result, 200


histogram = api.model("histogram", {
    "occurrences": fields.List(fields.Integer),
    "values": fields.List(fields.Float),
})
statistics_query = api.model("statistics_query", {
    "calculations": fields.List(fields.String),
    "properties": fields.List(fields.String),
    "n_histogram_bins": fields.Integer,
})
statistics = api.model("statistics", {
    "min": fields.Float,
    "max": fields.Float,
    "avg": fields.Float,
    "histogram": fields.Nested(histogram, skip_none=True)
})
statistics_result = api.model("statistics_result", {
    "cell_volume": fields.Nested(statistics, skip_none=True),
    "atomic_density": fields.Nested(statistics, skip_none=True),
    "mass_density": fields.Nested(statistics, skip_none=True),
    "lattice_a": fields.Nested(statistics, skip_none=True),
    "lattice_b": fields.Nested(statistics, skip_none=True),
    "lattice_c": fields.Nested(statistics, skip_none=True),
    "alpha": fields.Nested(statistics, skip_none=True),