implementations.py 26 KB
Newer Older
Lauri Himanen's avatar
Lauri Himanen committed
1
import re
Lauri Himanen's avatar
Lauri Himanen committed
2
import os
Lauri Himanen's avatar
Lauri Himanen committed
3
import logging
4
from cp2kparser.engines.csvengine import CSVEngine
5
6
from cp2kparser.implementation.cp2kinputparser import CP2KInputParser
from cp2kparser.implementation.cp2kinputenginedata.input_tree import CP2KInput
Lauri Himanen's avatar
Lauri Himanen committed
7
from cp2kparser.implementation.outputparsers import *
8
from nomadcore.coordinate_reader import CoordinateReader
9
from cp2kparser.generics.parser import Parser
Lauri Himanen's avatar
Lauri Himanen committed
10
logger = logging.getLogger(__name__)
Lauri Himanen's avatar
Lauri Himanen committed
11
12
13


#===============================================================================
14
15
class CP2KImplementation262(Parser):
    """The default implementation for a CP2K parser based on version 2.6.2.
Lauri Himanen's avatar
Lauri Himanen committed
16
    """
Lauri Himanen's avatar
Lauri Himanen committed
17
    def __init__(self, parser_context):
        """Set up the parsing engines and resolve the files needed for
        parsing.

        Args:
            parser_context: The context object that is forwarded to the
                Parser base class.
        """
        # Initialize the base class
        Parser.__init__(self, parser_context)

        # Parsing engines. The input and output parsers are version
        # dependent, so they are configured with the detected version id.
        self.csvengine = CSVEngine(self)
        self.atomsengine = CoordinateReader()
        self.inputparser = CP2KInputParser()
        self.inputparser.setup_version(self.version_id)

        # The output parser class is looked up dynamically by version id,
        # e.g. "CP2KOutputParser262".
        output_class_name = "CP2KOutputParser{}".format(self.version_id)
        output_class = globals()[output_class_name]
        self.outputparser = output_class(self, self.metainfo_to_keep, self.metainfo_to_skip)

        self.input_tree = None
        self.extended_input = None

        # File resolution happens in three stages: first by extension, then
        # the input file is preprocessed, and finally the files referenced
        # from the parsed input are resolved.
        self.determine_file_ids_pre_setup()
        self.input_preprocessor()
        self.determine_file_ids_post_setup()
Lauri Himanen's avatar
Lauri Himanen committed
35

Lauri Himanen's avatar
Lauri Himanen committed
36
    def determine_file_ids_pre_setup(self):
        """Resolve the input and output files based on extension and the
        include files by looking for @INCLUDE commands in the input file.
        """
        # Main input (.inp) and output (.out) files
        for path in self.files.iterkeys():
            if path.endswith(".inp"):
                self.setup_file_id(path, "input")
            if path.endswith(".out"):
                self.setup_file_id(path, "output")

        # Files referenced through @INCLUDE directives in the input file
        contents = self.get_file_contents("input")
        for raw_line in contents.split("\n"):
            stripped = raw_line.strip()
            if not (stripped.startswith("@INCLUDE") or stripped.startswith("@include")):
                continue
            included = stripped.split(None, 1)[1]
            # Strip optional surrounding quotes from the filename
            if included.startswith(('\"', '\'')) and included.endswith(('\"', '\'')):
                included = included[1:-1]
            self.setup_file_id(self.search_file(included), "include")
Lauri Himanen's avatar
Lauri Himanen committed
58

59
    def input_preprocessor(self):
        """Preprocess the input file. Concatenate .inc files into the main
        input file and explicitly state all variables.

        The resulting text is stored in self.extended_input and parsed into
        self.input_tree.
        """
        # Merge include files to input
        include_files = self.get_file_handles("include", show_warning=False)
        input_file = self.get_file_contents("input")
        input_lines = input_file.split("\n")
        extended_input = input_lines[:]  # Make a copy
        if include_files:
            i_line = 0
            for line in input_lines:
                line = line.strip()
                if line.startswith("@INCLUDE") or line.startswith("@include"):
                    split = line.split(None, 1)
                    filename = split[1]
                    if filename.startswith(('\"', '\'')) and filename.endswith(('\"', '\'')):
                        filename = filename[1:-1]
                    filepath = self.search_file(filename)

                    # Replace the @INCLUDE line with the contents of the file
                    for handle in include_files:
                        name = handle.name
                        if name == filepath:
                            contents = handle.read()
                            contents = contents.split('\n')
                            del extended_input[i_line]
                            extended_input[i_line:i_line] = contents
                            # The @INCLUDE line itself was deleted, so the net
                            # growth is len(contents) - 1; the common += 1 at
                            # the end of the loop then lands on the line that
                            # followed the @INCLUDE. (The previous
                            # "+= len(contents)" overshot by one.)
                            i_line += len(contents) - 1
                i_line += 1

        # Gather the variable definitions
        variables = {}
        input_set_removed = []
        for i_line, line in enumerate(extended_input):
            if line.startswith("@SET") or line.startswith("@set"):
                components = line.split(None, 2)
                name = components[1]
                value = components[2]
                variables[name] = value
                logger.debug("Variable '{}' found with value '{}'".format(name, value))
            else:
                input_set_removed.append(line)

        # Place the variables. Both @VAR and @{VAR} forms are recognized.
        variable_pattern = r"\@\{(\w+)\}|@(\w+)"
        compiled = re.compile(variable_pattern)
        reserved = ("include", "set", "if", "endif")
        input_variables_replaced = []
        for line in input_set_removed:
            results = compiled.finditer(line)
            new_line = line
            offset = 0
            for result in results:
                first, second = result.groups()
                name = first if first else second
                if name in reserved:
                    continue
                value = variables.get(name)
                # Use an explicit None check: an empty-string value is a
                # legitimate @SET value and must not be reported as missing.
                if value is None:
                    logger.error("Value for variable '{}' not set.".format(name))
                    continue
                start = result.start()
                end = result.end()
                beginning = new_line[:offset + start]
                rest = new_line[offset + end:]
                new_line = beginning + value + rest
                # Match positions refer to the original line, so track how far
                # the replacements have shifted the text. The matched length
                # (end - start) differs between the @VAR and @{VAR} forms
                # (len(name)+1 vs. len(name)+3), so it cannot be derived from
                # len(name) alone as the previous "- len_name - 1" assumed.
                offset += len(value) - (end - start)
            input_variables_replaced.append(new_line)

        self.extended_input = '\n'.join(input_variables_replaced)
        self.input_tree = self.inputparser.parse(self.extended_input)
138

139
    def determine_file_ids_post_setup(self):
        """Determines the file id's after the CP2K version has been set
        up. This includes force files, coordinate files, cell files, etc.
        that are declared in the parsed input tree.
        """
        project_name = self.input_tree.get_keyword("GLOBAL/PROJECT_NAME")

        # Determine the presence of force file
        force_path = self.input_tree.get_keyword("FORCE_EVAL/PRINT/FORCES/FILENAME")
        if force_path is not None and force_path != "__STD_OUT__":

            # The force path is not typically exactly as written in input
            if force_path.startswith("="):
                # A leading '=' means the name is used exactly as given
                logger.debug("Using single force file.")
                force_path = force_path[1:]
            elif re.match(r".?/", force_path):
                # Path with an explicit directory component
                logger.debug("Using separate force file for each step.")
                force_path = "{}-1_0.xyz".format(force_path)
            else:
                # Bare name: CP2K prefixes it with the project name
                logger.debug("Using separate force file for each step.")
                force_path = "{}-{}-1_0.xyz".format(project_name, force_path)
            force_path = os.path.basename(force_path)

            # Check against the given files
            file_path = self.search_file(force_path)
            self.setup_file_id(file_path, "forces")

        # Determine the presence of an initial coordinate file
        init_coord_file = self.input_tree.get_keyword("FORCE_EVAL/SUBSYS/TOPOLOGY/COORD_FILE_NAME")
        if init_coord_file is not None:
            logger.debug("Initial coordinate file found.")
            # Check against the given files
            file_path = self.search_file(init_coord_file)
            self.setup_file_id(file_path, "initial_coordinates")

        # Determine the presence of a trajectory file
        traj_file = self.input_tree.get_keyword("MOTION/PRINT/TRAJECTORY/FILENAME")
        if traj_file is not None:
            file_format = self.input_tree.get_keyword("MOTION/PRINT/TRAJECTORY/FORMAT")
            # Use .get() instead of a hard lookup: a missing or unknown
            # FORMAT previously raised KeyError here. This matches the
            # handling used in get_atom_positions_and_unit().
            extension = {
                "PDB": "pdb",
                "XYZ": "xyz",
                "XMOL": "xyz",
                "ATOMIC": "xyz",
                "DCD": "dcd",
            }.get(file_format)
            if extension is None:
                logger.error("Unsupported trajectory file format '{}'.".format(file_format))
            else:
                logger.debug("Trajectory file found.")
                normalized_path = self.normalize_cp2k_path(traj_file, extension, "pos")
                file_path = self.search_file(normalized_path)
                self.setup_file_id(file_path, "trajectory")

        # Determine the presence of a cell output file
        cell_motion_file = self.input_tree.get_keyword("MOTION/PRINT/CELL/FILENAME")
        if cell_motion_file is not None:
            logger.debug("Cell file found.")
            extension = "cell"
            normalized_path = self.normalize_cp2k_path(cell_motion_file, extension)
            file_path = self.search_file(normalized_path)
            self.setup_file_id(file_path, "cell_output")

        # Determine the presence of a cell input file
        cell_input_file = self.input_tree.get_keyword("FORCE_EVAL/SUBSYS/CELL/CELL_FILE_NAME")
        if cell_input_file is not None:
            file_path = self.search_file(cell_input_file)
            self.setup_file_id(file_path, "cell_input")
202

203
    def normalize_cp2k_path(self, path, extension, name=""):
        """The paths in CP2K input can be given in many ways. This function
        tries to normalize these paths to a common form.

        Args:
            path: The path exactly as given in the CP2K input file.
            extension: The file extension without the leading dot.
            name: Optional infix (e.g. "pos") inserted before the step
                suffix. Empty by default.

        Returns:
            The normalized file path as a string.
        """
        logger.debug("Normalizing CP2K path")
        if name:
            name = "-" + name
        project_name = self.input_tree.get_keyword("GLOBAL/PROJECT_NAME")
        if path.startswith("="):
            # A leading '=' means the path is used exactly as written
            normalized_path = path[1:]
        elif re.match(r".?/", path):
            # Path with an explicit directory component. The pattern is the
            # same one used for force files (r".?/"); the previous r"./"
            # required a character before the slash and therefore missed
            # absolute paths such as "/abs/name".
            normalized_path = "{}{}-1.{}".format(path, name, extension)
        else:
            # Bare name: CP2K prefixes it with the project name
            normalized_path = "{}-{}{}-1.{}".format(project_name, path, name, extension)
        return normalized_path
218
219
220
221
222

    def search_file(self, path):
        """Searches the list of given files for a file that is defined in the
        CP2K input file.

        First compares the filename, and if multiple matches found descends
        the path until only one or zero matches found.

        Args:
            path: The file path as declared in the CP2K input.

        Returns:
            The matching file path from self.files, or None if no
            unambiguous match was found.
        """
        matches = {}
        # Only consider files that have not already been assigned an id
        resolvable = [x for x in self.files.iterkeys() if x not in self.file_ids.itervalues()]

        searched_parts = self.split_path(path)
        for file_path in resolvable:
            available_parts = self.split_path(file_path)
            for i_part, part in enumerate(searched_parts):
                # The searched path may be deeper than the candidate path;
                # the unguarded index previously raised IndexError.
                if i_part >= len(available_parts):
                    break
                if part == available_parts[i_part]:
                    # Record the deepest path level at which this candidate
                    # agrees with the searched path
                    matches[file_path] = i_part

        n = len(matches)
        if n == 1:
            # list() keeps this working on both Python 2 and 3
            return list(matches.keys())[0]
        elif n == 0:
            logger.error("Could not find file '{}' specified in CP2K input.".format(path))
            return None
        else:
            # Prefer the candidate that agrees with the searched path to the
            # greatest depth; if the two best candidates are equally deep the
            # match is ambiguous. (The previous line referenced an undefined
            # 'mydict', used invalid comprehension syntax and indexed the
            # integer depth with v[1], so this branch always crashed.)
            sorted_matches = sorted(matches.items(), key=lambda item: item[1], reverse=True)
            if sorted_matches[0][1] == sorted_matches[1][1]:
                # The original message also lacked its .format(path) argument
                logger.error("When searching for file '{}', multiple matches were found. Could not determine which file to use based on their path.".format(path))
                return None
            else:
                return sorted_matches[0][0]

    def split_path(self, path):
        """Splits a path into components and returns them in a reversed order
        (filename first, root last).
        """
        parts = []
        remainder = path
        while True:
            remainder, tail = os.path.split(remainder)
            if tail:
                parts.append(tail)
            else:
                # Nothing left to split off; keep a leading root such as "/"
                if remainder:
                    parts.append(remainder)
                break
        return parts
Lauri Himanen's avatar
Lauri Himanen committed
263

264
265
266
267
268
269
    def parse(self):
        """Parses everything that can be found from the given files. The
        results are outputted to std.out by using the backend. The scala
        layer will then take over from there.
        """
        # Opening bracket of the output stream
        self.stream.write("[")

        # Use the SimpleMatcher to extract most of the results
        parser_info = {"name": "cp2k-parser", "version": "1.0"}
        output_filename = self.get_file_handle("output").name
        self.parse_file(
            output_filename,
            self.outputparser.outputstructure,
            self.metainfoenv,
            self.backend,
            parser_info,
            self.outputparser.cachingLevelForMetaName,
            superContext=self.outputparser,
        )

        # Then extract the things that cannot be extracted by the SimpleMatcher

        # Closing bracket of the output stream
        self.stream.write("]\n")
285

286
287
288
289
290
291
    # def _Q_energy_total(self):
        # """Return the total energy from the bottom of the input file"""
        # result = Result()
        # result.unit = "hartree"
        # result.value = float(self.regexengine.parse(self.regexs.energy_total, self.parser.get_file_handle("output")))
        # return result
Lauri Himanen's avatar
Lauri Himanen committed
292

293
    def _Q_particle_forces(self):
        """Return the forces that are controlled by
        "FORCE_EVAL/PRINT/FORCES/FILENAME". These forces are typically
        printed out during optimization or single point calculation.

        Supports forces printed in the output file or in a single XYZ file.

        Returns:
            A Result object whose value_iterable holds the force
            configurations in atomic units.
        """
        result = Result()
        result.unit = "force_au"

        # Determine if a separate force file is used or are the forces printed
        # in the output file.
        filename = self.input_tree.get_keyword("FORCE_EVAL/PRINT/FORCES/FILENAME")
        separate_file = bool(filename) and filename != "__STD_OUT__"

        # Look for the forces either in output or in separate file
        if not separate_file:
            logger.debug("Looking for forces in output file.")
            forces = self.regexengine.parse(self.regexs.particle_forces, self.parser.get_file_handle("output"))
            if forces is None:
                msg = "No force configurations were found when searching the output file."
                logger.warning(msg)
                result.error_message = msg
                result.code = ResultCode.fail
                return result

            # Insert force configurations into the array
            force_array = None
            for i_conf, force_conf in enumerate(forces):
                iterator = self.csvengine.iread(force_conf, columns=(-3, -2, -1), comments=("#", "ATOMIC", "SUM"), separator=None)
                i_force_array = iterator.next()

                # Initialize the numpy array only once, from the shape of the
                # first configuration. Previously np.empty() was re-executed
                # on every iteration, discarding all previously stored
                # configurations.
                if force_array is None:
                    n_particles = i_force_array.shape[0]
                    n_dim = i_force_array.shape[1]
                    n_confs = len(forces)
                    force_array = np.empty((n_confs, n_particles, n_dim))

                force_array[i_conf, :, :] = i_force_array

            result.value_iterable = force_array
            return result
        else:
            logger.debug("Looking for forces in separate force file.")
            iterator = self.csvengine.iread(self.parser.get_file_handle("forces"), columns=(-3, -2, -1), comments=("#", "SUM"), separator=r"\ ATOMIC FORCES in \[a\.u\.\]")
            result.value_iterable = iterator
            return result
344

345
346
    def get_initial_atom_positions_and_unit(self):
        """Returns the starting configuration of the atoms in the system.

        Returns:
            Tuple (positions, unit) where positions is a numpy array of the
            initial coordinates, or None when the coordinate format is not
            supported by the parser.
        """
        unit = "angstrom"

        # Check where the coordinates are specified
        coord_format = self.input_tree.get_keyword("FORCE_EVAL/SUBSYS/TOPOLOGY/COORD_FILE_FORMAT")
        if not coord_format:
            coord_format = self.input_tree.get_keyword_default("FORCE_EVAL/SUBSYS/TOPOLOGY/COORD_FILE_FORMAT")

        # See if the coordinates are provided in the input file
        if coord_format == "OFF":
            logger.debug("Using coordinates from the input file.")
            coords = self.input_tree.get_default_keyword("FORCE_EVAL/SUBSYS/COORD")
            coords = coords.strip().split('\n')
            positions = []
            for line in coords:
                # First column is the element symbol, the rest are coordinates
                components = [float(x) for x in line.split()[1:]]
                positions.append(components)
            positions = np.array(positions)
            return positions, unit

        elif coord_format in ["CP2K", "G96", "XTL", "CRD"]:
            msg = "Tried to read the number of atoms from the initial configuration, but the parser does not yet support the '{}' format that is used by file '{}'.".format(coord_format, self.parser.file_ids["initial_coordinates"])
            logger.warning(msg)

        else:
            # External file, use AtomsEngine
            init_coord_file = self.parser.get_file_handle("initial_coordinates")
            engine_format = {
                "XYZ": "xyz",
                "CIF": "cif",
                "PDB": "pdb",
            }.get(coord_format)
            if engine_format is None:
                # Previously an unrecognized format fell through to an
                # unbound iter_pos (NameError); warn and bail out instead,
                # mirroring the unsupported-format branch above.
                logger.warning("Unsupported initial coordinate file format '{}'.".format(coord_format))
                return
            iter_pos = self.atomsengine.iread(init_coord_file, format=engine_format)
            return next(iter_pos), unit

        # # Check if the unit cell is multiplied programmatically
        # multiples = self.input_tree.get_keyword("FORCE_EVAL/SUBSYS/TOPOLOGY/MULTIPLE_UNIT_CELL")
        # if not multiples:
            # multiples = self.input_tree.get_keyword_default("FORCE_EVAL/SUBSYS/TOPOLOGY/MULTIPLE_UNIT_CELL")
        # factors = [int(x) for x in multiples.split()]
        # factor = np.prod(np.array(factors))

    def get_atom_positions_and_unit(self):
        """Returns the atom positions and unit that were calculated during
        the simulation.

        Returns:
            Tuple (traj_iter, unit) where traj_iter yields one configuration
            per trajectory frame, or (None, None) when no trajectory file is
            available or its format is unsupported.
        """
        # Determine the unit
        unit_path = "MOTION/PRINT/TRAJECTORY/UNIT"
        unit = self.input_tree.get_keyword(unit_path)
        unit = CP2KInput.decode_cp2k_unit(unit)

        # Read the trajectory
        traj_file = self.get_file_handle("trajectory", show_warning=False)
        if not traj_file:
            logger.debug("No trajectory file detected.")
            return None, None

        input_file_format = self.input_tree.get_keyword("MOTION/PRINT/TRAJECTORY/FORMAT")
        file_format = {
            "XYZ": "xyz",
            "XMOL": "xyz",
            "PDB": "pdb-cp2k",
            "ATOMIC": "atomic",
        }.get(input_file_format)

        if file_format is None:
            logger.error("Unsupported trajectory file format '{}'.".format(input_file_format))
            # Previously execution fell through with file_format=None and
            # crashed inside the atoms engine; bail out instead.
            return None, None

        # Use a custom implementation for the CP2K specific weird formats
        if file_format == "pdb-cp2k":
            traj_iter = self.csvengine.iread(traj_file, columns=[3, 4, 5], comments=["TITLE", "AUTHOR", "REMARK", "CRYST"], separator="END")
        elif file_format == "atomic":
            n_atoms = self.get_result_object("particle_number").value

            def atomic_generator():
                # The ATOMIC format has no per-frame header: frames are
                # delimited simply by the known number of atoms.
                conf = []
                i = 0
                for line in traj_file:
                    line = line.strip()
                    components = np.array([float(x) for x in line.split()])
                    conf.append(components)
                    i += 1
                    if i == n_atoms:
                        yield np.array(conf)
                        conf = []
                        i = 0
            traj_iter = atomic_generator()
        else:
            traj_iter = self.atomsengine.iread(traj_file, format=file_format)

        # Return the iterator and unit
        return (traj_iter, unit)
439

440
    def get_cell(self):
        """The cell size can be static or dynamic if e.g. doing NPT. If the
        cell size changes, outputs an Nx3x3 array where N is typically the
        number of timesteps.

        Returns:
            A Result object describing the cell, or None when no cell
            declaration could be found.
        """
        # Every sibling accessor builds its return value on a Result object;
        # this method previously never created one, so the first use of
        # result.unit/result.value raised NameError.
        result = Result()

        def cell_generator(cell_file):
            # Yields one 3x3 cell per non-comment line of a CP2K .cell file.
            # Columns 2-10 of each line hold the A, B and C vectors; 'factor'
            # is the multiple-unit-cell factor bound from the enclosing scope
            # (assigned before this generator is ever consumed).
            for line in cell_file:
                line = line.strip()
                if line.startswith("#"):
                    continue
                split = line.split()
                A = [float(x) for x in split[2:5]]
                B = [float(x) for x in split[5:8]]
                C = [float(x) for x in split[8:11]]
                result = np.array([A, B, C])*factor
                yield result

        # Determine if the cell is printed during simulation steps
        cell_output_file = self.parser.get_file_handle("cell_output")
        A = self.input_tree.get_keyword("FORCE_EVAL/SUBSYS/CELL/A")
        B = self.input_tree.get_keyword("FORCE_EVAL/SUBSYS/CELL/B")
        C = self.input_tree.get_keyword("FORCE_EVAL/SUBSYS/CELL/C")
        ABC = self.input_tree.get_keyword("FORCE_EVAL/SUBSYS/CELL/ABC")
        abg = self.input_tree.get_keyword("FORCE_EVAL/SUBSYS/CELL/ALPHA_BETA_GAMMA")
        cell_input_file = self.parser.get_file_handle("cell_input")

        # Multiplication factor
        # NOTE(review): assumes MULTIPLE_UNIT_CELL always yields a value;
        # if get_keyword can return None here, .split() will fail — verify
        # whether a get_keyword_default fallback is needed.
        multiples = self.input_tree.get_keyword("FORCE_EVAL/SUBSYS/CELL/MULTIPLE_UNIT_CELL")
        factors = [int(x) for x in multiples.split()]
        factor = np.prod(np.array(factors))

        # Separate file from e.g. NPT
        if cell_output_file:
            logger.debug("Cell output file found.")
            result.unit = "angstrom"
            result.value_iterable = cell_generator(cell_output_file)
            return result

        # Cartesian cell vectors
        elif A and B and C:
            logger.debug("Cartesian cell vectors found.")
            # Cell given as three vectors
            A_unit = self.input_tree.get_unit("FORCE_EVAL/SUBSYS/CELL/A")
            B_unit = self.input_tree.get_unit("FORCE_EVAL/SUBSYS/CELL/B")
            C_unit = self.input_tree.get_unit("FORCE_EVAL/SUBSYS/CELL/C")

            A = np.array([float(x) for x in A.split()])
            B = np.array([float(x) for x in B.split()])
            C = np.array([float(x) for x in C.split()])

            # Convert already here as the different vectors may have different units
            A = convert_unit(A, A_unit)
            B = convert_unit(B, B_unit)
            C = convert_unit(C, C_unit)

            cell = np.empty((3, 3))
            cell[0, :] = A
            cell[1, :] = B
            cell[2, :] = C
            # NOTE(review): result.unit is not set in this branch —
            # presumably convert_unit() already normalizes the unit; confirm.
            result.value = cell*factor
            return result

        # Cell vector magnitudes and angles
        elif ABC and abg:
            logger.debug("Cell vectors defined with angles and magnitudes found.")
            # Cell given as vector magnitudes plus angles
            ABC_unit = self.input_tree.get_unit("FORCE_EVAL/SUBSYS/CELL/ABC")
            abg_unit = self.input_tree.get_unit("FORCE_EVAL/SUBSYS/CELL/ALPHA_BETA_GAMMA")

            angles = np.array([float(x) for x in abg.split()])
            magnitudes = np.array([float(x) for x in ABC.split()])
            a = magnitudes[0]
            b = magnitudes[1]
            c = magnitudes[2]

            # Convert angles to radians
            angles = (angles*ureg(abg_unit)).to(ureg.radian).magnitude
            alpha = angles[0]
            beta = angles[1]
            gamma = angles[2]

            # Standard construction of a triclinic cell: A along x, B in the
            # xy-plane, C completed from the angle relations.
            A = np.array((a, 0, 0))
            B = np.array((b*math.cos(gamma), b*math.sin(gamma), 0))
            b_x = B[0]
            b_y = B[1]
            c_x = c*math.cos(beta)
            c_y = 1.0/b_y*(b*c*math.cos(alpha) - b_x*c_x)
            c_z = math.sqrt(c**2 - c_x**2 - c_y**2)
            C = np.array((c_x, c_y, c_z))

            cell = np.zeros((3, 3))
            cell[0, :] = A
            cell[1, :] = B
            cell[2, :] = C
            result.value = cell*factor
            result.unit = ABC_unit
            return result

        # Separate cell input file
        elif cell_input_file:
            logger.debug("Separate cell input file found.")
            filename = cell_input_file.name
            if filename.endswith(".cell"):
                logger.debug("CP2K specific cell input file format found.")
                result.value = cell_generator(cell_input_file).next()
                result.unit = "angstrom"
                return result
            else:
                logger.error("The XSC cell file format is not yet supported.")

        # No cell found
        else:
            logger.error("Could not find cell declaration.")
554

555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
    def get_functionals(self):
        """Used to search the input file for a functional definition.

        Returns:
            A Result object whose value is the normalized XC functional
            name, or None when no mapping is known for the declared
            functional.
        """
        # The Result object was previously never created before its
        # attributes were assigned (NameError), unlike in the sibling
        # accessors.
        result = Result()

        # First try to look at the shortcut
        xc_shortcut = self.input_tree.get_parameter("FORCE_EVAL/DFT/XC/XC_FUNCTIONAL")
        if xc_shortcut is not None and xc_shortcut != "NONE" and xc_shortcut != "NO_SHORTCUT":
            logger.debug("Shortcut defined for XC_FUNCTIONAL")

            # If PBE, check version
            if xc_shortcut == "PBE":
                pbe_version = self.input_tree.get_keyword("FORCE_EVAL/DFT/XC/XC_FUNCTIONAL/PBE/PARAMETRIZATION")
                result.value = {
                        'ORIG': "GGA_X_PBE",
                        'PBESOL': "GGA_X_PBE_SOL",
                        'REVPBE': "GGA_X_PBE_R",
                }.get(pbe_version, "GGA_X_PBE")
                return result

            result.value = {
                    'B3LYP': "HYB_GGA_XC_B3LYP",
                    'BEEFVDW': None,
                    'BLYP': "GGA_C_LYP_GGA_X_B88",
                    'BP': None,
                    'HCTH120': None,
                    'OLYP': None,
                    'LDA': "LDA_XC_TETER93",
                    'PADE': "LDA_XC_TETER93",
                    'PBE0': None,
                    'TPSS': None,
            }.get(xc_shortcut, None)
            return result
        else:
            logger.debug("No shortcut defined for XC_FUNCTIONAL. Looking into subsections.")

        # Look at the subsections and determine what part have been activated

        # Becke88
        xc_components = []
        becke_88 = self.input_tree.get_parameter("FORCE_EVAL/DFT/XC/XC_FUNCTIONAL/BECKE88")
        if becke_88 == "TRUE":
            xc_components.append("GGA_X_B88")

        # Becke 97
        becke_97 = self.input_tree.get_parameter("FORCE_EVAL/DFT/XC/XC_FUNCTIONAL/BECKE97")
        if becke_97 == "TRUE":
            becke_97_param = self.input_tree.get_keyword("FORCE_EVAL/DFT/XC/XC_FUNCTIONAL/BECKE97/PARAMETRIZATION")
            becke_97_result = {
                    'B97GRIMME': None,
                    'B97_GRIMME': None,
                    'ORIG': "GGA_XC_B97",
                    'WB97X-V': None,
            }.get(becke_97_param, None)
            if becke_97_result is not None:
                xc_components.append(becke_97_result)

        # Return an alphabetically sorted and joined list of the xc components
        result.value = "_".join(sorted(xc_components))
        return result
Lauri Himanen's avatar
Lauri Himanen committed
613

Lauri Himanen's avatar
Lauri Himanen committed
614
615
616
617
# #===============================================================================
# class CP2K_262_Implementation(CP2KImplementation):
    # def __init__(self, parser):
        # CP2KImplementation.__init__(self, parser)