Commit 7e702fab authored by Lauri Himanen's avatar Lauri Himanen
Browse files

Fixed bugs in the atomic kind labels, added some new basis set information.

parent dd17d5a3
......@@ -3,7 +3,10 @@ from builtins import object
import numpy as np
import logging
from collections import defaultdict
logger = logging.getLogger("nomad")
metainfo_section_prefix = "x_cp2k_section_input_"
metainfo_data_prefix = "x_cp2k_input_"
#===============================================================================
......
......@@ -17,8 +17,8 @@ class CP2KParser(ParserInterface):
After the implementation has been setup, you can parse the files with
parse().
"""
def __init__(self, main_file, metainfo_to_keep=None, backend=None, default_units=None, metainfo_units=None, debug=True, store=True):
super(CP2KParser, self).__init__(main_file, metainfo_to_keep, backend, default_units, metainfo_units, debug, store)
def __init__(self, main_file, metainfo_to_keep=None, backend=None, default_units=None, metainfo_units=None, debug=False, log_level=logging.ERROR, store=True):
super(CP2KParser, self).__init__(main_file, metainfo_to_keep, backend, default_units, metainfo_units, debug, log_level, store)
def setup_version(self):
"""Sets up the version by looking at the output file and the version
......
......@@ -20,7 +20,7 @@ import xml.etree.cElementTree as ET
import logging
import json
import pickle
from cp2kparser.generic.inputparsing import *
from cp2kparser.generic.inputparsing import Section, Keyword, DefaultKeyword, SectionParameters, CP2KInput, metainfo_data_prefix, metainfo_section_prefix
logger = logging
......@@ -201,15 +201,25 @@ def recursive_tree_generation(xml_element, for_metainfo=False, name_stack=[], ig
#===============================================================================
def generate_input_metainfos(object_tree):
json_root = {
"type": "nomad_meta_info_1_0",
"description": "Metainfo for the values parsed from a CP2K input file.",
"dependencies": [ {
"relativePath": "cp2k.general.nomadmetainfo.json"
}],
}
parent = Section("dummy")
root_section = object_tree.root_section
root_section.name = "CP2K_INPUT"
root_section.name = None
root_section.description = "This section contains the explicitly stated keywords, default keywords, and section parameters in the CP2K input file. Only some of the sections that control printing (PRINT, EACH) are supported, because including all of them would double the size of this metadata without adding much useful information. The hidden input keywords starting with a double underscore are not included."
container = []
name_stack = []
generate_metainfo_recursively(root_section, parent, container, name_stack)
json_root["metaInfos"] = container
with open("input_metainfo.json", "w") as f:
f.write(json.dumps(container, indent=2, separators=(',', ': ')))
f.write(json.dumps(json_root, indent=2, separators=(',', ': ')))
#===============================================================================
......@@ -217,7 +227,8 @@ def generate_metainfo_recursively(obj, parent, container, name_stack):
json = None
if isinstance(obj, Section):
name_stack.append(obj.name)
if obj.name is not None:
name_stack.append(obj.name)
json = generate_section_metainfo_json(obj, parent, name_stack)
for child in obj.sections.values():
generate_metainfo_recursively(child[0], obj, container, name_stack)
......@@ -227,7 +238,8 @@ def generate_metainfo_recursively(obj, parent, container, name_stack):
generate_metainfo_recursively(obj.section_parameter, obj, container, name_stack)
if obj.default_keyword is not None:
generate_metainfo_recursively(obj.default_keyword, obj, container, name_stack)
name_stack.pop()
if obj.name is not None:
name_stack.pop()
else:
json = generate_input_object_metainfo_json(obj, parent, name_stack)
container.append(json)
......@@ -236,9 +248,11 @@ def generate_metainfo_recursively(obj, parent, container, name_stack):
#===============================================================================
def generate_input_object_metainfo_json(child, parent, name_stack):
path = ".".join(name_stack)
# if path.startswith("."):
# path = path[1:]
json_obj = {}
json_obj["name"] = "x_cp2k_{}.{}".format(path, child.name)
json_obj["superNames"] = ["x_cp2k_section_{}".format(path)]
json_obj["name"] = metainfo_data_prefix + "{}.{}".format(path, child.name)
json_obj["superNames"] = [metainfo_section_prefix + "{}".format(path)]
# Description
description = child.description
......@@ -271,13 +285,20 @@ def generate_input_object_metainfo_json(child, parent, name_stack):
#===============================================================================
def generate_section_metainfo_json(child, parent, name_stack):
name = ".".join(name_stack)
path = ".".join(name_stack[:-1])
json_obj = {}
if child.name is None:
json_obj["name"] = "x_cp2k_section_input"
json_obj["superNames"] = ["section_run"]
else:
name = ".".join(name_stack)
json_obj["name"] = metainfo_section_prefix + "{}".format(name)
if parent.name is not None:
json_obj["superNames"] = [metainfo_section_prefix + "{}".format(path)]
else:
json_obj["superNames"] = ["x_cp2k_section_input"]
json_obj["name"] = "x_cp2k_section_{}".format(name)
json_obj["kindStr"] = "type_section"
json_obj["superNames"] = ["x_cp2k_section_{}".format(path)]
description = child.description
if description is None or description.isspace():
......
......@@ -8,6 +8,7 @@ from nomadcore.caching_backend import CachingLevel
from nomadcore.unit_conversion.unit_conversion import convert_unit
from nomadcore.baseclasses import CommonParser
from .inputparser import CP2KInputParser
from collections import defaultdict
logger = logging.getLogger("nomad")
......@@ -24,6 +25,8 @@ class CP2KCommonParser(CommonParser):
self.section_system_index = None
self.test_electronic_structure_method = "DFT"
self.basis_to_kind_mapping = []
self.atom_kind_info = defaultdict(dict) # Map from kind number to kind information
self.basis_set_info = defaultdict(dict) # Map from kind number to basis set information
#=======================================================================
# Cache levels
......@@ -44,6 +47,13 @@ class CP2KCommonParser(CommonParser):
self.cache_service.add("atom_positions", single=False, update=True)
self.cache_service.add("atom_labels", single=False, update=False)
self.cache_service.add("number_of_atoms", single=False, update=False)
self.cache_service.add("basis_set_kind", single=False, update=False)
self.cache_service.add("basis_set_name", single=False, update=False)
self.cache_service.add("basis_set_planewave_cutoff", update=False)
self.cache_service.add("mapping_section_basis_set_cell_dependent", single=False, update=False)
self.cache_service.add("map_kind_to_basis", single=False, update=False)
self.cache_service.add("map_index_to_kind", single=False, update=False)
self.cache_service.add("map_kind_number_to_basis_ref", single=False, update=False)
#===========================================================================
# SimpleMatchers
......@@ -78,7 +88,7 @@ class CP2KCommonParser(CommonParser):
SM( " ***** ** ** ** ** PROGRAM PROCESS ID\s+(?P<x_cp2k_start_id>{})".replace("*", "\*").format(self.regexs.regex_i)),
SM( " **** ** ******* ** PROGRAM STARTED IN".replace("*", "\*"),
forwardMatch=True,
adHoc=self.adHoc_run_dir(),
adHoc=self.adHoc_run_dir("x_cp2k_start_path"),
)
]
),
......@@ -133,6 +143,28 @@ class CP2KCommonParser(CommonParser):
]
)
# SimpleMatcher for the footer that is common to all run types
def footer(self):
return SM( " - DBCSR STATISTICS -",
forwardMatch=True,
subMatchers=[
SM( re.escape(" **** **** ****** ** PROGRAM ENDED AT"),
forwardMatch=True,
sections=['x_cp2k_section_end_information'],
subMatchers=[
SM( " **** **** ****** ** PROGRAM ENDED AT\s+(?P<x_cp2k_end_time>{})".replace("*", "\*").format(self.regexs.regex_eol)),
SM( " ***** ** *** *** ** PROGRAM RAN ON\s+(?P<x_cp2k_end_host>{})".replace("*", "\*").format(self.regexs.regex_word)),
SM( " ** **** ****** PROGRAM RAN BY\s+(?P<x_cp2k_end_user>{})".replace("*", "\*").format(self.regexs.regex_word)),
SM( " ***** ** ** ** ** PROGRAM PROCESS ID\s+(?P<x_cp2k_end_id>{})".replace("*", "\*").format(self.regexs.regex_i)),
SM( " **** ** ******* ** PROGRAM STOPPED IN".replace("*", "\*"),
forwardMatch=True,
adHoc=self.adHoc_run_dir("x_cp2k_end_path"),
)
]
),
]
)
# SimpleMatcher for an SCF wavefunction optimization
def quickstep_calculation(self):
return SM( " SCF WAVEFUNCTION OPTIMIZATION",
......@@ -228,9 +260,9 @@ class CP2KCommonParser(CommonParser):
],
),
SM( " ATOMIC KIND INFORMATION",
sections=["x_cp2k_section_atomic_kinds", "section_method_basis_set"],
sections=["x_cp2k_section_atomic_kinds"],
subMatchers=[
SM( "\s+(?P<x_cp2k_kind_number>{0})\. Atomic kind: (?P<x_cp2k_kind_element_symbol>{1})\s+Number of atoms:\s+(?P<x_cp2k_kind_number_of_atoms>{1})".format(self.regexs.regex_i, self.regexs.regex_word),
SM( "\s+(?P<x_cp2k_kind_number>{0})\. Atomic kind: (?P<x_cp2k_kind_label>{1})\s+Number of atoms:\s+(?P<x_cp2k_kind_number_of_atoms>{1})".format(self.regexs.regex_i, self.regexs.regex_word),
repeats=True,
sections=["x_cp2k_section_atomic_kind", "x_cp2k_section_kind_basis_set"],
subMatchers=[
......@@ -344,38 +376,86 @@ class CP2KCommonParser(CommonParser):
backend.addValue("program_basis_set_type", "gaussian")
backend.addValue("electronic_structure_method", self.test_electronic_structure_method)
# See if the cutoff is available
cutoff = section.get_latest_value("x_cp2k_planewave_cutoff")
if cutoff is not None:
# Collect the atomic kind information and push it to backend
kind_ids = {}
for kind_number, info in self.atom_kind_info.items():
kindID = backend.openSection("section_method_atom_kind")
kind_ids[kind_number] = kindID
label = info["label"]
atom_number = info.get("element_number")
if atom_number is not None:
backend.addValue("method_atom_kind_atom_number", atom_number)
backend.addValue("method_atom_kind_label", label)
backend.closeSection("section_method_atom_kind", kindID)
# Cell dependent basis information
ryd_cutoff = section.get_latest_value("x_cp2k_planewave_cutoff")
if ryd_cutoff is not None:
gid = backend.openSection("section_basis_set_cell_dependent")
cutoff = convert_unit(2*cutoff, "hartree")
backend.addValue("basis_set_planewave_cutoff", cutoff)
self.cache_service["mapping_section_basis_set_cell_dependent"] = gid
ha_cutoff = convert_unit(2*ryd_cutoff, "hartree")
backend.addValue("basis_set_planewave_cutoff", ha_cutoff)
self.cache_service["basis_set_planewave_cutoff"] = ryd_cutoff
backend.closeSection("section_basis_set_cell_dependent", gid)
def onClose_section_method_basis_set(self, backend, gIndex, section):
# Atom centered basis set information
basis_ids = {}
map_kind_number_to_basis_ref = {}
for kind_number, info in self.basis_set_info.items():
basis_section_id = backend.openSection("section_basis_set_atom_centered")
basis_ids[kind_number] = basis_section_id
map_kind_number_to_basis_ref[kind_number] = basis_section_id
name = info["name"]
atom_number = info.get("element_number")
if atom_number is not None:
backend.addValue("basis_set_atom_number", atom_number)
backend.addValue("basis_set_atom_centered_short_name", name)
backend.closeSection("section_basis_set_atom_centered", basis_section_id)
self.cache_service["map_kind_number_to_basis_ref"] = map_kind_number_to_basis_ref
# Add the basis information to section_method
mapping = []
dict_map = {}
for kind_number, basis_id in basis_ids.items():
kind_id = kind_ids[kind_number]
mapping.append((basis_id, kind_id))
dict_map[kind_id] = basis_id
method_basis_id = backend.openSection("section_method_basis_set")
if mapping:
mapping = np.array(mapping)
self.cache_service["map_kind_to_basis"] = dict_map
backend.addArrayValues("mapping_section_method_basis_set_atom_centered", np.array(mapping))
backend.addValue("method_basis_set_kind", "wavefunction")
backend.addValue("number_of_basis_sets_atom_centered", len(self.basis_to_kind_mapping))
backend.addArrayValues("mapping_section_method_basis_set_atom_centered", np.array(self.basis_to_kind_mapping))
self.cache_service.push_value("mapping_section_method_basis_set_cell_associated")
backend.addValue("number_of_basis_sets_atom_centered", len(self.basis_set_info))
backend.closeSection("section_method_basis_set", method_basis_id)
def onClose_x_cp2k_section_atomic_kind(self, backend, gIndex, section):
kindID = backend.openSection("section_method_atom_kind")
basisID = backend.openSection("section_basis_set_atom_centered")
# basisID = backend.openSection("section_basis_set_atom_centered")
element_symbol = section.get_latest_value("x_cp2k_kind_element_symbol")
kind_number = section.get_latest_value("x_cp2k_kind_number")
basis_set_name = section.get_latest_value(["x_cp2k_section_kind_basis_set", "x_cp2k_kind_basis_set_name"])
atom_number = self.get_atomic_number(element_symbol)
kind_label = element_symbol + str(kind_number)
backend.addValue("method_atom_kind_atom_number", atom_number)
backend.addValue("method_atom_kind_label", kind_label)
backend.addValue("basis_set_atom_number", atom_number)
backend.addValue("basis_set_atom_centered_short_name", basis_set_name)
# Save the kind labels. These will be connected to atomic numbers later
# on when the atomic numbers are listed in the atomic positions.
kind_number = int(section.get_latest_value("x_cp2k_kind_number"))
kind_label = section.get_latest_value("x_cp2k_kind_label")
self.atom_kind_info[kind_number]["label"] = kind_label
# Add the reference based mapping between basis and atomic kind
self.basis_to_kind_mapping.append([basisID, kindID])
backend.closeSection("section_basis_set_atom_centered", basisID)
backend.closeSection("section_method_atom_kind", kindID)
# Save all the basis set information for later use. They will be pushed
# later when an atom number can be associated with the basis.
basis_set_name = section.get_latest_value(["x_cp2k_section_kind_basis_set", "x_cp2k_kind_basis_set_name"])
basis_info = self.basis_set_info[kind_number]
basis_info["name"] = basis_set_name
def onClose_x_cp2k_section_atomic_kinds(self, backend, gIndex, section):
# Store the name and kind of the basis set for later use (stored inside
# single_configuration_calculation).
atomic_kinds = section["x_cp2k_section_atomic_kind"]
long_basis_name = []
for kind in atomic_kinds:
kind_basis = kind["x_cp2k_section_kind_basis_set"][0]
basis_name = kind_basis["x_cp2k_kind_basis_set_name"][0]
long_basis_name.append(basis_name)
self.cache_service["basis_set_kind"] = "wavefunction"
self.cache_service["basis_set_name"] = "_".join(long_basis_name)
def onClose_x_cp2k_section_program_information(self, backend, gIndex, section):
input_file = section.get_latest_value("x_cp2k_input_filename")
......@@ -405,6 +485,40 @@ class CP2KCommonParser(CommonParser):
backend.addValue('single_configuration_to_calculation_method_ref', self.section_method_index)
backend.addValue('single_configuration_calculation_to_system_ref', self.section_system_index)
scc_basis_id = backend.openSection("section_basis_set")
# Basis kind
self.cache_service.push_value("basis_set_kind")
# Basis name
basis_name = self.cache_service["basis_set_name"]
if basis_name is not None:
cutoff = self.cache_service["basis_set_planewave_cutoff"]
if cutoff is not None:
basis_name += "_PW_{}".format(cutoff)
backend.addValue("basis_set_name", basis_name)
# Gaussian mapping
map_index_to_basis = []
map_kind_number_to_basis_ref = self.cache_service["map_kind_number_to_basis_ref"]
map_index_to_kind = self.cache_service["map_index_to_kind"]
# print(map_kind_number_to_basis_ref)
# print(map_index_to_kind)
if map_index_to_kind is not None and map_kind_number_to_basis_ref is not None:
indices = map_index_to_kind.keys()
for index in sorted(indices):
kind = map_index_to_kind[index]
basis_ref = map_kind_number_to_basis_ref[kind]
map_index_to_basis.append(basis_ref)
map_index_to_basis = np.array(map_index_to_basis)
backend.addArrayValues("mapping_section_basis_set_atom_centered", map_index_to_basis)
# Cell dependent basis mapping
self.cache_service.push_value("mapping_section_basis_set_cell_dependent")
backend.closeSection("section_basis_set", scc_basis_id)
#===========================================================================
# adHoc functions
def adHoc_x_cp2k_section_cell(self):
......@@ -522,7 +636,7 @@ class CP2KCommonParser(CommonParser):
def wrapper(parser):
# Define the regex that extracts the information
regex_string = r"\s+\d+\s+(\d+)\s+(\w+)\s+\d+\s+({0})\s+({0})\s+({0})".format(self.regexs.regex_f)
regex_string = r"\s+(\d+)\s+(\d+)\s+(\w+)\s+(\d+)\s+({0})\s+({0})\s+({0})".format(self.regexs.regex_f)
regex_compiled = re.compile(regex_string)
match = True
......@@ -533,6 +647,7 @@ class CP2KCommonParser(CommonParser):
parser.fIn.readline()
parser.fIn.readline()
parser.fIn.readline()
map_index_to_kind = {}
while match:
line = parser.fIn.readline()
......@@ -540,14 +655,22 @@ class CP2KCommonParser(CommonParser):
if result:
match = True
label = result.groups()[1] + result.groups()[0]
kind_number = int(result.groups()[1])
element_number = int(result.groups()[3])
index = int(result.groups()[0])
map_index_to_kind[index] = kind_number
info = self.atom_kind_info[kind_number]
label = info["label"]
info["element_number"] = element_number
self.basis_set_info[kind_number]["element_number"] = element_number
labels.append(label)
coordinate = [float(x) for x in result.groups()[2:]]
coordinate = [float(x) for x in result.groups()[4:]]
coordinates.append(coordinate)
else:
match = False
coordinates = np.array(coordinates)
labels = np.array(labels)
self.cache_service["map_index_to_kind"] = map_index_to_kind
# If anything found, push the results to the correct section
if len(coordinates) != 0:
......@@ -556,16 +679,16 @@ class CP2KCommonParser(CommonParser):
return wrapper
def adHoc_run_dir(self):
def adHoc_run_dir(self, metaname):
def wrapper(parser):
end_str = "\n"
end = False
path_array = []
# Loop through coordinates until the sum of forces is read
# Loop through lines until empty line is encountered
while not end:
line = parser.fIn.readline()
if line.startswith(end_str):
if line == end_str or len(line) == 0:
end = True
else:
path_part = line.split()[-1]
......@@ -573,7 +696,7 @@ class CP2KCommonParser(CommonParser):
# Form the final path and push to backend
path = "".join(path_array)
parser.backend.addValue("x_cp2k_start_path", path)
parser.backend.addValue(metaname, path)
return wrapper
......
......@@ -174,7 +174,8 @@ class CP2KGeoOptParser(MainHierarchicalParser):
self.cm.quickstep_header(),
],
),
self.geo_opt
self.geo_opt,
self.cm.footer(),
]
)
......
......@@ -7,7 +7,7 @@ import logging
import pickle
import numpy as np
from nomadcore.baseclasses import BasicParser
from cp2kparser.generic.inputparsing import *
from cp2kparser.generic.inputparsing import metainfo_data_prefix, metainfo_section_prefix
logger = logging.getLogger("nomad")
......@@ -409,9 +409,13 @@ class CP2KInputParser(BasicParser):
if not section.accessed:
return
name_stack.append(section.name)
path = "x_cp2k_section_{}".format(".".join(name_stack))
not_section_path = "x_cp2k_{}".format(".".join(name_stack))
if section.name == "CP2K_INPUT":
path = "x_cp2k_section_input"
else:
name_stack.append(section.name)
path = metainfo_section_prefix + "{}".format(".".join(name_stack))
not_section_path = metainfo_data_prefix + "{}".format(".".join(name_stack))
gid = self.backend.openSection(path)
......@@ -444,14 +448,15 @@ class CP2KInputParser(BasicParser):
self.backend.closeSection(path, gid)
name_stack.pop()
if section.name != "CP2K_INPUT":
name_stack.pop()
def setup_version(self, version_number):
""" The pickle file which contains preparsed data from the
x_cp2k_input.xml is version specific. By calling this function before
parsing the correct file can be found.
"""
pickle_path = os.path.dirname(__file__) + "/input_data/cp2k_input_tree.pickle".format(version_number)
pickle_path = os.path.dirname(__file__) + "/input_data/cp2k_input_tree.pickle"
input_tree_pickle_file = open(pickle_path, 'rb')
self.input_tree = pickle.load(input_tree_pickle_file)
......
......@@ -141,6 +141,7 @@ class CP2KMDParser(MainHierarchicalParser):
],
),
self.md,
self.cm.footer(),
]
)
......
......@@ -40,6 +40,7 @@ class CP2KSinglePointParser(MainHierarchicalParser):
self.cm.header(),
self.cm.quickstep_header(),
self.cm.quickstep_calculation(),
self.cm.footer(),
]
)
......
NONBONDED NEIGHBOR LISTS IN angstrom (PROCESS 0)
Atom-A X Y Z Atom-B X Y Z Cell(i,j,k) Distance ONFO VDW-scale EI-scale
7 1.357674 -1.357674 -1.357674 1 0.000000 0.000000 0.000000 0 0 0 2.3516
5 -1.357674 1.357674 -1.357674 1 0.000000 0.000000 0.000000 0 0 0 2.3516
8 -1.357674 -1.357674 1.357674 1 0.000000 0.000000 0.000000 0 0 0 2.3516
6 1.357674 1.357674 1.357674 1 0.000000 0.000000 0.000000 0 0 0 2.3516
6 1.357674 1.357674 1.357674 2 0.000000 2.715349 2.715349 0 0 0 2.3516
6 1.357674 1.357674 1.357674 3 2.715349 2.715349 0.000000 0 0 0 2.3516
6 1.357674 1.357674 1.357674 4 2.715349 0.000000 2.715349 0 0 0 2.3516
7 1.357674 -1.357674 -1.357674 3 2.715349 2.715349 0.000000 0 -1 0 2.3516
5 -1.357674 1.357674 -1.357674 3 2.715349 2.715349 0.000000 -1 0 0 2.3516
8 -1.357674 -1.357674 1.357674 4 2.715349 0.000000 2.715349 -1 0 0 2.3516
2 0.000000 2.715349 2.715349 8 -1.357674 -1.357674 1.357674 0 1 0 2.3516
3 2.715349 2.715349 0.000000 8 -1.357674 -1.357674 1.357674 1 1 0 2.3516
4 2.715349 0.000000 2.715349 7 1.357674 -1.357674 -1.357674 0 0 1 2.3516
2 0.000000 2.715349 2.715349 5 -1.357674 1.357674 -1.357674 0 0 1 2.3516
4 2.715349 0.000000 2.715349 5 -1.357674 1.357674 -1.357674 1 0 1 2.3516
2 0.000000 2.715349 2.715349 7 1.357674 -1.357674 -1.357674 0 1 1 2.3516
Total number of neighbor interactions for process 0: 16
&GLOBAL
PROJECT Si_bulk8
RUN_TYPE ENERGY_FORCE
PRINT_LEVEL HIGH
&END GLOBAL
&FORCE_EVAL
METHOD Quickstep
STRESS_TENSOR ANALYTICAL
&SUBSYS
&KIND SiA
ELEMENT Si
BASIS_SET DZVP-GTH-PADE
POTENTIAL GTH-PADE-q4
&END KIND
&KIND SiB
ELEMENT Si
BASIS_SET SZV-GTH-PADE
POTENTIAL GTH-PADE-q4
&END KIND
&CELL
A 5.430697500 0.000000000 0.000000000
B 0.000000000 5.430697500 0.000000000
C 0.000000000 0.000000000 5.430697500
&END CELL
&COORD
SiA 0.000000000 0.000000000 0.000000000
SiA 0.000000000 2.715348700 2.715348700
SiA 2.715348700 2.715348700 0.000000000
SiA 2.715348700 0.000000000 2.715348700
SiB 4.073023100 1.357674400 4.073023100
SiB 1.357674400 1.357674400 1.357674400
SiB 1.357674400 4.073023100 4.073023100
SiB 4.073023100 4.073023100 1.357674400
&END COORD
&END SUBSYS
&DFT
BASIS_SET_FILE_NAME ../BASIS_SET
POTENTIAL_FILE_NAME ../GTH_POTENTIALS
&QS
EPS_DEFAULT 1.0E-10
&END QS
&MGRID
NGRIDS 4
CUTOFF 300
REL_CUTOFF 60
&END MGRID
&XC
&XC_FUNCTIONAL PADE
&END XC_FUNCTIONAL
&END XC
&SCF
SCF_GUESS ATOMIC
EPS_SCF 1.0E-7
MAX_SCF 300
&DIAGONALIZATION ON
ALGORITHM STANDARD
&END DIAGONALIZATION
&MIXING T
METHOD BROYDEN_MIXING
ALPHA 0.4
NBROYDEN 8
&END MIXING
&END SCF
&END DFT
&PRINT
&FORCES ON
&END FORCES
&END PRINT
&END FORCE_EVAL