diff --git a/dependencies/parsers/vasp b/dependencies/parsers/vasp
index 8776a0bc7b32fb51e98ea8fe7af7d5630240edd3..2c8b7763eae8a27e7c1ca3a8ce86533978f42f73 160000
--- a/dependencies/parsers/vasp
+++ b/dependencies/parsers/vasp
@@ -1 +1 @@
-Subproject commit 8776a0bc7b32fb51e98ea8fe7af7d5630240edd3
+Subproject commit 2c8b7763eae8a27e7c1ca3a8ce86533978f42f73
diff --git a/nomad/cli/parse.py b/nomad/cli/parse.py
index cadec1d8dbea1979337a44dd0db4a98b18b6575f..32f8b5ab80f8e5e34ceec99577fb0ce3f2af496e 100644
--- a/nomad/cli/parse.py
+++ b/nomad/cli/parse.py
@@ -48,8 +48,9 @@ def parse(
 
     if isinstance(parser_backend, MSection):
         backend = Backend(parser._metainfo_env, parser.domain)
-        root_section = str(datamodel.domains[parser.domain]['root_section'])
-        setattr(backend.entry_archive, root_section, parser_backend)
+        root_section = parser_backend.m_def.name
+        section_def = getattr(datamodel.EntryArchive, root_section)
+        backend.entry_archive.m_add_sub_section(section_def, parser_backend)
         backend.resource.add(parser_backend)
         parser_backend = backend
 
diff --git a/nomad/datamodel/metainfo/public.py b/nomad/datamodel/metainfo/public.py
index fda3cf773ba7753971c2007e25c3e1a7bb50e4b3..8bef14b727bfbdb6f2bd03c0a509fff22771d08a 100644
--- a/nomad/datamodel/metainfo/public.py
+++ b/nomad/datamodel/metainfo/public.py
@@ -3439,6 +3439,11 @@ class section_run(MSection):
         repeats=True,
         a_legacy=LegacyDefinition(name='section_system'))
 
+    section_workflow = SubSection(
+        sub_section=SectionProxy('section_workflow'),
+        repeats=True
+    )
+
 
 class section_sampling_method(MSection):
     '''
@@ -5575,4 +5580,37 @@ class section_XC_functionals(MSection):
         a_legacy=LegacyDefinition(name='XC_functional_weight'))
 
 
+class section_workflow(MSection):
+    '''
+    Section containing the results of a workflow.
+    '''
+
+    m_def = Section(validate=False, a_legacy=LegacyDefinition(name='section_workflow'))
+
+    workflow_type = Quantity(
+        type=str,
+        shape=[],
+        description='''
+        The type of calculation workflow. Can be one of relaxation, elastic, phonon,
+        or molecular dynamics.
+        ''',
+        a_legacy=LegacyDefinition(name='workflow_type'))
+
+    relaxation_energy_tolerance = Quantity(
+        type=np.dtype(np.float64),
+        shape=[],
+        unit='joule',
+        description='''
+        The energy difference tolerance between relaxation steps used as the convergence criterion.
+        ''',
+        a_legacy=LegacyDefinition(name='relaxation_energy_tolerance'))
+
+    workflow_final_calculation_ref = Quantity(
+        type=Reference(SectionProxy('section_single_configuration_calculation')),
+        shape=[],
+        description='''
+        Reference to the last calculation step.
+        ''')
+
+
 m_package.__init_metainfo__()
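The nomad/cli/parse.py and nomad/datamodel/metainfo/public.py changes above work together: a parser may now return any root section (for example section_run, which now also carries the new section_workflow sub-section), and the CLI attaches it to the EntryArchive by resolving the sub-section definition from the section's own name instead of going through datamodel.domains. Below is a minimal sketch of that wiring, assuming the metainfo API of this branch (m_create, attribute-style sub-section definitions on EntryArchive); it is not a verbatim excerpt of the patch.

```python
from nomad import datamodel
from nomad.datamodel.metainfo import public

# build a parsed root section that uses the new workflow sub-section
run = public.section_run()
workflow = run.m_create(public.section_workflow)
workflow.workflow_type = 'relaxation'

# attach it to the archive the way the patched code does: the sub-section
# definition is resolved from the root section's definition name
# ('section_run'), so datamodel.domains is no longer consulted
archive = datamodel.EntryArchive()
section_def = getattr(datamodel.EntryArchive, run.m_def.name)
archive.m_add_sub_section(section_def, run)
```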
diff --git a/nomad/parsing/legacy.py b/nomad/parsing/legacy.py
index 2cabcf9389bdb7aaad77cf68f2e56b0be3eb3071..0fdd0cfeab3b1de9cd5d645188f19d45b6a94952 100644
--- a/nomad/parsing/legacy.py
+++ b/nomad/parsing/legacy.py
@@ -244,11 +244,14 @@ class Backend(AbstractParserBackend):
         if section_def.extends_base_section:
             section_def = section_def.base_sections[0]
 
+        section = self.__open_sections.get((section_def, -1), None)
+        if not section:
+            section = self.open_sections[(section_def, -1)]
         if isinstance(property_def, Quantity):
-            return self.__open_sections[(section_def, -1)].m_get(property_def)
+            return section.m_get(property_def)
 
         elif isinstance(property_def, SubSection):
-            return self.__open_sections[(section_def, -1)].m_get_sub_sections(property_def)
+            return section.m_get_sub_sections(property_def)
 
     def metaInfoEnv(self):
         if self.__legacy_env is None:
@@ -302,6 +305,12 @@ class Backend(AbstractParserBackend):
         section = self.resolve_context(context_uri)
         self.__close(section)
 
+    @property
+    def open_sections(self):
+        for section in self.entry_archive.m_all_contents():
+            self.__open(section)
+        return self.__open_sections
+
     def __open(self, section):
         if section.m_parent_index != -1:
             self.__open_sections[(section.m_def, section.m_parent_index)] = section
@@ -357,7 +366,9 @@ class Backend(AbstractParserBackend):
         if section_def.extends_base_section:
             section_def = section_def.base_sections[0]
 
-        section = self.__open_sections[(section_def, g_index)]
+        section = self.__open_sections.get((section_def, g_index), None)
+        if not section:
+            section = self.open_sections[(section_def, g_index)]
 
         return section, quantity_def
 
diff --git a/nomad/parsing/parsers.py b/nomad/parsing/parsers.py
index c477e0dcbe9d706f6212a4c418f8b25aa91aefce..fa142373b3637313323ad49dbfd402870f61c66d 100644
--- a/nomad/parsing/parsers.py
+++ b/nomad/parsing/parsers.py
@@ -24,6 +24,7 @@ from .artificial import EmptyParser, GenerateRandomParser, TemplateParser, ChaosParser
 from eelsparser import EelsParser
 from mpesparser import MPESParser
 from aptfimparser import APTFIMParser
+from vaspparser import VASPParser
 
 try:
     # these packages are not available without parsing extra, which is ok, if the
@@ -121,18 +122,7 @@ parsers = [
         # mainfile_contents_re=r'', # Empty regex since this code calls other DFT codes.
         mainfile_name_re=(r'.*/phonopy-FHI-aims-displacement-0*1/control.in$')
     ),
-    LegacyParser(
-        name='parsers/vasp', code_name='VASP', code_homepage='https://www.vasp.at/',
-        parser_class_name='vaspparser.VASPRunParser',
-        mainfile_mime_re=r'(application/.*)|(text/.*)',
-        mainfile_contents_re=(
-            r'^\s*<\?xml version="1\.0" encoding="ISO-8859-1"\?>\s*'
-            r'?\s*<modeling>'
-            r'?\s*<generator>'
-            r'?\s*<i name="program" type="string">\s*vasp\s*</i>'
-            r'?'),
-        supported_compressions=['gz', 'bz2', 'xz']
-    ),
+    VASPParser(),
     VaspOutcarParser(
         name='parsers/vasp-outcar', code_name='VASP', code_homepage='https://www.vasp.at/',
         parser_class_name='vaspparser.VaspOutcarParser',
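The legacy.py change makes Backend tolerant of sections that were added to the archive directly (for example by the new VASPParser) rather than through the openSection bookkeeping: on a cache miss it rebuilds its open-sections index from entry_archive.m_all_contents() and retries the lookup. The toy class below illustrates that miss-then-rebuild pattern using the metainfo calls that appear in the hunk; TinyIndex is illustrative only and not part of the patch.

```python
from nomad import datamodel
from nomad.datamodel.metainfo import public


class TinyIndex:
    '''Illustrative stand-in for Backend's __open_sections bookkeeping.'''

    def __init__(self, entry_archive):
        self.entry_archive = entry_archive
        self._index = {}

    def _register(self, section):
        # same keys the Backend uses: (section definition, parent index) plus
        # (section definition, -1) for "the most recently seen one"
        if section.m_parent_index != -1:
            self._index[(section.m_def, section.m_parent_index)] = section
        self._index[(section.m_def, -1)] = section

    def get(self, section_def, g_index=-1):
        section = self._index.get((section_def, g_index))
        if section is None:
            # miss: the section was added straight to the archive, so walk all
            # contained sections once, register them, and look up again
            for contained in self.entry_archive.m_all_contents():
                self._register(contained)
            section = self._index[(section_def, g_index)]
        return section


archive = datamodel.EntryArchive()
run = archive.m_create(public.section_run)
index = TinyIndex(archive)
assert index.get(public.section_run.m_def, 0) is run
```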
diff --git a/nomad/processing/data.py b/nomad/processing/data.py
index fc51648500eda6195bf6ef4c009c829596785d00..4bed0ae4eccb1a9d3e0188eb8c5dca03974d2528 100644
--- a/nomad/processing/data.py
+++ b/nomad/processing/data.py
@@ -388,8 +388,9 @@ class Calc(Proc):
 
         if isinstance(self._parser_backend, MSection):
             backend = Backend(parser._metainfo_env, parser.domain)
-            root_section = datamodel.domains[parser.domain]['root_section']
-            setattr(backend.entry_archive, root_section, self._parser_backend)
+            root_section = self._parser_backend.m_def.name
+            section_def = getattr(datamodel.EntryArchive, root_section)
+            backend.entry_archive.m_add_sub_section(section_def, self._parser_backend)
             backend.resource.add(self._parser_backend)
             self._parser_backend = backend
 
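The same three-line replacement (resolve the EntryArchive sub-section definition from the parsed root section's name, add the section to the archive, register it with the resource) now appears in nomad/cli/parse.py, nomad/processing/data.py, and tests/test_parsing.py. A hedged sketch of a helper that would centralize it; backend_for_msection is a suggestion, not a function introduced by this patch.

```python
from nomad import datamodel
from nomad.parsing.legacy import Backend


def backend_for_msection(parser, root):
    '''Wrap a root MSection returned by a new-style parser in a legacy Backend.'''
    backend = Backend(parser._metainfo_env, parser.domain)
    # the EntryArchive sub-section is looked up by the root section's definition
    # name (e.g. 'section_run'), exactly as in the three call sites in this patch
    section_def = getattr(datamodel.EntryArchive, root.m_def.name)
    backend.entry_archive.m_add_sub_section(section_def, root)
    backend.resource.add(root)
    return backend
```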
diff --git a/tests/test_parsing.py b/tests/test_parsing.py
index 64520336f469c8689c0df97808948efcd3f2ff0a..4378f0c0e763063ab3fa39103f5465393d269057 100644
--- a/tests/test_parsing.py
+++ b/tests/test_parsing.py
@@ -289,13 +289,22 @@ def create_reference(data, pretty):
     return json.dumps(data, separators=(',', ':'))
 
 
-def assert_parser_result(backend, error=False):
-    status, errors = backend.status
-    assert status == 'ParseSuccess'
-    if error:
-        assert len(errors) > 0
-    else:
-        assert errors is None or len(errors) == 0
+@pytest.fixture(scope='function')
+def assert_parser_result(caplog):
+    def _assert(backend, error=False):
+        status, errors = backend.status
+        assert status == 'ParseSuccess'
+        if error:
+            if not errors:
+                errors = []
+                for record in caplog.get_records(when='call'):
+                    if record.levelname in ['WARNING', 'ERROR', 'CRITICAL']:
+                        errors.append(record.msg)
+            assert len(errors) > 0
+        else:
+            assert errors is None or len(errors) == 0
+
+    return _assert
 
 
 def assert_parser_dir_unchanged(previous_wd, current_wd):
@@ -308,8 +317,9 @@ def run_parser(parser_name, mainfile):
     result = parser.run(mainfile, logger=utils.get_logger(__name__))
     if isinstance(result, MSection):
         backend = Backend(parser._metainfo_env, parser.domain)
-        root_section = datamodel.domains[parser.domain]['root_section']
-        setattr(backend.entry_archive, root_section, result)
+        root_section = result.m_def.name
+        section_def = getattr(datamodel.EntryArchive, root_section)
+        backend.entry_archive.m_add_sub_section(section_def, result)
         backend.resource.add(result)
         result = backend
         result.domain = parser.domain
@@ -357,7 +367,7 @@ def add_calculation_info(backend: Backend, **kwargs) -> Backend:
 
 
 @pytest.mark.parametrize('parser_name, mainfile', parser_examples)
-def test_parser(parser_name, mainfile):
+def test_parser(parser_name, mainfile, assert_parser_result):
     previous_wd = os.getcwd()  # Get Working directory before parsing.
     parsed_example = run_parser(parser_name, mainfile)
     assert_parser_result(parsed_example)
@@ -365,7 +375,7 @@ def test_parser(parser_name, mainfile):
     assert_parser_dir_unchanged(previous_wd, current_wd=os.getcwd())
 
 
-def test_broken_xml_vasp():
+def test_broken_xml_vasp(assert_parser_result):
     parser_name, mainfile = 'parsers/vasp', 'tests/data/parsers/vasp/broken.xml'
     previous_wd = os.getcwd()  # Get Working directory before parsing.
     parsed_example = run_parser(parser_name, mainfile)
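assert_parser_result is now a function-scoped pytest fixture rather than a module-level helper, so every test that uses it must request it as an argument (as test_parser and test_broken_xml_vasp do above); with error=True it also accepts failures that are only visible as WARNING, ERROR, or CRITICAL log records captured by caplog. A sketch of an additional test written against the fixture, reusing run_parser and the broken.xml example file from this module; the test name is hypothetical.

```python
def test_vasp_error_is_reported(assert_parser_result):
    # run_parser wraps the new VASPParser result in a legacy Backend (see above)
    parsed_example = run_parser('parsers/vasp', 'tests/data/parsers/vasp/broken.xml')
    # passes if backend.status carries errors, or if warning-or-worse log records
    # were emitted while parsing the broken file
    assert_parser_result(parsed_example, error=True)
```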