Commit 95b3874b authored by Markus Scheidgen

Adapted the parser to the needs of the demonstrator.

parent 29949c2b
@@ -58,74 +58,61 @@ class APTFIMParser(AbstractBaseParser):
        with open(filepath, 'rt') as f:
            data = json.load(f)
        # print(data)
        # # You need to open sections before you can add values or sub sections to it.
        # # The returned 'gid' can be used to reference a specific section if multiple
        # # sections of the same type are opened.
        # test_gid = backend.openSection('experiment_cantext')
        # backend.addValue('context_headxx', data.get('experiment_type'))
        # backend.closeSection('experiment_context', test_gid)
        root_gid = backend.openSection('section_experiment')
        # # Values do not necessarily have to be read from the parsed file.
        # # The backend will check the type of the given value against the metadata definition.
        # backend.addValue('experiment_time', int(datetime.strptime(data.get('date'), '%d.%m.%Y').timestamp()))
        #
        # # Read data.
        # data_gid = backend.openSection('section_context')
        # data_gid = backend.openSection('section_experiment')
        # addValue:
        #   first argument: string identifier (keyword) in the output JSON, i.e. defined in aptfim.nomadmetainfo.json (the generated parser result JSON)
        #   second argument: string identifier (keyword) in the input JSON (i.e. the small metadata file accompanying the dataset)
        # backend.addValue('data_repository_name', data.get('data_repository_name'))
        # backend.addValue('data_repository_url', data.get('data_repository_url'))
        # backend.addValue('data_preview_url', data.get('data_preview_url'))
        # backend.addValue('real_one', data.get('experiment_typpe'))
        # backend.closeSection('section_context', data_gid)
        # Read general tool environment details
        # general_gid = backend.openSection('section_experiment_general_parameters')
        backend.addValue('experiment_method', data.get('experiment_method'))
        backend.addValue('experiment_location', data.get('experiment_location'))
        backend.addValue('experiment_facility_institution', data.get('experiment_facility_institution'))
        backend.addValue('experiment_tool_info', data.get('instrument_info'))  # test the case where the input.json keyword differs from the output.json keyword
        # backend.addValue('experiment_data_global_start', np.array(re.findall(r"[\w']+", data.get('experiment_data_global_start'))))
        # backend.addValue('experiment_data_global_end', np.array(re.findall(r"[\w']+", data.get('experiment_data_global_end'))))
        # backend.addValue('experiment_data_local_start', np.array(re.findall(r"[\w']+", data.get('experiment_data_local_start'))))
        # backend.addValue('experiment_operation_method', data.get('experiment_operation_method'))
        # backend.addValue('experiment_imaging_method', data.get('experiment_imaging_method'))
        backend.addValue('experiment_summary', '%s of %s.' % (data.get('experiment_method').capitalize(), data.get('specimen_description')))
        try:
            backend.addValue('experiment_time', int(datetime.strptime(data.get('experiment_date_global_start'), '%d.%m.%Y %H:%M:%S').timestamp()))
        except ValueError:
            pass
        try:
            backend.addValue('experiment_end_time', int(datetime.strptime(data.get('experiment_date_global_end'), '%d.%m.%Y %H:%M:%S').timestamp()))
        except ValueError:
            pass
        # Read data parameters
        data_gid = backend.openSection('section_data')
        backend.addValue('data_repository_name', data.get('data_repository_name'))
        backend.addValue('data_repository_url', data.get('data_repository_url'))
        preview_url = data.get('data_preview_url')
        # TODO: This is a little hack to correct the preview url and should be removed
        # after the urls are corrected
        preview_url = '%s/files/%s' % tuple(preview_url.rsplit('/', 1))
        backend.addValue('data_preview_url', preview_url)
        backend.closeSection('section_data', data_gid)
        # Read parameters related to the method
        method_gid = backend.openSection('section_method')
        backend.addValue('experiment_method_name', data.get('experiment_method'))
        backend.addValue('experiment_method_abbreviation', 'APT/FIM')
        backend.addValue('probing_method', 'electric pulsing')
        # backend.addValue('experiment_tool_info', data.get('instrument_info'))  # test the case where the input.json keyword differs from the output.json keyword
        # measured_pulse_voltage, for instance, should be a conditional read
        # backend.addValue('measured_number_ions_evaporated', data.get('measured_number_ions_evaporated'))
        # backend.addValue('measured_detector_hit_pos', data.get('measured_detector_hit_pos'))
        # backend.addValue('measured_detector_hit_mult', data.get('measured_detector_hit_mult'))
        # backend.addValue('measured_detector_dead_pulses', data.get('measured_detector_dead_pulses'))
        # backend.addValue('measured_time_of_flight', data.get('measured_time_of_flight'))
        # backend.addValue('measured_standing_voltage', data.get('measured_standing_voltage'))
        # backend.addValue('measured_pulse_voltage', data.get('measured_pulse_voltage'))
        # backend.addValue('experiment_operation_method', data.get('experiment_operation_method'))
        # backend.addValue('experiment_imaging_method', data.get('experiment_imaging_method'))
        backend.closeSection('section_method', method_gid)
        # Read parameters related to the sample
        # backend.addValue('specimen_description', data.get('specimen_description'))
        # backend.addValue('specimen_microstructure', data.get('specimen_microstructure'))
        # backend.addValue('specimen_constitution', data.get('specimen_constitution'))
        # parse chemical composition
        # measured_pulse_voltage, for instance, should be a conditional read
        # backend.addValue('measured_number_ions_evaporated', data.get('measured_number_ions_evaporated'))
        # backend.addValue('measured_detector_hit_pos', data.get('measured_detector_hit_pos'))
        # backend.addValue('measured_detector_hit_mult', data.get('measured_detector_hit_mult'))
        # backend.addValue('measured_detector_dead_pulses', data.get('measured_detector_dead_pulses'))
        # backend.addValue('measured_time_of_flight', data.get('measured_time_of_flight'))
        # backend.addValue('measured_standing_voltage', data.get('measured_standing_voltage'))
        # backend.addValue('measured_pulse_voltage', data.get('measured_pulse_voltage'))
        # To add arrays (vectors, matrices, etc.) use addArrayValues and provide a
        # numpy array. The shape of the numpy array must match the shape defined in
        # the respective metadata definition.
        sample_gid = backend.openSection('section_sample')
        backend.addValue('sample_description', data.get('specimen_description'))
        backend.addValue('sample_microstructure', data.get('specimen_microstructure'))
        backend.addValue('sample_constituents', data.get('specimen_constitution'))
        atom_labels = data.get('specimen_chemistry')
        formula = ase.Atoms(atom_labels).get_chemical_formula()
        backend.addArrayValues('sample_atom_labels', np.array(atom_labels))
        backend.addValue('sample_chemical_formula', formula)
        backend.closeSection('section_sample', sample_gid)
        # Close sections in the reverse order
        # backend.closeSection('section_experiment', data_gid)
        # backend.closeSection('section_data', data_gid)
        backend.closeSection('section_experiment', root_gid)
        # backend.closeSection('section_experiment_general_parameters', general_gid)
        # backend.closeSection('section_experiment_source_parameters', source_gid)
        # backend.closeSection('section_experiment_detector_parameters', detector_gid)
        # backend.closeSection('section_experiment_sample_parameters', sample_gid)
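For orientation, the keys read via data.get() above suggest an input metadata file roughly like the following. This is only a sketch: the key names come from the parser code, while the file name and all values are hypothetical (the instrument name reuses the LEAP5000XS example from the metainfo description).

import json

# Hypothetical example of the small metadata JSON the APT/FIM parser reads.
# Key names follow the data.get() calls above; every value is made up.
example = {
    'experiment_method': 'atom probe tomography',
    'experiment_location': 'Germany, Berlin',
    'experiment_facility_institution': 'Some University',
    'instrument_info': 'LEAP5000XS',                    # mapped to experiment_tool_info
    'experiment_date_global_start': '01.01.2019 12:00:00',
    'experiment_date_global_end': '01.01.2019 18:00:00',
    'data_repository_name': 'zenodo.org',
    'data_repository_url': 'https://zenodo.org/path/to/mydata',
    'data_preview_url': 'https://example.org/preview/thumbnail.png',
    'specimen_description': 'an Al-Sc alloy tip',
    'specimen_microstructure': 'single crystal',
    'specimen_constitution': 'single phase',
    'specimen_chemistry': ['Al', 'Sc'],                 # element labels for section_sample
}

with open('example_aptfim.json', 'wt') as f:
    json.dump(example, f, indent=2)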
@@ -10,29 +10,6 @@
     }
   ],
   "metaInfos": [
-    {
-      "description": "String identifier aka name of the repository where the raw data to the experiment is available",
-      "name": "data_repository_name",
-      "dtypeStr": "C",
-      "shape": [],
-      "superNames": ["section_experiment"],
-      "units": ""
-    },
-    {
-      "description": "URL of this repository",
-      "name": "data_repository_url",
-      "dtypeStr": "C",
-      "shape": [],
-      "superNames": ["section_experiment"],
-      "units": ""
-    },
-    {
-      "description": "Thumbnail image informing about the experiment",
-      "name": "data_preview_url",
-      "dtypeStr": "C",
-      "shape": [],
-      "superNames": ["section_experiment"],
-      "units": ""
-    },
     {
       "description": "Shape of the None/Null object",
       "name": "none_shape",
@@ -41,30 +18,6 @@
       "shape": [],
       "superNames": ["section_experiment"]
     },
-    {
-      "description": "Full name of the experimental method in use",
-      "name": "experiment_method",
-      "dtypeStr": "C",
-      "shape": [],
-      "superNames": ["section_experiment"],
-      "units": ""
-    },
-    {
-      "description": "Name of the city and country the experiment took place, format 'Country, City'",
-      "name": "experiment_location",
-      "dtypeStr": "C",
-      "shape": [],
-      "superNames": ["section_experiment"],
-      "units": ""
-    },
-    {
-      "description": "Name of the institution hosting the experimental facility",
-      "name": "experiment_facility_institution",
-      "dtypeStr": "C",
-      "shape": [],
-      "superNames": ["section_experiment"],
-      "units": ""
-    },
     {
       "description": "Name of the equipment, instrument with which the experiment was performed e.g. LEAP5000XS",
       "name": "experiment_tool_info",
@@ -73,30 +26,6 @@
       "superNames": ["section_experiment"],
       "units": ""
     },
-    {
-      "description": "UTC start time of the experiment, format 'DD.MM.YYYY - HH.MM.SS'",
-      "name": "experiment_date_global_start",
-      "dtypeStr": "C",
-      "shape": [],
-      "superNames": ["section_experiment"],
-      "units": "C"
-    },
-    {
-      "description": "UTC end time of the experiment, format 'DD.MM.YYYY - HH.MM.SS'",
-      "name": "experiment_date_global_end",
-      "dtypeStr": "C",
-      "shape": [],
-      "superNames": ["section_experiment"],
-      "units": "C"
-    },
-    {
-      "description": "Local start time of the experiment, format 'DD.MM.YYYY - HH.MM.SS'",
-      "name": "experiment_date_local_start",
-      "dtypeStr": "C",
-      "shape": [],
-      "superNames": ["section_experiment"],
-      "units": "C"
-    },
     {
       "description": "Operation mode of the instrument (APT, FIM or combination)",
       "name": "experiment_operation_method",
@@ -16,14 +16,14 @@ from setuptools import setup, find_packages
 def main():
     setup(
-        name='skeletonparser',  # replace with new name for parser's python package
+        name='aptfimparser',  # replace with new name for parser's python package
         version='0.1',
-        description='A skeleton NOMAD parser implementation.',  # change accordingly
-        author='',  # add your names
+        description='A NOMAD parser implementation for aptfim data.',  # change accordingly
+        author='Markus Kühbach',  # add your names
         license='APACHE 2.0',
         packages=find_packages(),
         package_data={
-            'skeletonparser': ['*.json']
+            'aptfimparser': ['*.json']
         },
         install_requires=[
             'nomadcore'
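With the package renamed to aptfimparser, running the parser would follow the same pattern as the skeleton run script shown further below. This is only a sketch: APTFIMParserInterface is an assumed class name, by analogy with SkeletonParserInterface; the diff only shows the APTFIMParser class itself.

import sys

from nomad.parsing import LocalBackend
# assumed interface class name, not shown in this commit
from aptfimparser import APTFIMParserInterface

if __name__ == "__main__":
    # instantiate the parser via its interface with a LocalBackend
    parser = APTFIMParserInterface(backend=LocalBackend)
    # parse the metadata JSON given as the first command line argument
    parser.parse(sys.argv[1])
    # print the parsed sections as JSON
    parser.parser_context.super_backend.write_json(
        sys.stdout, pretty=True, root_sections=['section_experiment'])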
# Copyright 2016-2018 Markus Scheidgen
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os.path
import json
import ase
import numpy as np
from datetime import datetime
from nomadcore.simple_parser import SimpleMatcher
from nomadcore.baseclasses import ParserInterface, AbstractBaseParser
from nomad.parsing import LocalBackend


class SkeletonParserInterface(ParserInterface):
    def get_metainfo_filename(self):
        """
        The parser specific metainfo. To include other metadata definitions, use
        the 'dependencies' key to refer to other local nomadmetainfo.json files or
        to nomadmetainfo.json files that are part of the general nomad-meta-info
        submodule (i.e. ``dependencies/nomad-meta-info``).
        """
        return os.path.join(os.path.dirname(__file__), 'skeleton.nomadmetainfo.json')

    def get_parser_info(self):
        """ Basic info about parser used in archive data and logs. """
        return {
            'name': 'your parser name',
            'version': '1.0.0'
        }

    def setup_version(self):
        """ Can be used to call :func:`setup_main_parser` differently for different code versions. """
        self.setup_main_parser(None)

    def setup_main_parser(self, _):
        """ Setup the actual parser (behind this interface) """
        self.main_parser = SkeletonParser(self.parser_context)


class SkeletonParser(AbstractBaseParser):
    def parse(self, filepath):
        backend = self.parser_context.super_backend

        with open(filepath, 'rt') as f:
            data = json.load(f)

        # You need to open sections before you can add values or sub sections to it.
        # The returned 'gid' can be used to reference a specific section if multiple
        # sections of the same type are opened.
        root_gid = backend.openSection('section_experiment')
        # Values are added to the open section of the given metadata definitions. In
        # the following case 'experiment_location' is a quantity of 'section_experiment'.
        # When multiple sections of the same type (e.g. 'section_experiment') are open,
        # you can use the 'gid' as an additional argument.
        backend.addValue('experiment_location', data.get('location'))
        # The backend will check the type of the given value against the metadata definition.
        backend.addValue('experiment_time', int(datetime.strptime(data.get('date'), '%d.%m.%Y').timestamp()))

        # Subsections work like before. The parent section must still be open.
        method_gid = backend.openSection('section_method')
        backend.addValue('experiment_method_name', data.get('method', 'Bare eyes'))
        # Values do not necessarily have to be read from the parsed file.
        backend.addValue('probing_method', 'laser pulsing')
        backend.closeSection('section_method', method_gid)

        data_gid = backend.openSection('section_data')
        backend.addValue('data_repository_name', 'zenodo.org')
        backend.addValue('data_repository_url', 'https://zenodo.org/path/to/mydata')
        backend.addValue('data_preview_url', 'https://www.physicsforums.com/insights/wp-content/uploads/2015/09/fem.jpg')
        backend.closeSection('section_data', data_gid)

        # Subsections work like before. The parent section must still be open.
        sample_gid = backend.openSection('section_sample')
        backend.addValue('sample_chemical_name', data.get('sample_chemical'))
        backend.addValue('sample_chemical_formula', data.get('sample_formula'))
        backend.addValue('sample_temperature', data.get('sample_temp'))
        backend.addValue('sample_microstructure', 'thin films')
        backend.addValue('sample_constituents', 'multi phase')

        atoms = set(ase.Atoms(data.get('sample_formula')).get_chemical_symbols())
        # To add arrays (vectors, matrices, etc.) use addArrayValues and provide a
        # numpy array. The shape of the numpy array must match the shape defined in
        # the respective metadata definition.
        backend.addArrayValues('sample_atom_labels', np.array(list(atoms)))

        # Close sections in the reverse order.
        backend.closeSection('section_sample', sample_gid)
        backend.closeSection('section_experiment', root_gid)
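The timestamp conversions above rely on strptime format directives, where %m is the month and %M the minute (an easy mix-up), and the resulting datetime is stored as integer seconds since the epoch. A minimal sketch with made-up date strings:

from datetime import datetime

# %d = day, %m = month (not %M, which is the minute), %Y = four-digit year,
# %H:%M:%S = hours, minutes, seconds
date_only = datetime.strptime('21.07.2019', '%d.%m.%Y')
full = datetime.strptime('21.07.2019 14:30:05', '%d.%m.%Y %H:%M:%S')

# the parsers store seconds since the epoch as an integer
print(int(date_only.timestamp()), int(full.timestamp()))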
# Copyright 2016-2018 Markus Scheidgen
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from nomad.parsing import LocalBackend
from skeletonparser import SkeletonParserInterface
if __name__ == "__main__":
    # instantiate the parser via its interface with a LocalBackend
    parser = SkeletonParserInterface(backend=LocalBackend)
    # call the actual parsing with the given mainfile
    parser.parse(sys.argv[1])
    # print the results stored in the LocalBackend
    parser.parser_context.super_backend.write_json(
        sys.stdout, pretty=True, root_sections=['section_experiment'])
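The mainfile passed as sys.argv[1] is a small JSON file; judging from the data.get() calls in SkeletonParser.parse, it would look roughly like the sketch below. Key names come from the code, the values and the file name are hypothetical.

import json

# Hypothetical input for the skeleton parser.
example = {
    'location': 'Germany, Berlin',
    'date': '21.07.2019',            # parsed with '%d.%m.%Y'
    'method': 'field ion microscopy',
    'sample_chemical': 'iron oxide',
    'sample_formula': 'Fe2O3',
    'sample_temp': 300,
}

with open('example.json', 'wt') as f:
    json.dump(example, f, indent=2)

# then point the run script above at it: python <run script> example.json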
{
  "type": "nomad_meta_info_1_0",
  "description": "Parser specific metadata definitions.",
  "dependencies": [
    {
      "metainfoPath": "general.nomadmetainfo.json"
    },
    {
      "metainfoPath": "general.experimental.nomadmetainfo.json"
    }
  ],
  "metaInfos": [
    {
      "description": "Contains information relating to an archive.",
      "name": "experiment_location",
      "dtypeStr": "C",
      "superNames": ["section_experiment"]
    }
  ]
}
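Each entry in "metaInfos" defines one quantity the parser may fill. A hedged sketch of adding another definition, modeled on the string-valued entries shown earlier (which all use dtypeStr "C"); the quantity name, description, and file path are made up for illustration:

import json

# Hypothetical: append one more quantity definition to the skeleton metainfo.
new_quantity = {
    "description": "Free-text comment on the experiment",
    "name": "experiment_comment",
    "dtypeStr": "C",
    "shape": [],
    "superNames": ["section_experiment"]
}

path = 'skeletonparser/skeleton.nomadmetainfo.json'  # assumed location
with open(path, 'rt') as f:
    metainfo = json.load(f)

metainfo['metaInfos'].append(new_quantity)

with open(path, 'wt') as f:
    json.dump(metainfo, f, indent=2)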