Commit c766ace1 authored by Guido Petretto's avatar Guido Petretto
Browse files

include abiflows

parent fb31b2b6
......@@ -114,9 +114,12 @@ RUN pip install -e .
# RUN pip install git+https://github.com/fekad/jupyter-fireworks-proxy.git \
# && jupyter serverextension enable --sys-prefix jupyter_server_proxy
# temporarily install abiflows from develop repository
RUN pip install git+https://github.com/gpetretto/abiflows.git@develop
WORKDIR $HOME
COPY --chown=$NB_UID:$NB_GID tutorials tutorials
COPY --chown=$NB_UID:$NB_GID configs/my_launchpad.yaml .fireworks/
COPY --chown=$NB_UID:$NB_GID configs/manager.yml configs/scheduler.yml .abinit/abipy/
COPY --chown=$NB_UID:$NB_GID configs/FW_config.yaml configs/my_launchpad.yaml configs/my_fworker.yaml configs/my_qadapter.yaml configs/SLURM_template.txt .fireworks/
COPY --chown=$NB_UID:$NB_GID configs/manager.yml configs/scheduler.yml configs/fw_manager.yaml .abinit/abipy/
# FireWorks global configuration (FW_config.yaml for the jovyan user).
# Write the FW.json file in each launch directory.
PRINT_FW_JSON: True
# Locations of the LaunchPad / FWorker / queue-adapter config files.
LAUNCHPAD_LOC: /home/jovyan/.fireworks/my_launchpad.yaml
FWORKER_LOC: /home/jovyan/.fireworks/my_fworker.yaml
QUEUEADAPTER_LOC: /home/jovyan/.fireworks/my_qadapter.yaml
# Name given to the script generated and submitted to the queue.
SUBMIT_SCRIPT_NAME: FW_submit.script
# NOTE(review): presumably the polling interval (seconds) between queue
# status checks — confirm against the FireWorks documentation.
QUEUE_UPDATE_INTERVAL: 3
# Store exception details when a firework is rerun.
EXCEPT_DETAILS_ON_RERUN: True
REMOVE_USELESS_DIRS: True
#!/bin/bash -l
# SLURM submission template consumed by the FireWorks CommonAdapter
# (referenced as _fw_template_file in my_qadapter.yaml).
# Each $${...} placeholder is substituted from the queue-adapter settings.
# NOTE(review): options whose value is not provided are presumably dropped
# from the generated script by the adapter — confirm.
#SBATCH --nodes=$${nodes}
#SBATCH --ntasks=$${ntasks}
#SBATCH --ntasks-per-node=$${ntasks_per_node}
#SBATCH --ntasks-per-core=$${ntasks_per_core}
#SBATCH --core-spec=$${core_spec}
#SBATCH --exclude=$${exclude_nodes}
#SBATCH --cpus-per-task=$${cpus_per_task}
#SBATCH --gpus-per-task=$${gpus_per_task}
#SBATCH --gres=$${gres}
#SBATCH --qos=$${qos}
#SBATCH --time=$${walltime}
#SBATCH --time-min=$${time_min}
#SBATCH --partition=$${queue}
#SBATCH --account=$${account}
#SBATCH --job-name=$${job_name}
#SBATCH --license=$${license}
#SBATCH --output=$${job_name}-%j.out
#SBATCH --error=$${job_name}-%j.error
#SBATCH --constraint=$${constraint}
#SBATCH --signal=$${signal}
#SBATCH --mem=$${mem}
#SBATCH --mem-per-cpu=$${mem_per_cpu}
#SBATCH --mail-type=$${mail_type}
#SBATCH --mail-user=$${mail_user}
# Optional user commands before launching, then run the rocket in launch_dir.
$${pre_rocket}
cd $${launch_dir}
$${rocket_launch}
$${post_rocket}
fw_policy:
autoparal: True
rerun_same_dir: False
allow_local_restart: False
mpirun_cmd: mpirun
max_restarts: 30
abipy_manager: /home/jovyan/.abinit/abipy/manager.yml
......@@ -4,26 +4,44 @@
# policy:
# autoparal: 0
hardware: &hardware
num_nodes: 1
sockets_per_node: 1
cores_per_socket: 4
mem_per_node: 16 Gb
job: &job
mpi_runner: "mpirun"
# pre_run: "export PATH=$HOME/git_repos/abinit/build_gcc/src/98_main:$PATH"
qadapters: # List of `qadapters` objects (just one in this simplified example)
- priority: 1
queue:
qtype: shell # "Submit" jobs via the shell.
qname: localhost # "Submit" to the localhost queue
- priority: 2
queue:
qtype: shell # "Submit" jobs via the shell.
qname: localhost # "Submit" to the localhost queue
# (it's a fake queue in this case)
job:
mpi_runner: "mpirun"
# pre_run: "export PATH=$HOME/git_repos/abinit/build_gcc/src/98_main:$PATH"
limits:
timelimit: 01:00:00 # Time-limit for each task.
max_cores: 4 # Max number of cores that can be used by a single task.
hint_cores: 2
hardware: *hardware
job: *job
limits:
timelimit: 01:00:00 # Time-limit for each task.
max_cores: 4 # Max number of cores that can be used by a single task.
hint_cores: 2
- priority: 1
queue:
qname: debug
qtype: slurm
limits:
timelimit: 2:00:0
min_cores: 1
max_cores: 2
min_mem_per_proc: 1
hardware: *hardware
job: *job
hardware:
num_nodes: 1
sockets_per_node: 1
cores_per_socket: 4
mem_per_node: 16 Gb
##############################
# FireWorks worker definition (presumably my_fworker.yaml — the sections in
# this view appear to be separate config files concatenated by the diff page).
name: jovyan_worker
category: ''
query: '{}'
# Queue adapter (presumably my_qadapter.yaml): settings used by the FireWorks
# CommonAdapter to generate and submit SLURM jobs.
_fw_name: CommonAdapter
_fw_q_type: SLURM
# Template whose $${...} placeholders are filled from the keys below.
_fw_template_file: /home/jovyan/.fireworks/SLURM_template.txt
# Command executed inside the submitted job.
rocket_launch: rlaunch singleshot
ntasks: 1
walltime: '02:0:0'
# Submit to the "debug" partition (defined in slurm.conf).
queue: debug
# NOTE(review): null entries presumably leave the corresponding template
# options out of the generated script — confirm.
account: null
job_name: null
pre_rocket: null
post_rocket: null
......@@ -45,5 +45,5 @@ SlurmctldLogFile=/var/log/slurm-llnl/slurmctld.log
SlurmdLogFile=/var/log/slurm-llnl/slurmd.log
# COMPUTE NODES
NodeName=localhost CPUs=1 State=UNKNOWN
NodeName=localhost CPUs=4 State=UNKNOWN
PartitionName=debug Nodes=localhost Default=YES MaxTime=INFINITE State=UP
%% Cell type:code id:realistic-humanity tags:
``` python
from abiflows.database.mongoengine.utils import DatabaseData
from pymatgen.core import Structure
from abiflows.database.mongoengine.abinit_results import RelaxResult, DfptResult
from jupyter_jsmol.pymatgen import quick_view
```
%% Cell type:code id:martial-football tags:
``` python
%matplotlib notebook
```
%% Cell type:code id:altered-outdoors tags:
``` python
db = DatabaseData(host='mongo', port=27017, collection='relax',
database='fireworks', username=None, password=None)
db.connect_mongoengine()
```
%% Cell type:code id:proprietary-humidity tags:
``` python
# Query the "relax" collection for the mp-149 (silicon) relaxation result.
# switch_collection is needed because mongoengine would otherwise query a
# collection named after the class (RelaxResult).
# (Indentation of the `with` body was lost in the scraped view; restored here.)
with db.switch_collection(RelaxResult) as RelaxResult:
    relaxed_results = RelaxResult.objects(mp_id="mp-149")
    relaxed = relaxed_results[0]
```
%% Cell type:code id:useful-indicator tags:
``` python
# load the relaxed Structure
structure = Structure.from_dict(relaxed.abinit_output.structure)
```
%% Cell type:code id:going-agent tags:
``` python
quick_view(structure)
```
%% Cell type:code id:suitable-czech tags:
``` python
gsr = relaxed.abinit_output.gsr.abiopen()
print(gsr)
```
%% Cell type:code id:swedish-poison tags:
``` python
fig = gsr.plot_bz()
```
%% Cell type:code id:proved-endorsement tags:
``` python
# Point the same DatabaseData at the "phonon" collection and fetch the first
# stored DFPT result. switch_collection is required so mongoengine queries the
# selected collection instead of one named after the DfptResult class.
# (Indentation of the `with` body was lost in the scraped view; restored here.)
db.collection = "phonon"
with db.switch_collection(DfptResult) as DfptResult:
    ph_results = DfptResult.objects()
    ph_res = ph_results[0]
```
%% Cell type:code id:narrative-religion tags:
``` python
ddb = ph_res.abinit_output.ddb.abiopen()
```
%% Cell type:code id:antique-ceremony tags:
``` python
phb = ddb.anaget_phmodes_at_qpoint([0,0,0])
```
%% Cell type:code id:cutting-hopkins tags:
``` python
phb.phfreqs
```
%% Cell type:code id:ambient-glossary tags:
``` python
phbst, phdos = ddb.anaget_phbst_and_phdos_files(line_density=10)
```
%% Cell type:code id:electric-narrative tags:
``` python
fig = phbst.plot_phbands()
```
%% Cell type:code id:comparative-pacific tags:
``` python
phbst.phbands.create_phononwebsite_json("/home/jovyan/Si_phononwebsite.json")
```
%% Cell type:markdown id:electric-second tags:
Download the Si_phononwebsite.json file to your local machine and upload it on the following website to display the atomic displacements associated with the phonon modes: http://henriquemiranda.github.io/phononwebsite/phonon.html
"""Generate a DFPT (phonon) workflow for a previously relaxed structure.

Reads the relaxed structure and the relaxation input from the "relax"
collection, builds a DfptFWWorkflow reusing the same approximations and
k-point sampling, and adds it to the FireWorks launchpad.  Results will be
stored in the "phonon" collection.

NOTE(review): the flat indentation in the scraped view made this script
syntactically invalid; block structure has been restored here, matching the
notebook cell that runs the same query.
"""
from abipy.abilab import Structure
from abiflows.fireworks.workflows.abinit_workflows import DfptFWWorkflow
from abiflows.database.mongoengine.utils import DatabaseData
from abiflows.database.mongoengine.abinit_results import RelaxResult

# data for the database where the relaxed structures were stored
source_db = DatabaseData(host='mongo', port=27017, collection='relax',
                         database='fireworks', username=None, password=None)

# data for the database where the phonon results will be stored.
# note that these can be in different databases or in the same.
# The collections should be different.
db = DatabaseData(host='mongo', port=27017, collection='phonon',
                  database='fireworks', username=None, password=None)

# Open the connection to the database
source_db.connect_mongoengine()

# in case you are using multiple workers for the same fireworks db (i.e.
# different clusters or queues) it may be a good idea to set the worker
# explicitly. One can just get the name from the configuration:
# fworker = FWorker.from_file(os.path.join(os.getenv("HOME"), ".fireworks", "my_fworker.yaml"))
# or you can also just write the name of the fworker explicitly:
# fworker_name = 'name_of_the_fworker'

mp_id = 'mp-149'

# This context manager is required to use the collection name selected in
# source_db. By default mongoengine uses the name of the class (in this case
# RelaxResult) as the name of the collection to query.
with source_db.switch_collection(RelaxResult) as RelaxResult:
    # download from the database the relaxed structure.
    # This relies on mongoengine (http://mongoengine.org/) to interact with the
    # database. See the module abiflows.database.mongoengine.abinit_results for
    # the objects used to store the results.
    relaxed_results = RelaxResult.objects(mp_id=mp_id)

    # Assume that there is one and only one result matching the query. In real
    # cases you might want to check this. At this point "relaxed" is an
    # instance of a RelaxResult object.
    relaxed = relaxed_results[0]

# load the relaxed Structure
structure = Structure.from_dict(relaxed.abinit_output.structure)

# use the same k-point sampling as the one of the relax
kppa = relaxed.abinit_input.kppa
ngkpt = relaxed.abinit_input.ngkpt

# The AbinitInput object used for the relax is stored in the database.
# We get it to use the same approximations used during the relaxation.
relax_input = relaxed.abinit_input.last_input.to_mgobj()

# We use the same k and q point grid
qppa = kppa

extra_abivars = dict(chkprim=1, nstep=100, chksymbreak=1, paral_kgb=1)

# as for the relax workflow, information stored in the database for the
# calculation. In particular information about the source structure.
initialization_info = dict(kppa=kppa, mp_id=mp_id,
                           relax_db=source_db.as_dict_no_credentials(),
                           relax_id=relaxed.id,
                           relax_tol_val=1e-6, qppa=qppa)

# In this case the base is the input file of the relax workflow.
# Use the DfptFWWorkflow that allows calculating the different kinds of DFPT
# perturbations with abinit in a single workflow. In this case only the phonons.
gen = DfptFWWorkflow.from_gs_input(structure=structure, gs_input=relax_input,
                                   extra_abivars=extra_abivars, autoparal=True,
                                   initialization_info=initialization_info,
                                   do_ddk=True, do_dde=True, ph_ngqpt=[1, 1, 1],
                                   do_strain=False)

# add to the workflow a step that automatically adds the results to the
# database in the collection specified above.
gen.add_mongoengine_db_insertion(db)

# add a step to the workflow that cleans up files with these extensions once
# the other calculations are completed. The list of extensions is customizable
# and these are usually files that won't be needed again.
# Here we do not delete the DDB files.
gen.add_final_cleanup(["WFK", "1WF", "WFQ", "1POT", "1DEN"])

# This will specify that all the steps will be forced to be executed on the
# same worker and will set the worker to the one chosen before for the existing
# fireworks. This step is not mandatory.
# gen.fix_fworker(fworker_name)

# adds the workflow to the fireworks database. It will use the fireworks
# LaunchPad that has been set by default. If a different one should be used it
# can be passed as an argument.
gen.add_to_db()
"""Generate a structural relaxation workflow for silicon (mp-149).

Builds a RelaxFWWorkflow from pseudo-dojo pseudopotentials and a CIF
structure, adds a database-insertion and a cleanup step, and submits the
workflow to the FireWorks launchpad.  Results go to the "relax" collection.

NOTE(review): the flat indentation in the scraped view made the try/except
syntactically invalid; block structure has been restored here.
"""
from fireworks import FWorker
import os
import pseudo_dojo
from abiflows.fireworks.workflows.abinit_workflows import RelaxFWWorkflow
from abiflows.database.mongoengine.utils import DatabaseData
from pymatgen.ext.matproj import MPRester
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
import abipy.data as abidata

# use the pseudo dojo table of pseudopotentials. These are good
# pseudopotentials. If you want to use some other kind of pseudos you will
# need to provide explicitly the cutoff for the calculation.
pseudo_table = pseudo_dojo.OfficialDojoTable.from_djson_file(
    os.path.join(pseudo_dojo.dojotable_absdir("ONCVPSP-PBE-PDv0.4"), 'standard.djson'))
pseudo_path = pseudo_dojo.dojotable_absdir("ONCVPSP-PBE-PDv0.4")

# connection data of the output MongoDB database. It can be the same database
# used for fireworks with other collections or a different one.
db = DatabaseData(host='mongo', port=27017, collection='relax',
                  database='fireworks', username=None, password=None)

# in case you are using multiple workers for the same fireworks db (i.e.
# different clusters or queues) it may be a good idea to set the worker
# explicitly. Here I just get the name:
# fworker = FWorker.from_file(os.path.join(os.getenv("HOME"), ".fireworks", "my_fworker.yaml"))

# Get the structure from the Materials Project. mp-149 is silicon.
mp_id = 'mp-149'
# structure = MPRester().get_structure_by_material_id(mp_id)
structure = abidata.structure_from_cif("si.cif")
spga = SpacegroupAnalyzer(structure)
structure = spga.get_primitive_standard_structure()

# check if the pseudos are available and select just those needed for the
# specific structure.
try:
    pseudos = pseudo_table.get_pseudos_for_structure(structure)
except Exception as e:
    # Narrowed from BaseException so KeyboardInterrupt/SystemExit are not
    # swallowed; SystemExit is preferred over the site-module exit() helper.
    print("no pseudo")
    raise SystemExit(1)

# density of k-points per reciprocal atom. set to 1500 for phonons.
kppa = 500

# this will be read at the end of the workflow to store this information in
# the database. It is not mandatory.
initialization_info = dict(kppa=kppa, mp_id=mp_id)

# use a more strict tolmxf in case this might be needed, for example for
# phonon calculations.
tolmxf = 1e-5

# override some default parameters from the factory function
extra_abivars = dict(tolmxf=tolmxf, ionmov=2, chksymbreak=1, ntime=30, nstep=100)
# uncomment this if you want to try paral_kgb=1
# extra_abivars['paral_kgb'] = 1

# this will create a fireworks workflow object (still not added to the
# fireworks database). Check the function for the different options available.
# The OneSymmetric option will set a single shift that respects the symmetry
# of the crystal. The target_dilatmx means that the dilatmx parameter will be
# automatically progressively reduced and the relaxation restarted until the
# desired value has been used.
gen = RelaxFWWorkflow.from_factory(structure, pseudo_table, kppa=kppa,
                                   spin_mode="unpolarized",
                                   extra_abivars=extra_abivars, autoparal=True,
                                   initialization_info=initialization_info,
                                   target_dilatmx=1.01, smearing=None,
                                   shift_mode='OneSymmetric', ecut=5)

# add to the workflow a step that automatically adds the results to the
# database in the collection specified above.
gen.add_mongoengine_db_insertion(db)

# add a step to the workflow that cleans up files with these extensions once
# the other calculations are completed. The list of extensions is customizable
# and these are usually files that won't be needed again.
gen.add_final_cleanup(["WFK", "1WF", "DEN", "WFQ", "DDB"])

# This will specify that all the steps will be forced to be executed on the
# same worker and will set the worker to the one chosen before for the
# existing fireworks. This step is not mandatory.
# gen.fix_fworker(fworker.name)

# adds the workflow to the fireworks database. It will use the fireworks
# LaunchPad that has been set by default. If a different one should be used it
# can be passed as an argument.
fw_id_maps = gen.add_to_db()

print("{} submitted".format(mp_id))
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment