diff --git a/docs/data/eln.md b/docs/data/eln.md
deleted file mode 100644
index 888755dddaa6be176811c421666362b66c0e9348..0000000000000000000000000000000000000000
--- a/docs/data/eln.md
+++ /dev/null
@@ -1,37 +0,0 @@
-This guide describes how to manually create entries and enter information
-via ELNs (electronic lab notebooks). NOMAD ELNs allow you to acquire
-consistently structured data from users to augment uploaded files.
-
-!!! attention
-
-    This part of the documentation is still work in progress.
-
-## Create a basic ELN entry
-
-Go to `PUBLISH` / `Uploads`. Here you can create an upload with the `CREATE A NEW UPLOAD`
-button. This will bring you to the upload page.
-
-Click the `CREATE ENTRY` button. This will bring-up a dialog to choose an ELN schema.
-All ELNs (as any entry in NOMAD) needs to follow a schema. You can choose from uploaded
-custom schemas or NOMAD built-in schemas. You can choose the `Basic ELN` to create a
-simple ELN entry.
-
-The name of your ELN entry, will be the filename for your ELN without the `.archive.json`
-ending that will be added automatically. You can always find and download your ELNs
-on the `FILES` tab.
-
-The `Basic ELN` offers you simple fields for a *name*, *tags*, a *date/time*, and a rich text
-editor to enter your notes.
-
-## Add your own ELN schema
-
-To make NOMAD ELNs more useful, you can define your own schema to create you own data
-fields, create more sub-sections, reference other entries, and much more.
-
-You should have a look at our ELN example upload. Go to `PUBLISH` / `Uploads` and
-click the `ADD EXAMPLE UPLOADS` button. The `Electronic Lab Notebook` example, will
-contain a schema and entries that instantiate different parts of the schema.
-The *ELN example sample (`sample.archive.json`) demonstrates what you can do.
-
-Follow the [How-to write a schema](../schemas/basics.md) and [How-to define ELN](../schemas/elns.md)
-guides to create you own customized of ELNs.
diff --git a/docs/examples/computational_data/data/Si_gw.zip b/docs/examples/computational_data/data/Si_gw.zip
new file mode 100644
index 0000000000000000000000000000000000000000..74cd49df92b0b4783618299d00326216c64bf34d
Binary files /dev/null and b/docs/examples/computational_data/data/Si_gw.zip differ
diff --git a/docs/examples/computational_data/data/example_files.zip b/docs/examples/computational_data/data/example_files.zip
new file mode 100644
index 0000000000000000000000000000000000000000..bb4694a70c76d5834be3847d149b02273d0f148f
Binary files /dev/null and b/docs/examples/computational_data/data/example_files.zip differ
diff --git a/docs/examples/computational_data/data/workflowyaml_files.zip b/docs/examples/computational_data/data/workflowyaml_files.zip
new file mode 100644
index 0000000000000000000000000000000000000000..4c4028f2949a45084354fae4064e7844f4973ab3
Binary files /dev/null and b/docs/examples/computational_data/data/workflowyaml_files.zip differ
diff --git a/docs/examples/computational_data/images/data_page.png b/docs/examples/computational_data/images/data_page.png
new file mode 100644
index 0000000000000000000000000000000000000000..1e8b91a9eac246758d5406a5634f12322894246a
Binary files /dev/null and b/docs/examples/computational_data/images/data_page.png differ
diff --git a/docs/examples/computational_data/images/datasets_page.png b/docs/examples/computational_data/images/datasets_page.png
new file mode 100644
index 0000000000000000000000000000000000000000..f30b62a909412d929d9f702cceeecdaa84434ae2
Binary files /dev/null and b/docs/examples/computational_data/images/datasets_page.png differ
diff --git a/docs/examples/computational_data/images/edit_author_metadata.png b/docs/examples/computational_data/images/edit_author_metadata.png
new file mode 100644
index 0000000000000000000000000000000000000000..7b99bb277fc8e7a2d8581ad011a4a3cacadb45f9
Binary files /dev/null and b/docs/examples/computational_data/images/edit_author_metadata.png differ
diff --git a/docs/examples/computational_data/images/entries.png b/docs/examples/computational_data/images/entries.png
new file mode 100644
index 0000000000000000000000000000000000000000..c88aaf00d4ef35ad8ca10a8169b14e599f8373bb
Binary files /dev/null and b/docs/examples/computational_data/images/entries.png differ
diff --git a/docs/examples/computational_data/images/fullworkflow.png b/docs/examples/computational_data/images/fullworkflow.png
new file mode 100644
index 0000000000000000000000000000000000000000..18b98c34ae0184361cc8ec4b81cbc6475bccec4f
Binary files /dev/null and b/docs/examples/computational_data/images/fullworkflow.png differ
diff --git a/docs/examples/computational_data/images/gwupload.gif b/docs/examples/computational_data/images/gwupload.gif
new file mode 100644
index 0000000000000000000000000000000000000000..f1a6599fd2b63f73babd010bc3a244e96816954e
Binary files /dev/null and b/docs/examples/computational_data/images/gwupload.gif differ
diff --git a/docs/examples/computational_data/images/logs_page.png b/docs/examples/computational_data/images/logs_page.png
new file mode 100644
index 0000000000000000000000000000000000000000..da88af791a3b524d3ef858891cdedb469c3e1d96
Binary files /dev/null and b/docs/examples/computational_data/images/logs_page.png differ
diff --git a/docs/examples/computational_data/images/newuser_register.png b/docs/examples/computational_data/images/newuser_register.png
new file mode 100644
index 0000000000000000000000000000000000000000..4b0c46eab5c6568fd83f371d70f09380ad4670e4
Binary files /dev/null and b/docs/examples/computational_data/images/newuser_register.png differ
diff --git a/docs/examples/computational_data/images/nomad_metainfo.png b/docs/examples/computational_data/images/nomad_metainfo.png
new file mode 100644
index 0000000000000000000000000000000000000000..b34c27e7ce83c8d4f71023154ea12392b9c892c4
Binary files /dev/null and b/docs/examples/computational_data/images/nomad_metainfo.png differ
diff --git a/docs/examples/computational_data/images/nomadmetainfo.png b/docs/examples/computational_data/images/nomadmetainfo.png
new file mode 100644
index 0000000000000000000000000000000000000000..84e18913442ac96ce66dc772b85a267244ba379f
Binary files /dev/null and b/docs/examples/computational_data/images/nomadmetainfo.png differ
diff --git a/docs/examples/computational_data/images/overview_page.png b/docs/examples/computational_data/images/overview_page.png
new file mode 100644
index 0000000000000000000000000000000000000000..88fe047883234c80efe165e42b793a42a09732d0
Binary files /dev/null and b/docs/examples/computational_data/images/overview_page.png differ
diff --git a/docs/examples/computational_data/images/pressure1.png b/docs/examples/computational_data/images/pressure1.png
new file mode 100644
index 0000000000000000000000000000000000000000..ebbeb9c7a8d4b18ddfdeb97d43e976af57b6c0dc
Binary files /dev/null and b/docs/examples/computational_data/images/pressure1.png differ
diff --git a/docs/examples/computational_data/images/publish.png b/docs/examples/computational_data/images/publish.png
new file mode 100644
index 0000000000000000000000000000000000000000..bfd5ad3bbba7d00cb530920e373e63adb01d9fbd
Binary files /dev/null and b/docs/examples/computational_data/images/publish.png differ
diff --git a/docs/examples/computational_data/images/singlepoint.png b/docs/examples/computational_data/images/singlepoint.png
new file mode 100644
index 0000000000000000000000000000000000000000..b20b9dc0fac95705f047e27b2b73345a7fbf6291
Binary files /dev/null and b/docs/examples/computational_data/images/singlepoint.png differ
diff --git a/docs/examples/computational_data/images/singlepoint_methodadded.png b/docs/examples/computational_data/images/singlepoint_methodadded.png
new file mode 100644
index 0000000000000000000000000000000000000000..2a140fd3d06542ff3dd24e9629f7d1da6520f3fa
Binary files /dev/null and b/docs/examples/computational_data/images/singlepoint_methodadded.png differ
diff --git a/docs/examples/computational_data/images/top_fields_uploads.png b/docs/examples/computational_data/images/top_fields_uploads.png
new file mode 100644
index 0000000000000000000000000000000000000000..845144d8307add2b62b782218e8173111d34288a
Binary files /dev/null and b/docs/examples/computational_data/images/top_fields_uploads.png differ
diff --git a/docs/examples/computational_data/images/upload_files.png b/docs/examples/computational_data/images/upload_files.png
new file mode 100644
index 0000000000000000000000000000000000000000..1ac3a42e3a9440241a29ea87e5805afc6eadab73
Binary files /dev/null and b/docs/examples/computational_data/images/upload_files.png differ
diff --git a/docs/examples/computational_data/images/upload_menu.png b/docs/examples/computational_data/images/upload_menu.png
new file mode 100644
index 0000000000000000000000000000000000000000..deedbf5a558ba6443d19ef2ec9185d99efd099a2
Binary files /dev/null and b/docs/examples/computational_data/images/upload_menu.png differ
diff --git a/docs/examples/computational_data/uploading.md b/docs/examples/computational_data/uploading.md
new file mode 100644
index 0000000000000000000000000000000000000000..1ae218e07cac8b1a293e15aaf3fe581e9c2d1b78
--- /dev/null
+++ b/docs/examples/computational_data/uploading.md
@@ -0,0 +1,251 @@
+# Quick Start: Uploading computational data in NOMAD
+
+!!! warning "Attention"
+
+    This part of the documentation is still work in progress.
+
+This page provides an overview of NOMAD's usage with computational data. If you are completely new to NOMAD, we recommend to first read through the [Navigating to NOMAD](../../tutorial/nomad_repo.md), [Uploading and publishing data](../../tutorial/upload_publish.md), and [Exploring data](../../tutorial/explore.md) tutorials.
+
+<!-- ## Creating a NOMAD account
+
+Before being able to upload and publish data in NOMAD, you need to create your personal account.
+
+Go to the [NOMAD website](https://nomad-lab.eu/nomad-lab/){:target="_blank"} and click on the button `Open NOMAD`. This will take you to the [NOMAD GUI](../glossary/glossary.md/#gui). The purpose of this site is to allow users to search, access, and download data using an intuitive and appealing interface.
+
+On the top right, click on `LOGIN / REGISTER`.
+
+You can then create an account by clicking on `New user? Register`.
+
+<p align="center">
+    <img src="images/newuser_register.png" width="50%" alt="New user? Register.">
+</p>
+
+After filling the blanks and clicking on `REGISTER`, you will receive a verification email. Once you verify your personal account, you can start using NOMAD.
+
+!!! note
+    In practice, you can create as many accounts as you want. However, we recommend you to create a single one
+    for managing your data in the platform. Otherwise, this can interfere with other functionalities, e.g.,
+    when a collaborator wants to add you as a member of an upload but instead finds a list of possible accounts. -->
+
+Uploading data in NOMAD can be done in several ways:
+
+- By dragging-and-dropping your files into the `PUBLISH > Uploads` page: suitable for users who have a relatively small amount of data.
+- By using the Python-based [NOMAD API](../../howto/programmatic/api.md): suitable for users who have larger datasets and need to automatize the upload.
+- By using the shell command `curl` for sending files to the upload: suitable for users who have larger datasets and need to automatize the upload.
+
+You can upload the files one by one or you can zip them in [`.zip`](https://copyrightservice.co.uk/reg/creating-zip-files) or `.tar.gz` formats to upload a larger amount of files at once.
+
+<!-- We suggest you to visit and read the [References > Best Practices: preparing the data and folder structure](refs.md/#best-practices-preparing-folder-upload) page to see what are the best practices to organize data in a directory tree prior to upload it. -->
+
+
+## Drag-and-drop uploads
+
+On the top-left menu, click on `PUBLISH > Uploads`.
+
+![Navigate to the uploads page](images/upload_menu.png){.screenshot}
+
+You can then click on `CREATE A NEW UPLOAD` or try one of the example uploads by clicking in `ADD EXAMPLE UPLOADS` and selecting one of the multiple options, including data from an ELN, various instruments, or computational software. For a clear demonstration of the entire process, we will use the following example data:
+
+<center>
+[Download Example Data](data/Si_gw.zip){:target="_blank" .md-button .nomad-button}
+</center>
+
+This particular example represents a computational workflow to investigate some properties of Si~2~; however, the details are not important for our demonstration here.
+
+After downloading the example `.zip` file, you can drag-and-drop it or click on the `CLICK OR DROP FILES` button to browse through your local directories.
+
+![File upload](images/gwupload.gif){.screenshot}
+
+After the files are uploaded, a **processing** is triggered. This generally includes an automatic identification of the uploaded files that are supported in NOMAD, and then a corresponding processing to harvest all the relevant (meta)data. The precise details of the processing depend on each use-case. For example, you can find out more about the processing of computational data in [Processing of computational data](#processing-of-computational-data).
+
+You will receive an email when the upload processing is finished.
+
+
+## Sections of the Uploads page
+
+At the top of the uploads page, you can modify certain general metadata fields.
+
+![Top fields in uploads page](images/top_fields_uploads.png){.screenshot}
+
+The name of the upload can be modified by clicking on the pen icon :fontawesome-solid-pen:. The other icons correspond to:
+
+<!--Confirm with Lauri the icons-->
+- :fontawesome-solid-user-group: _Manage members_: allows users to invite collaborators by defining co-authors and reviewers roles.
+- :fontawesome-solid-cloud-arrow-down: _Download files_: downloads all files present in the upload.
+- :fontawesome-solid-rotate-left: _Reload_: reloads the uploads page.
+- :fontawesome-solid-rotate: _Reprocess_: triggers the processing of the uploaded data again.
+- :fontawesome-solid-angle-left::fontawesome-solid-angle-right: _API_: generates a JSON response to use by the [NOMAD API](../../howto/programmatic/api.md).
+<!-- See [Filtering and Querying](../filtering_and_querying/overview.md) for more information. -->
+<!-- TODO  Add API to glossary -->
+- :fontawesome-solid-trash: _Delete the upload_: deletes completely the upload.
+
+The remainder of the uploads page is divided into 4 sections.
+
+### Prepare and upload your files
+
+This section shows the files and folder structure in the upload. You can add a `README.md` in the root directory and its content will be shown above this section.
+
+![Uploaded files](images/upload_files.png){.screenshot}
+
+### Process data
+
+This section shows the processed data and the generated [entries](../../reference/glossary.md#entry) in NOMAD.
+
+![Processed entries](images/entries.png){.screenshot}
+
+### Edit author metadata
+
+This section allows users to edit certain metadata fields from all entries recognized in the upload. This includes _comments_, where you can add as much extra information as you want, _references_, where you can add a URL to your upload (e.g., an article DOI), and _datasets_, where you can create or add the uploaded data into a more general dataset (see [Organizing data in datasets](#organizing-data-in-datasets)).
+
+<center>
+![Edit author metadata](images/edit_author_metadata.png){.screenshot style="max-width:300px !important;"}
+</center>
+
+### Publish
+
+This section lets the user publish the data with or without an embargo.
+
+![Publish button](images/publish.png){.screenshot}
+
+## Publishing
+
+After uploading and a successful parsing, **congratulations!** Now you can publish your data and let other users browse through it and re-use it for other purposes.
+
+![Publish button](images/publish.png){.screenshot}
+
+You can define a specific `Embargo period` of up to 36 months, after which the data will be made publicly available under the [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/){:target="_blank"} license.
+
+After publishing by clicking on `PUBLISH`, the uploaded files cannot be altered. However, you can still edit the metadata fields.
+
+## Organizing data in datasets
+
+You can organize your uploads and individual entries by grouping them into common datasets.
+
+In the uploads page, click on `EDIT AUTHOR METADATA OF ALL ENTRIES`.
+
+Under `Datasets` you can either `Create a new dataset` or `Search for an existing dataset`. After selecting the dataset, click on `SUBMIT`.
+
+Now, the newly created dataset will be listed under `PUBLISH > Datasets`.
+
+![Datasets page](images/datasets_page.png){.screenshot}
+
+The icon :fontawesome-solid-bookmark: allows you to assign a DOI to a specific dataset. Once a DOI has been assigned to a dataset, no more data can be added to it. This can then be added into your publication so that it can be used as a reference, e.g., see the [**Data availability statement** in M. Kuban et al., _Similarity of materials and data-quality assessment by fingerprinting_, MRS Bulletin **47**, 991-999 (2022)](https://link.springer.com/article/10.1557/s43577-022-00339-w#data-availability){:target="_blank"}.
+
+## Processing of computational data
+
+See [From files to data](../../explanation/data.md) and [Processing](../../explanation/processing.md) for full explanations about data processing in NOMAD.
+
+When data is uploaded to NOMAD, the software interprets the files and determines which of them is a **mainfile**. Any other files in the upload can be viewed as **auxiliary files**. In the same upload, there might be multiple mainfiles and auxiliary files organized in a folder tree structure.
+
+The **mainfiles** are the main output file of a calculation. The presence of a mainfile in the upload is key for NOMAD to recognize a calculation. In NOMAD, we support an array of computational codes for first principles calculations, molecular dynamics simulations, and lattice modeling, as well as workflow and database managers. For each code, NOMAD recognizes a single file as the mainfile. For example, the [VASP](https://www.vasp.at/){:target="_blank"} mainfile is by default the `vasprun.xml`, although if the `vasprun.xml` is not present in the upload NOMAD searches the `OUTCAR` file and assigns it as the mainfile (see [VASP POTCAR stripping](#vasp-potcar-stripping)).
+
+The rest of the files which are not the mainfile are **auxiliary files**. These can have several purposes and be supported and recognized by NOMAD in the [parser](../../reference/glossary.md#parser). For example, the `band*.out` or `GW_band*` files in [FHI-aims](https://fhi-aims.org/){:target="_blank"} are auxiliary files that allow the NOMAD FHI-aims parser to recognize band structures in DFT and GW, respectively.
+
+<!--TODO: add our own supported parsers list with improved info-->
+You can see the full list of supported codes, mainfiles, and auxiliary files in the general NOMAD documentation under [Supported parsers](https://nomad-lab.eu/prod/v1/staging/docs/reference/parsers.html){:target="_blank"}.
+
+We recommend that the user keeps the folder structure and files generated by the simulation code, but without reaching the [uploads limits](../../howto/manage/upload.md#upload-limits). Please, also check our recommendations on [Best Practices: preparing the data and folder structure](#best-practices-preparing-the-data-and-folder-structure).
+
+
+## Structured data with the NOMAD metainfo
+
+Once the mainfile has been recognized, a new [entry](../../reference/glossary.md/#entry) in NOMAD is created and a specific [parser](#parsing) is called. The auxiliary files are searched for and accessed within the parser.
+<!-- TODO add more info or link properly
+You can check more details in [Writing a parser plugin](../writing_a_parser_plugin/parser_plugin_overview.md) on how to add new parsers in order for NOMAD to support new codes. -->
+
+For this new entry, NOMAD generates a **NOMAD archive**. It will contain all the (meta)information extracted from the unstructured raw data files but in a _structured_, _well defined_, and _machine readable_ format. This **metadata** provides context to the raw data, i.e., what were the input methodological parameters, on which material the calculation was performed, etc. We define the **NOMAD Metainfo** as all the set of [sections, sub-sections, and quantities](../../reference/glossary.md/#metainfo) used to structure the raw data into a structured _schema_. Further information about the NOMAD Metainfo is available in the general NOMAD documentation page in [Learn > Structured data](https://nomad-lab.eu/prod/v1/staging/docs/learn/data.html){:target="_blank"}.
+
+![The NOMAD metainfo](images/nomad_metainfo.png){.screenshot}
+
+
+## NOMAD sections for computational data
+
+Under the `Entry` / `archive` section, there are several sections and quantities being populated by the parsers. For computational data, only the following sections are populated:
+
+- `metadata`: contains general and non-code specific metadata. This is mainly information about authors, the entry creation time, identifiers (id), etc.
+- `run`: contains the [**parsed**](#parsing) and [**normalized**](#normalizing) raw data into the structured NOMAD schema. This is all the possible raw data which can be translated into a structured way.
+- `workflow2`: contains metadata about the specific workflow performed within the entry. This is mainly a set of well-defined workflows, e.g., `GeometryOptimization`, and their parameters.
+- `results`: contains the [**normalized**](#normalizing) and [**search indexed**](#search-indexing-and-storing) metadata. This is mainly relevant for searching, filtering, and visualizing data in NOMAD.
+
+??? question "`workflow` and `workflow2` sections: development and refactoring"
+    You have probably noticed the name `workflow2` but also the existence of a section called `workflow` under `archive`. This is because
+    `workflow` is an old version of the workflow section, while `workflow2` is the new version. Sometimes, certain sections suffer a rebranding
+    or _refactoring_, in most cases to add new features or to polish them after we receive years of feedback. In this case, the `workflow` section
+    will remain until all older entries containing such section are reprocessed to transfer this information into `workflow2`.
+
+
+### Parsing
+
+A parser is a Python module which reads the code-specific mainfile and auxiliary files and populates the `run` and `workflow2` sections of the `archive`, along with all relevant sub-sections and quantities.
+<!-- TODO add link to parser plugin or maybe parser explanation -->
+<!-- We explain them more in detail in [Writing a parser plugin](../writing_a_parser_plugin/parser_plugin_overview.md). -->
+
+Parsers are added to NOMAD as _plugins_ and are divided in a set of Github sub-projects under the [main NOMAD repository](https://github.com/nomad-coe/nomad){:target="_blank"}.
+<!-- You can find a detailed list of projects in [Writing a parser plugin - Parser organization](../writing_a_parser_plugin/parser_plugin_overview.md/#parser-organization). -->
+
+<!-- !!! tip "External contributions"
+    We always welcome external contributions for new codes and parsers in our repositories. Furthermore, we are always happy to hear feedback and implement new features
+    into our parsers.
+    TODO add contact info
+    Please, check our [Contact](../contact.md) information to get in touch with us so we can promptly help you! -->
+
+
+### Normalizing
+
+After the parsing populates the `run` and `workflow2` sections, an extra layer of Python modules is executed on top of the processed NOMAD metadata. This has two main purposes: 1. normalize or _homogenize_ certain metadata parsed from different codes, and 2. populate the `results` section. For example, this is the case of normalizing the density of states (DOS) to its size intensive value, independently of the code used to calculate the DOS. The set of normalizers relevant for computational data are listed in [`/nomad/config/models.py`](https://github.com/nomad-coe/nomad/blob/develop/nomad/config/models.py#L383){:target="_blank"} and are executed in the specific order defined there. Their roles are explained more in detail in [Processing](../../explanation/processing.md).
+
+
+### Search indexing (and storing)
+
+The last step is to store the structured metadata and pass some of it to the search index. The metadata which is passed to the search index is defined in the `results` section. These metadata can then be searched by filtering in the Entries page of NOMAD or by writing a Python script which searches using the NOMAD API.
+<!-- TODO add link or this info somewhere -->
+<!-- , see [Filtering and Querying](../filtering_and_querying/overview.md). -->
+
+
+## Entries OVERVIEW page
+
+Once the parsers and normalizers finish, the Uploads page will show if the processing of the entry was a `SUCCESS` or a `FAILURE`. The entry information can be browsed by clicking on the :fontawesome-solid-arrow-right: icon.
+
+You will land on the `OVERVIEW` page of the entry. On the top menu you can further select the `FILES` page, the `DATA` page, and the `LOGS` page.
+
+![Overview page](images/overview_page.png){.screenshot}
+
+The overview page contains a summary of the parsed metadata, e.g., tabular information about the material and methodology of the calculation (in the example, a G0W0 calculation done with the code [exciting](https://www.exciting-code.org/){:target="_blank"} for bulk Si<sub>2</sub>), and visualizations of the system and some relevant properties. We note that all metadata are read directly from `results`.
+
+### LOGS page
+
+In the `LOGS` page, you can find information about the processing. You can read error, warning, and critical messages which can provide insight if the processing of an entry was a `FAILURE`.
+
+![Logs page](images/logs_page.png){.screenshot}
+
+We recommend you to [Get support](https://nomad-lab.eu/nomad-lab/support.html){:target="_blank"} or [contact our team](mailto:support@nomad-lab.eu) in case you find `FAILURE` situations. These might be due to bugs which we are rapidly fixing, and whose origin might be varied: from a new version
+of a code which is not yet supported to wrong handling of potential errors in the parser script. It may also be a problem with the organization of the data in the folders. In order to minimize these situations, we suggest that you read [Best Practices: preparing the data and folder structure](#best-practices-preparing-the-data-and-folder-structure).
+
+### DATA page
+
+The `DATA` page contains all the structured NOMAD metainfo populated by the parser and normalizers. This is the most important page in the entry, as it contains all the relevant metadata which will allow users to find that specific simulation.
+
+![Data page](images/data_page.png){.screenshot}
+
+Furthermore, you can click on the :fontawesome-solid-cloud-arrow-down: icon to download the NOMAD `archive` in a JSON format.
+<!-- We explain more in detail how to work with such files in [Filtering and Querying](../filtering_and_querying/overview.md). -->
+
+
+## Best Practices: preparing the data and folder structure
+
+!!! warning "Attention"
+    Under construction.
+
+<!-- ## Uploads limits
+
+NOMAD limits the number of uploads and size of all its users. The following rules apply:
+
+1. One upload cannot exceed **32 GB** in size.
+2. A user can only be co-author of up to **10 non-published uploads** at the same time.
+3. Only uploads with at least **one recognized entry** can be published. -->
+
+
+## VASP POTCAR stripping
+<!-- TODO move this to a separate section / page with details about particular parsers -->
+
+For VASP data, NOMAD complies with the licensing of the `POTCAR` files. In agreement with [Georg Kresse](https://www.vasp.at/info/team/){:target="_blank"}, NOMAD extracts the most important information of the `POTCAR` file and stores them in a stripped version called `POTCAR.stripped`. The `POTCAR` files are then automatically removed from the upload, so that you can safely publish your data.
diff --git a/docs/examples/computational_data/workflows.md b/docs/examples/computational_data/workflows.md
new file mode 100644
index 0000000000000000000000000000000000000000..dfda9e4f5936813e6ac73f31c9853510a861deae
--- /dev/null
+++ b/docs/examples/computational_data/workflows.md
@@ -0,0 +1,318 @@
+# Standard and Custom Computational Workflows in NOMAD
+
+The following examples contain the basic knowledge for understanding and learning to use NOMAD workflows, and their relation to DFT and beyond-DFT (GW, BSE, DMFT, etc.) methodologies. You will use a fictitious example of a simulation workflow with the following files and folder structure:
+```
+.
+├── pressure1
+│   ├── temperature1
+│   │   ├── dmft_p1_t1.hdf5
+│   │   └── ...extra auxiliary files
+│   ├── temperature2
+│   │   ├── dmft_p1_t2.hdf5
+│   │   └── ...extra auxiliary files
+│   ├── dft_p1.xml
+│   ├── tb_p1.wout
+│   └── ...extra auxiliary files
+└── pressure2
+    ├── temperature1
+    │   ├── dmft_p2_t1.hdf5
+    │   └── ...extra auxiliary files
+    ├── temperature2
+    │   ├── dmft_p2_t2.hdf5
+    │   └── ...extra auxiliary files
+    ├── dft_p2.xml
+    ├── tb_p2.wout
+    └── ...extra auxiliary files
+```
+
+which can be downloaded here:
+<center>
+[Download example_files.zip](data/example_files.zip){ .md-button .nomad-button }
+</center>
+
+Each of the _mainfiles_ represent an electronic-structure calculation (either [DFT](https://en.wikipedia.org/wiki/Density_functional_theory){:target="_blank"}, [TB](https://en.wikipedia.org/wiki/Tight_binding){:target="_blank"}, or [DMFT](https://en.wikipedia.org/wiki/Dynamical_mean-field_theory){:target="_blank"}) which in turn is then parsed into a singular _entry_ in NOMAD. When dragged into the [NOMAD Upload page](https://nomad-lab.eu/prod/v1/staging/gui/user/uploads){:target="_blank"}, these files should generate 8 entries in total. This folder structure presents a typical workflow calculation which can be represented as a provenance graph:
+```mermaid
+graph LR;
+    A2((Inputs)) --> B2[DFT];
+    A1((Inputs)) --> B1[DFT];
+    subgraph pressure P<sub>2</sub>
+    B2[DFT] --> C2[TB];
+    C2[TB] --> D21[DMFT at T<sub>1</sub>];
+    C2[TB] --> D22[DMFT at T<sub>2</sub>];
+    end
+    D21[DMFT at T<sub>1</sub>] --> E21([Output calculation P<sub>2</sub>, T<sub>1</sub>])
+    D22[DMFT at T<sub>2</sub>] --> E22([Output calculation P<sub>2</sub>, T<sub>2</sub>])
+    subgraph pressure P<sub>1</sub>
+    B1[DFT] --> C1[TB];
+    C1[TB] --> D11[DMFT at T<sub>1</sub>];
+    C1[TB] --> D12[DMFT at T<sub>2</sub>];
+    end
+    D11[DMFT at T<sub>1</sub>] --> E11([Output calculation P<sub>1</sub>, T<sub>1</sub>])
+    D12[DMFT at T<sub>2</sub>] --> E12([Output calculation P<sub>1</sub>, T<sub>2</sub>])
+```
+Here, "Input" refers to all the _input_ information given to perform the calculation (e.g., atom positions, model parameters, experimental initial conditions, etc.). "DFT", "TB" and "DMFT" refer to individual _tasks_ of the workflow, which each correspond to a _SinglePoint_ entry in NOMAD. "Output calculation" refers to the _output_ data of each of the final DMFT tasks.
+
+The goal of this part is to set up the following workflows:
+
+1. A `SinglePoint` workflow for one of the calculations (e.g., the DFT one) in the `pressure1` subfolder.
+2. An overarching workflow entry for each pressure P<sub>i=1,2</sub>, grouping all `SinglePoint` "DFT", "TB", "DMFT at T<sub>1</sub>", and "DMFT at T<sub>2</sub>" tasks.
+3. A top level workflow entry, grouping together all pressure calculations.
+
+The files for all these cases can be downloaded here:
+<center>
+[Download workflowyaml_files.zip](data/workflowyaml_files.zip){ .md-button .nomad-button }
+</center>
+
+ You can try writing these files yourself first, and then compare them with the tested files.
+
+
+## Starting example: SinglePoint workflow
+
+NOMAD is able to recognize certain workflows in an automatic way, such as the `SinglePoint` case mentioned above. However, to showcase how to use workflows in NOMAD, you will learn how to "manually" construct the SinglePoint workflow, represented by the following provenance graph:
+```mermaid
+graph LR;
+    A((Inputs)) --> B[DFT];
+    B[DFT] --> C([Output calculation]);
+```
+To define a workflow manually in NOMAD, you must add a YAML file to the upload folder that contains the relevant input, output, and task information. This file should be named `<filename>.archive.yaml`. In this case, you should include the file `single_point.archive.yaml` with the following content:
+
+```yaml
+workflow2:
+  name: SinglePoint
+  inputs:
+    - name: Input structure
+      section: '../upload/archive/mainfile/pressure1/dft_p1.xml#/run/0/system/-1'
+  outputs:
+    - name: Output calculation
+      section: '../upload/archive/mainfile/pressure1/dft_p1.xml#/run/0/calculation/-1'
+  tasks:
+    - m_def: nomad.datamodel.metainfo.workflow.TaskReference
+      task: '../upload/archive/mainfile/pressure1/dft_p1.xml#/workflow2'
+      name: DFT at Pressure P1
+      inputs:
+        - name: Input structure
+          section: '../upload/archive/mainfile/pressure1/dft_p1.xml#/run/0/system/-1'
+      outputs:
+        - name: Output calculation
+          section: '../upload/archive/mainfile/pressure1/dft_p1.xml#/run/0/calculation/-1'
+```
+
+Note several things about the content of this file:
+
+1. **`name`** keys are optional.
+2. The root path of the upload can be referenced with `../upload/archive/mainfile/`. Starting from there, the original directory tree structure of the upload is maintained.
+3. **`inputs`** reference the section containing inputs of the whole workflow. In this case this is the section `run[0].system[-1]` parsed from the mainfile in the path `pressure1/dft_p1.xml`.
+4. **`outputs`** reference the section containing outputs of the whole workflow. In this case this is the section `run[0].calculation[-1]` parsed from the mainfile in the path `pressure1/dft_p1.xml`.
+5. **`tasks`** reference the section containing tasks of each step in the workflow. These must also contain `inputs` and `outputs` properly referencing the corresponding sections; this will then _link_ inputs/outputs/tasks in the NOMAD Archive. In this case this is a `TaskReference` to the section `workflow2` parsed from the mainfile in the path `pressure1/dft_p1.xml`.
+6. **`section`** reference to the uploaded mainfile specific section. The left side of the `#` symbol contains the path to the _mainfile_, while the right contains the path to the _section_.
+
+This will produce an extra entry with the following Overview content:
+
+![NOMAD workflow schema](images/singlepoint.png){.screenshot}
+
+Note that you are referencing sections which are lists. Thus, in each case you should be careful to reference the correct section for inputs and outputs (example: a `GeometryOptimization` workflow calculation will have the "Input structure" as `run[0].system[0]`, while the "Output calculation" would also contain `run[0].system[-1]`, and all intermediate steps must input/output the corresponding section system).
+
+!!! note "NOMAD workflow filename"
+    The NOMAD workflow YAML file name, i.e., `<filename>` in the explanation above, can be any custom name defined by the user, but the file **must** keep the extension `.archive.yaml` at the end. This is done in order for NOMAD to recognize this file as a _custom schema_. Custom schemas are widely used in experimental parsing, and you can learn more about them in the [FAIRmat tutorial 8](https://www.fairmat-nfdi.eu/events/fairmat-tutorial-8/tutorial-8-home).
+
+You can extend the workflow meta-information by adding the methodological input parameters. These are stored in NOMAD in the section path `run[0].method[-1]`. The new `single_point.archive.yaml` will be:
+
+```yaml
+workflow2:
+  name: SinglePoint
+  inputs:
+    - name: Input structure
+      section: '../upload/archive/mainfile/pressure1/dft_p1.xml#/run/0/system/-1'
+    - name: Input methodology parameters
+      section: '../upload/archive/mainfile/pressure1/dft_p1.xml#/run/0/method/-1'
+  outputs:
+    - name: Output calculation
+      section: '../upload/archive/mainfile/pressure1/dft_p1.xml#/run/0/calculation/-1'
+  tasks:
+    - m_def: nomad.datamodel.metainfo.workflow.TaskReference
+      task: '../upload/archive/mainfile/pressure1/dft_p1.xml#/workflow2'
+      name: DFT at Pressure P1
+      inputs:
+        - name: Input structure
+          section: '../upload/archive/mainfile/pressure1/dft_p1.xml#/run/0/system/-1'
+        - name: Input methodology parameters
+          section: '../upload/archive/mainfile/pressure1/dft_p1.xml#/run/0/method/-1'
+      outputs:
+        - name: Output calculation
+          section: '../upload/archive/mainfile/pressure1/dft_p1.xml#/run/0/calculation/-1'
+```
+
+which in turn produces a workflow similar to the previous one, but with an extra input node:
+
+![SinglePoint workflow visualizer with Method added](images/singlepoint_methodadded.png){.screenshot}
+
+
+## Pressure workflows
+
+Now that you know the basics of the workflow YAML schema, let's try to define an overarching workflow for each of the pressures. For this section, you will learn how to create the workflow YAML schema for the P<sub>1</sub> case; the extension for P<sub>2</sub> is then a matter of changing names and paths in the YAML files. For simplicity, you can skip referencing to methodologies.
+
+Thus, the `inputs` can be defined as:
+```yaml
+workflow2:
+  name: DFT+TB+DMFT at P1
+  inputs:
+    - name: Input structure
+      section: '../upload/archive/mainfile/pressure1/dft_p1.xml#/run/0/system/-1'
+```
+and there are two `outputs`, one for each of the DMFT calculations at distinct temperatures:
+```yaml
+  outputs:
+    - name: Output DMFT at P1, T1 calculation
+      section: '../upload/archive/mainfile/pressure1/temperature1/dmft_p1_t1.hdf5#/run/0/calculation/-1'
+    - name: Output DMFT at P1, T2 calculation
+      section: '../upload/archive/mainfile/pressure1/temperature2/dmft_p1_t2.hdf5#/run/0/calculation/-1'
+```
+Now, `tasks` are defined for each of the methodologies performed (each corresponding to an underlying SinglePoint workflow). To define a valid workflow, each task must contain an input that corresponds to one of the outputs of the previous task. Moreover, the first task should take as input the overall input of the workflow, and the final task should also have as an output the overall workflow output.
+Then:
+```yaml
+  tasks:
+    - m_def: nomad.datamodel.metainfo.workflow.TaskReference
+      task: '../upload/archive/mainfile/pressure1/dft_p1.xml#/workflow2'
+      name: DFT at P1
+      inputs:
+        - name: Input structure
+          section: '../upload/archive/mainfile/pressure1/dft_p1.xml#/run/0/system/-1'
+      outputs:
+        - name: Output DFT at P1 calculation
+          section: '../upload/archive/mainfile/pressure1/dft_p1.xml#/run/0/calculation/-1'
+    - m_def: nomad.datamodel.metainfo.workflow.TaskReference
+      task: '../upload/archive/mainfile/pressure1/tb_p1.wout#/workflow2'
+      name: TB at P1
+      inputs:
+        - name: Input DFT at P1 calculation
+          section: '../upload/archive/mainfile/pressure1/dft_p1.xml#/run/0/calculation/-1'
+      outputs:
+        - name: Output TB at P1 calculation
+          section: '../upload/archive/mainfile/pressure1/tb_p1.wout#/run/0/calculation/-1'
+    - m_def: nomad.datamodel.metainfo.workflow.TaskReference
+      task: '../upload/archive/mainfile/pressure1/temperature1/dmft_p1_t1.hdf5#/workflow2'
+      name: DMFT at P1 and T1
+      inputs:
+        - name: Input TB at P1 calculation
+          section: '../upload/archive/mainfile/pressure1/tb_p1.wout#/run/0/calculation/-1'
+      outputs:
+        - name: Output DMFT at P1, T1 calculation
+          section: '../upload/archive/mainfile/pressure1/temperature1/dmft_p1_t1.hdf5#/run/0/calculation/-1'
+    - m_def: nomad.datamodel.metainfo.workflow.TaskReference
+      task: '../upload/archive/mainfile/pressure1/temperature2/dmft_p1_t2.hdf5#/workflow2'
+      name: DMFT at P1 and T2
+      inputs:
+        - name: Input TB at P1 calculation
+          section: '../upload/archive/mainfile/pressure1/tb_p1.wout#/run/0/calculation/-1'
+      outputs:
+        - name: Output DMFT at P1, T2 calculation
+          section: '../upload/archive/mainfile/pressure1/temperature2/dmft_p1_t2.hdf5#/run/0/calculation/-1'
+```
+Note here:
+
+- The `inputs` for each subsequent step are the `outputs` of the previous step.
+- The final two `outputs` coincide with the `workflow2` `outputs`.
+
+This workflow (`pressure1.archive.yaml`) file will then produce an entry with the following Overview page:
+
+![Pressure P1 workflow visualizer](images/pressure1.png){.screenshot}
+
+Similarly, for P<sub>2</sub> you can upload a new `pressure2.archive.yaml` file with the same content, except substituting 'pressure1' and 'p1' with their counterparts. This will produce a graph similar to the one shown before, but for "P2".
+
+
+## The top-level workflow
+
+After adding the workflow YAML files, your upload folder directory now looks like:
+```
+.
+├── pressure1
+│   ├── temperature1
+│   │   ├── dmft_p1_t1.hdf5
+│   │   └── ...extra auxiliary files
+│   ├── temperature2
+│   │   ├── dmft_p1_t2.hdf5
+│   │   └── ...extra auxiliary files
+│   ├── dft_p1.xml
+│   ├── tb_p1.wout
+│   └── ...extra auxiliary files
+├── pressure1.archive.yaml
+├── pressure2
+│   ├── temperature1
+│   │   ├── dmft_p2_t1.hdf5
+│   │   └── ...extra auxiliary files
+│   ├── temperature2
+│   │   ├── dmft_p2_t2.hdf5
+│   │   └── ...extra auxiliary files
+│   ├── dft_p2.xml
+│   ├── tb_p2.wout
+│   └── ...extra auxiliary files
+├── pressure2.archive.yaml
+└── single_point.archive.yaml
+```
+In order to define the general workflow that groups all pressure calculations, you can directly reference the previous `pressureX.archive.yaml` files as tasks. Still, `inputs` and `outputs` must be referenced to their corresponding mainfile and section paths.
+
+Create a new `fullworkflow.archive.yaml` file with the `inputs`:
+```yaml
+workflow2:
+  name: Full calculation at different pressures for SrVO3
+  inputs:
+    - name: Input structure at P1
+      section: '../upload/archive/mainfile/pressure1/dft_p1.xml#/run/0/system/-1'
+    - name: Input structure at P2
+      section: '../upload/archive/mainfile/pressure2/dft_p2.xml#/run/0/system/-1'
+```
+And `outputs`:
+```yaml
+  outputs:
+    - name: Output DMFT at P1, T1 calculation
+      section: '../upload/archive/mainfile/pressure1/temperature1/dmft_p1_t1.hdf5#/run/0/calculation/-1'
+    - name: Output DMFT at P1, T2 calculation
+      section: '../upload/archive/mainfile/pressure1/temperature2/dmft_p1_t2.hdf5#/run/0/calculation/-1'
+    - name: Output DMFT at P2, T1 calculation
+      section: '../upload/archive/mainfile/pressure2/temperature1/dmft_p2_t1.hdf5#/run/0/calculation/-1'
+    - name: Output DMFT at P2, T2 calculation
+      section: '../upload/archive/mainfile/pressure2/temperature2/dmft_p2_t2.hdf5#/run/0/calculation/-1'
+```
+Finally, `tasks` references the previous YAML schemas as follows:
+```yaml
+  tasks:
+    - m_def: nomad.datamodel.metainfo.workflow.TaskReference
+      task: '../upload/archive/mainfile/pressure1.archive.yaml#/workflow2'
+      name: DFT+TB+DMFT at P1
+      inputs:
+        - name: Input structure at P1
+          section: '../upload/archive/mainfile/pressure1/dft_p1.xml#/run/0/system/-1'
+      outputs:
+        - name: Output DMFT at P1, T1 calculation
+          section: '../upload/archive/mainfile/pressure1/temperature1/dmft_p1_t1.hdf5#/run/0/calculation/-1'
+        - name: Output DMFT at P1, T2 calculation
+          section: '../upload/archive/mainfile/pressure1/temperature2/dmft_p1_t2.hdf5#/run/0/calculation/-1'
+    - m_def: nomad.datamodel.metainfo.workflow.TaskReference
+      task: '../upload/archive/mainfile/pressure2.archive.yaml#/workflow2'
+      name: DFT+TB+DMFT at P2
+      inputs:
+        - name: Input structure at P2
+          section: '../upload/archive/mainfile/pressure2/dft_p2.xml#/run/0/system/-1'
+      outputs:
+        - name: Output DMFT at P2, T1 calculation
+          section: '../upload/archive/mainfile/pressure2/temperature1/dmft_p2_t1.hdf5#/run/0/calculation/-1'
+        - name: Output DMFT at P2, T2 calculation
+          section: '../upload/archive/mainfile/pressure2/temperature2/dmft_p2_t2.hdf5#/run/0/calculation/-1'
+```
+
+This will produce the following entry and its Overview page:
+
+![Full workflow visualizer](images/fullworkflow.png){.screenshot}
+
+
+## Automatic workflows
+
+There are some cases where the NOMAD infrastructure is able to recognize certain workflows automatically when processing the uploaded files. The simplest example is any `SinglePoint` calculation, as explained above. Other examples include `GeometryOptimization`, `Phonons`, `GW`, and `MolecularDynamics`. Automated workflow detection may require your folder structure to fulfill certain conditions.
+
+Here are some general guidelines for preparing your upload folder in order to make it easier for the _automatic workflow recognition_ to work:
+
+- Always organize your files in a **top-down structure**, i.e., the initial _tasks_ should be higher up in the directory tree, while the later _tasks_ lower down.
+- Avoid having to go up and down between folders if some properties are derived between these files. These situations are very complicated to predict for the current NOMAD infrastructure.
+- Avoid duplication of files in subfolders. If initially you do a calculation A from which a later calculation B is derived and you want to store B in a subfolder, there is no need to copy the A files inside the subfolder B.
+
+The folder structure used throughout this part is a good example of a clean upload which is friendly and easy to work with when defining NOMAD workflows.
+<!-- Another example can be found in [Part II](../part2.md), when you learned how to upload a DFT + GW calculation for bulk Si<sub>2</sub>. In this case, an automatic GW workflow entry was generated. -->
diff --git a/docs/examples/overview.md b/docs/examples/overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..466fbce25264c291a7727ad89098197559a26bb2
--- /dev/null
+++ b/docs/examples/overview.md
@@ -0,0 +1,62 @@
+# NOMAD Domain-specific Examples
+
+Build upon your understanding of NOMAD's features with domain-specific examples and explanations.
+
+<div markdown="block" class="home-grid">
+<div markdown="block">
+
+### Computational data
+
+Historically a repository for Density Functional Theory calculations, NOMAD now supports a wide range of computational methodologies including advanced many-body calculations and classical molecular dynamics simulations, as well as complex simulation workflows.
+
+- [Quick Start: Uploading computational data](computational_data/uploading.md)
+- [Standard and custom computational workflows in NOMAD](computational_data/workflows.md)
+<!--
+#### subcategory 1
+- links...
+#### subcategory 2
+- links... -->
+
+</div>
+<div markdown="block">
+
+### Category 2
+
+More categories coming soon...
+<!--
+#### subcategory 1
+- links...
+#### subcategory 2
+- links... -->
+
+</div>
+
+<div markdown="block">
+
+<!-- ### Category 3
+
+description...
+
+#### subcategory 1
+- links...
+#### subcategory 2
+- links...
+
+</div>
+<div markdown="block">
+
+### Category 4
+
+description...
+
+#### subcategory 1
+- links...
+#### subcategory 2
+- links... -->
+
+</div>
+</div>
+
+<h2>One last thing</h2>
+
+If you can't find what you're looking for in our guides, [contact our friendly team](mailto:support@nomad-lab.eu) for personalized help and assistance. Don't worry, we're here to help and learn what we're doing wrong!
\ No newline at end of file
diff --git a/docs/explanation/architecture.md b/docs/explanation/architecture.md
index a4a2958375a97b68433fa5baa181eec3da7e2d74..3ce9bc4d800b0ad9eb8baa039d84f5986128e578 100644
--- a/docs/explanation/architecture.md
+++ b/docs/explanation/architecture.md
@@ -8,7 +8,7 @@ installed and run on a single sever, NOMAD advocates the use of containers and o
 NOMAD in a cloud environment.
 
 <figure markdown>
-  ![nomad architecture](architecture.png)
+  ![nomad architecture](images/architecture.png)
   <figcaption>NOMAD architecture</figcaption>
 </figure>
 
@@ -28,7 +28,7 @@ Other services are:
 - a content management system to provide other web-page content (not part of the Oasis)
 
 All NOMAD software is bundled in a single NOMAD docker image and a Python package
-([nomad-lab on pypi](https://pypi.org/project/nomad-lab/)). The NOMAD docker
+([nomad-lab on pypi](https://pypi.org/project/nomad-lab/){:target="_blank"}). The NOMAD docker
 image can be downloaded from our public registry.
 NOMAD software is organized in multiple git repositories. We use continuous integration
 to constantly provide the latest version of docker image and Python package.
@@ -42,7 +42,7 @@ of its processing, storage, availability, and scaling goals. The following is a
 comprehensive overview of used languages, libraries, frameworks, and services.
 
 <figure markdown>
-  ![nomad stack](stack.png)
+  ![nomad stack](images/stack.png)
   <figcaption>NOMAD components and dependencies</figcaption>
 </figure>
 
@@ -50,18 +50,18 @@ comprehensive overview of used languages, libraries, frameworks, and services.
 
 The *backend* of nomad is written in Python. This includes all parsers, normalizers,
 and other data processing. We only use Python 3 and there is no compatibility with
-Python 2. Code is formatted close to [pep8](https://www.python.org/dev/peps/pep-0008/),
-critical parts use [pep484](https://www.python.org/dev/peps/pep-0484/) type-hints.
-[ruff](https://docs.astral.sh/ruff), and
-[mypy](http://mypy-lang.org/) (static type checker) are used to ensure quality.
-Tests are written with [pytest](https://docs.pytest.org/en/latest/contents.html).
-Logging is done with [structlog](https://www.structlog.org/en/stable/) and *logstash* (see
-Elasticstack below). Documentation is driven by [Sphinx](http://www.sphinx-doc.org/en/master/).
+Python 2. Code is formatted close to [pep8](https://www.python.org/dev/peps/pep-0008/){:target="_blank"},
+critical parts use [pep484](https://www.python.org/dev/peps/pep-0484/){:target="_blank"} type-hints.
+[ruff](https://docs.astral.sh/ruff){:target="_blank"}, and
+[mypy](http://mypy-lang.org/){:target="_blank"} (static type checker) are used to ensure quality.
+Tests are written with [pytest](https://docs.pytest.org/en/latest/contents.html){:target="_blank"}.
+Logging is done with [structlog](https://www.structlog.org/en/stable/){:target="_blank"} and *logstash* (see
+Elasticstack below). Documentation is driven by [Sphinx](http://www.sphinx-doc.org/en/master/){:target="_blank"}.
 
 
 #### celery
 
-[Celery](http://celeryproject.org) (+ [rabbitmq](https://www.rabbitmq.com/))
+[Celery](http://celeryproject.org){:target="_blank"} (+ [rabbitmq](https://www.rabbitmq.com/){:target="_blank"})
 is a popular combination for realizing long running tasks in internet applications.
 We use it to drive the processing of uploaded files.
 It allows us to transparently distribute processing load while keeping processing state
@@ -70,36 +70,36 @@ available to inform the user.
 
 #### elastic search
 
-[Elasticsearch](https://www.elastic.co/webinars/getting-started-elasticsearch)
+[Elasticsearch](https://www.elastic.co/webinars/getting-started-elasticsearch){:target="_blank"}
 is used to store repository data (not the raw files).
 Elasticsearch enables flexible, scalable search and analytics.
 
 
 #### mongodb
 
-[Mongodb](https://docs.mongodb.com/) is used to store and track the state of the
+[Mongodb](https://docs.mongodb.com/){:target="_blank"} is used to store and track the state of the
 processing of uploaded files and the generated entries. We use
-[mongoengine](http://docs.mongoengine.org/) to program with mongodb.
+[mongoengine](http://docs.mongoengine.org/){:target="_blank"} to program with mongodb.
 
 
 #### Keycloak
 
-[Keycloak](https://www.keycloak.org/) is used for user management. It manages users and
+[Keycloak](https://www.keycloak.org/){:target="_blank"} is used for user management. It manages users and
 provides functions for registration, forgetting passwords, editing user accounts, and single
 sign-on to fairdi@nomad and other related services.
 
 
 #### FastAPI
 
-The ReSTful API is build with the [FastAPI](https://fastapi.tiangolo.com/)
-framework. This allows us to automatically derive a [OpenAPI](https://swagger.io/specification/) description
+The ReSTful API is built with the [FastAPI](https://fastapi.tiangolo.com/){:target="_blank"}
+framework. This allows us to automatically derive an [OpenAPI](https://swagger.io/specification/){:target="_blank"} description
 of the nomad API.
-Fruthermore, you can browse and use the API via [OpenAPI dashboard](https://swagger.io/tools/swagger-ui/).
+Furthermore, you can browse and use the API via [OpenAPI dashboard](https://swagger.io/tools/swagger-ui/){:target="_blank"}.
 
 
 #### Elasticstack
 
-The [elastic stack](https://www.elastic.co/guide/index.html)
+The [elastic stack](https://www.elastic.co/guide/index.html){:target="_blank"}
 (previously *ELK* stack) is a centralized logging, metrics, and monitoring
 solution that collects data within the cluster and provides a flexible analytics front end
 for that data.
@@ -108,12 +108,12 @@ for that data.
 #### Javascript, React, Material-UI
 
 The frontend (GUI) of **nomad@FAIRDI** is built on the
-[React](https://reactjs.org/docs/getting-started.html) component framework.
+[React](https://reactjs.org/docs/getting-started.html){:target="_blank"} component framework.
 This allows us to build the GUI as a set of re-usable components to
 achieve a coherent representations for all aspects of nomad, while keeping development
-efforts manageable. React uses [JSX](https://reactjs.org/docs/introducing-jsx.html)
+efforts manageable. React uses [JSX](https://reactjs.org/docs/introducing-jsx.html){:target="_blank"}
 (a ES6 variety) that allows to mix HTML with Javascript code.
-The component library [Material-UI](https://material-ui.com/)
+The component library [Material-UI](https://material-ui.com/){:target="_blank"}
 (based on Google's popular material design framework) provides a consistent look-and-feel.
 
 
@@ -123,24 +123,24 @@ To run a **nomad@FAIRDI** instance, many services have to be orchestrated:
 the nomad app, nomad worker, mongodb, Elasticsearch, Keycloak, RabbitMQ,
 Elasticstack (logging), the nomad GUI, and a reverse proxy to keep everything together.
 Further services might be needed (e.g. JypiterHUB), when nomad grows.
-The container platform [Docker](https://docs.docker.com/) allows us to provide all services
+The container platform [Docker](https://docs.docker.com/){:target="_blank"} allows us to provide all services
 as pre-build images that can be run flexibly on all types of platforms, networks,
-and storage solutions. [Docker-compose](https://docs.docker.com/compose/) allows us to
+and storage solutions. [Docker-compose](https://docs.docker.com/compose/){:target="_blank"} allows us to
 provide configuration to run the whole nomad stack on a single server node.
 
 
 #### kubernetes + helm
 
-To run and scale nomad on a cluster, you can use [kubernetes](https://kubernetes.io/docs/home/)
-to orchestrated the  necessary containers. We provide a [helm](https://docs.helm.sh/)
+To run and scale nomad on a cluster, you can use [kubernetes](https://kubernetes.io/docs/home/){:target="_blank"}
+to orchestrate the necessary containers. We provide a [helm](https://docs.helm.sh/){:target="_blank"}
 chart with all necessary service and deployment descriptors that allow you to set up and
 update nomad with only a few commands.
 
 
 #### GitLab
 
-Nomad as a software project is managed via [GitLab](https://docs.gitlab.com/).
-The **nomad@FAIRDI** project is hosted [here](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR).
+Nomad as a software project is managed via [GitLab](https://docs.gitlab.com/){:target="_blank"}.
+The **nomad@FAIRDI** project is hosted [here](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR){:target="_blank"}.
 GitLab is used to manage versions, different branches of development, tasks and issues,
-as a [registry for Docker images](https://docs.gitlab.com/ee/user/packages/container_registry/index.html),
-and [CI/CD platform](https://docs.gitlab.com/ee/ci/).
+as a [registry for Docker images](https://docs.gitlab.com/ee/user/packages/container_registry/index.html){:target="_blank"},
+and [CI/CD platform](https://docs.gitlab.com/ee/ci/){:target="_blank"}.
diff --git a/docs/explanation/basics.md b/docs/explanation/basics.md
index 0ec2f0cd17dce1697d2f7b7ca135cec86871e09c..08b058a0c2fc371c84d310256375ce82a0b079c4 100644
--- a/docs/explanation/basics.md
+++ b/docs/explanation/basics.md
@@ -7,7 +7,7 @@ basis to make data FAIR. It allows us to build search interfaces, APIs, visualiz
 analysis tools independent from specific file formats.
 
 <figure markdown>
-  ![datamodel](datamodel.png)
+  ![datamodel](images/datamodel.png)
   <figcaption>NOMAD's datamodel and processing</figcaption>
 </figure>
 
@@ -26,6 +26,15 @@ Raw files are managed by users and they are never changed by NOMAD.
     of related mainfiles to automatize ELNs, or
     generating additional files to convert a mainfile into a standardized format like nexus or cif.
 
+## Files
+
+We already said that all uploaded files are **raw files**. Recognized files that have an
+entry are called **mainfiles**. Only the mainfile of the entry is
+passed to the parser during processing. However, a parser can call other tools or read other files.
+Therefore, we consider all files in the same directory of the mainfile as **auxiliary files**,
+even though there is not necessarily a formal relationship with the entry. Whether
+formal relationships with aux files are established, e.g. via a reference to the file within
+the processed data, is up to the parser.
 
 ## Entries
 
@@ -46,22 +55,28 @@ manually.
     determined by the users. Contrary to the processed data that is created
     from raw files by NOMAD.
 
+## Datasets
+
+Users can build collections of entries to form **datasets**. You can imagine datasets
+like tags or albums in other systems. Each entry can be contain in many datasets and
+a dataset can hold many entries. Datasets can also overlap. Datasets are only
+indirectly related to files. The main purpose of **datasets** in NOMAD is to have citable
+collections of data. Users can get a DOI for their datasets. Datasets have no influence
+on the processing of data.
 
 ## Processing
 
-Also the processing of entries is automatic. Initially and on each mainfile change,
+The processing of entries is automatic. Initially and on each mainfile change,
 the entry corresponding to the mainfile, will be processed. Processing consist of
-**parsing**, **normalizing**, and storing the created data.
-
+**parsing**, **normalizing**, and **persisting** the created data, as explained in more detail in the [Processing section](processing.md).
 
 ### Parsing
 
 Parsers are small programs that transform data from a recognized *mainfile* into a
 structured machine processable tree of data that we call the *archive* or [**processed data**](data.md)
 of the entry. Only one parser is used for each entry. The used parser is determined
-during matching and depends on the file format. Parsers can be added to NOMAD as
-[plugins](../plugins/parsers.md); this is a list of [all built-in parsers](../reference/parsers.md).
-
+during matching and depends on the file format. [A dedicated guide](../howto/customization/parsers.md#match-your-raw-file) shows how to match a specific file from your parser. Parsers can be added to NOMAD as
+[plugins](../howto/customization/plugins_dev.md#develop-a-parser-plugin); this is a list of [all built-in parsers](../reference/parsers.md).
 
 !!! note
     A special case is the parsing of NOMAD archive files. Usually a parser converts a file
@@ -83,7 +98,7 @@ processed data. Learn more about why to normalize in the documentation on [struc
 There are two principle ways to implement normalization in NOMAD:
 **normalizers** and **normalize** functions.
 
-[Normalizers](../develop/normalizers.md) are small programs that take processed data as input.
+[Normalizers](../howto/customization/normalizers.md) are small programs that take processed data as input.
 There is a list of normalizers registered in the [NOMAD configuration](../reference/config.md#normalize).
 In the future, normalizers might be
 added as plugins as well. They run in the configured order. Every normalizer is run
@@ -91,41 +106,22 @@ on all entries and the normalizer might decide to do something or not, depending
 it sees in the processed data.
 
 Normalize functions are special functions implemented as part of section definitions
-in [Python schemas](../plugins/schemas.md#writing-schemas-in-python-compared-to-yaml-schemas).
+in [Python schemas](../howto/customization/plugins_dev.md#writing-schemas-in-python-compared-to-yaml-schemas).
 There is a special normalizer that will go through all processed data and execute these
 function if they are defined. Normalize functions get the respective section instance as
-input. This allows [schema plugin](../plugins/schemas.md) developers to add normalizing to their sections.
+input. This allows [schema plugin](../howto/customization/plugins_dev.md#develop-a-schema-plugin) developers to add normalizing to their sections.
 Read about our [structured data](./data.md) to learn more about the different sections.
 
 ### Storing and indexing
 
 As a last technical step, the processed data is stored and some information is passed
 into the search index. The store for processed data is internal to NOMAD and processed
-data cannot be accessed directly and only via the [archive API](../apis/api.md#access-processed-data-archives)
-or [ArchiveQuery](../apis/archive_query.md) Python library functionality.
+data cannot be accessed directly and only via the [archive API](../howto/programmatic/api.md#access-processed-data-archives)
+or [ArchiveQuery](../howto/programmatic/archive_query.md) Python library functionality.
 What information is stored in the search index is determined
 by the *metadata* and *results* sections and cannot be changed by users or plugins.
 However, all scalar values in the processed data are also index as key-values pairs.
 
-!!! attention
+!!! warning "Attention"
     This part of the documentation should be more substantiated. There will be a learn section
-    about the search soon.
-
-## Files
-
-We already said that all uploaded files are **raw files**. Recognized files that have an
-entry are called **mainfiles**. Only the mainfile of the entry is
-passed to the parser during processing. However, a parser can call other tools or read other files.
-Therefore, we consider all files in the same directory of the mainfile as **auxillary files**,
-even though there is not necessarily a formal relationship with the entry. If
-formal relationships with aux files are established, e.g. via a reference to the file within
-the processed data, is up to the parser.
-
-## Datasets
-
-Users can build collections of entries to form **datasets**. You can imagine datasets
-like tags or albums in other systems. Each entry can be contain in many datasets and
-a dataset can hold many entries. Datasets can also overlap. Datasets are only
-indirectly related to files. The main purpose of **datasets** in NOMAD is to have citable
-collections of data. Users can get a DOI for their datasets. Datasets have no influence
-on the processing of data.
+    about the search soon.
\ No newline at end of file
diff --git a/docs/explanation/data.md b/docs/explanation/data.md
index 7749d1c721a3ec90307263b229ecfa9098c386c7..5328fd6d7f6a95fe312e5b9c5fc8f34324e1e46b 100644
--- a/docs/explanation/data.md
+++ b/docs/explanation/data.md
@@ -1,5 +1,3 @@
-# Structured data and the NOMAD Metainfo
-
 NOMAD structures data into **sections**, where each section can contain data and more sections.
 This allows to browse complex data like you would browse files and directories on your computer.
 Each section follows a **definition** and all the contained data and sub-section have a
@@ -9,7 +7,7 @@ increases consistency and interoperability, enables search, APIs, visualization,
 analysis.
 
 <figure markdown>
-  ![processed data screenshot](screenshot.png)
+  ![processed data screenshot](images/screenshot.png)
   <figcaption>Browsing structured data in the NOMAD UI (<a href="https://nomad-lab.eu/prod/v1/gui/search/entries/entry/id/zQJMKax7xk384h_rx7VW_-6bRIgi/data/run/0/system/0/atoms/positions">link</a>)</figcaption>
 </figure>
 
@@ -17,12 +15,12 @@ analysis.
 ## Schema language
 
 The bases for structured data are schemas written in a **schema language**. Our
-schema language is called the **NOMAD Metainfo** language. It
-defines the tools to define sections, organize definitions into **packages**, and define
+schema language is called the **NOMAD Metainfo** language. The name is evocative of the rich **metadata information** that should be associated with the research data and made available in a machine-readable format.
+It defines the tools to define sections, organize definitions into **packages**, and define
 section properties (**sub-sections** and **quantities**).
 
 <figure markdown>
-  ![schema language](schema_language.png)
+  ![schema language](images/schema_language.png)
   <figcaption>The NOMAD Metainfo schema language for structured data definitions</figcaption>
 </figure>
 
@@ -38,7 +36,7 @@ Quantities are related to *properties*, *attributes*, *slots*, *columns*.
 Sub-sections might be called *containment* or *composition*. Sub-sections and quantities
 with a section type also define *relationships*, *links*, or *references*.
 
-Our guide on [how to write a schema](../schemas/basics.md) explains these concepts with an example.
+Our guide on [how to write a schema](../howto/customization/basics.md) explains these concepts with an example.
 
 ## Schema
 
@@ -56,7 +54,7 @@ complement the shared entry structure. They define specific data structures to r
 specific types of data.
 
 <figure markdown>
-  ![schema language](schema.png)
+  ![schema language](images/schema.png)
   <figcaption>
     The three different categories of NOMAD schema definitions
   </figcaption>
@@ -72,16 +70,16 @@ The goal is to re-use as much as possible and to not re-invent the same sections
 and over again. Tools build around certain base section, provide an incentive to
 use them.
 
-!!! attention
+!!! warning "Attention"
     There is no detailed how-to or reference documentation on the existing base sections
     and how to use them yet.
 
-One example for re-usable base section is the [workflow package](../schemas/workflows.md).
+One example for re-usable base section is the [workflow package](../howto/customization/workflows.md).
 These allow to define workflows in a common way. They allow to place workflows in
 the shared entry structure, and the UI provides a card with workflow visualization and
 navigation for all entries that have a workflow inside.
 
-!!! attention
+!!! warning "Attention"
     Currently there are two version of the workflow schema. They are stored in two
     top-level `EntryArchive` sub-sections (`workflow` and `workflow2`). This
     will change soon to something that supports multiple workflows used in
@@ -95,7 +93,7 @@ these kinda of data. Therefore, it is important to also translate (at least some
 into a more generic and standardized form.
 
 <figure markdown>
-  ![schema language](data.png)
+  ![schema language](images/data.png)
   <figcaption>
     From specific data to more general interoperable data.
   </figcaption>
@@ -121,12 +119,12 @@ normalization algorithm needs to be implemented.
 ### Exploring the schema
 
 All built-in definitions that come with NOMAD or one of the installed plugins can
-be explored with the [Metainfo browser](https://nomad-lab.eu/prod/v1/gui/analyze/metainfo/nomad.datamodel.datamodel.EntryArchive). You can start with the root section `EntryArchive`
+be explored with the [Metainfo browser](https://nomad-lab.eu/prod/v1/gui/analyze/metainfo/nomad.datamodel.datamodel.EntryArchive){:target="_blank"}. You can start with the root section `EntryArchive`
 and browse based on sub-sections, or explore the Metainfo through packages.
 
-To see all user provided uploaded schemas, you can use a [search for the sub-section `definition`](https://nomad-lab.eu/prod/v1/gui/search/entries?quantities=definitions).
+To see all user provided uploaded schemas, you can use a [search for the sub-section `definition`](https://nomad-lab.eu/prod/v1/gui/search/entries?quantities=definitions){:target="_blank"}.
 The sub-section `definition` is a top-level `EntryArchive` sub-section. See also our
-[how-to on writing and uploading schemas](../schemas/basics.md#uploading-schemas).
+[how-to on writing and uploading schemas](../howto/customization/basics.md#uploading-schemas).
 
 ### Contributing to the Metainfo
 
@@ -136,7 +134,7 @@ Contributions here are only possible through merge requests.
 
 Base sections can be contributed via plugins. Here they can be explored in the Metainfo
 browser, your plugin can provide more tools, and you can make use of normalize functions.
-See also our [how-to on writing schema plugins](../plugins/schemas.md). You could
+See also our [how-to on writing schema plugins](../howto/customization/plugins_dev.md#develop-a-schema-plugin). You could
 also provide base sections via uploaded schemas, but those are harder to explore and
 distribute to other NOMAD installations.
 
@@ -145,7 +143,7 @@ schemas, you most likely also upload data in archive files (or use ELNs to edit
 Here you can also provide schemas and data in the same file. In many case
 specific schemas will be small and only re-combine existing base sections.
 See also our
-[how-to on writing schemas](../schemas/basics.md).
+[how-to on writing schemas](../howto/customization/basics.md).
 
 ## Data
 
@@ -155,10 +153,10 @@ for all data. However, it is independent of the actual representation of data in
 or how it might be stored in a file or database.
 
 The Metainfo has many serialized forms. You can write `.archive.json` or `.archive.yaml`
-files yourself. NOMAD internally stores all processed data in [message pack](https://msgpack.org/). Some
+files yourself. NOMAD internally stores all processed data in [message pack](https://msgpack.org/){:target="_blank"}. Some
 of the data is stored in mongodb or elasticsearch. When you request processed data via
-API, you receive it in JSON. When you use the [ArchiveQuery](../apis/archive_query.md), all data is represented
-as Python objects (see also [here](../plugins/schemas.md#starting-example)).
+API, you receive it in JSON. When you use the [ArchiveQuery](../howto/programmatic/archive_query.md), all data is represented
+as Python objects (see also [a starting example](../howto/customization/plugins_dev.md#starting-example)).
 
 No matter what the representation is, you can rely on the structure, names, types, shapes, and units
 defined in the schema to interpret the data.
@@ -187,15 +185,15 @@ will use concrete definitions that inherit from `EntryData`. There are also spec
 sections, like `run` for simulation data and `nexus` for nexus data.
 
 !!! note
-    As shown in [Uploading schemas](../schemas/basics.md#uploading-schemas), one can, in principle, create an archive file with both `definitions` and one of the *data* sections filled, although this is not always desired because it will stick together a schema and a particular instance of that schema. They should be kept separate so that it is still possible to generate new data files from the same schema file.
+    As shown in [Uploading schemas](../howto/customization/basics.md#uploading-schemas), one can, in principle, create an archive file with both `definitions` and one of the *data* sections filled, although this is not always desired because it will stick together a schema and a particular instance of that schema. They should be kept separate so that it is still possible to generate new data files from the same schema file.
 
-!!! attention
+!!! warning "Attention"
     The results, originally only designed for computational data, will soon be revised
     an replaced by a different section. However, the necessity and function of a section
     like this remains.
 
 <figure markdown>
-  ![schema language](super_structure.png)
+  ![schema language](images/super_structure.png)
   <figcaption>
     All entries instantiate the same section share the same structure.
   </figcaption>
diff --git a/docs/explanation/architecture.png b/docs/explanation/images/architecture.png
similarity index 100%
rename from docs/explanation/architecture.png
rename to docs/explanation/images/architecture.png
diff --git a/docs/explanation/data.png b/docs/explanation/images/data.png
similarity index 100%
rename from docs/explanation/data.png
rename to docs/explanation/images/data.png
diff --git a/docs/explanation/datamodel.png b/docs/explanation/images/datamodel.png
similarity index 100%
rename from docs/explanation/datamodel.png
rename to docs/explanation/images/datamodel.png
diff --git a/docs/explanation/oasis-use-cases.png b/docs/explanation/images/oasis-use-cases.png
similarity index 100%
rename from docs/explanation/oasis-use-cases.png
rename to docs/explanation/images/oasis-use-cases.png
diff --git a/docs/explanation/processing-multi-single.png b/docs/explanation/images/processing-multi-single.png
similarity index 100%
rename from docs/explanation/processing-multi-single.png
rename to docs/explanation/images/processing-multi-single.png
diff --git a/docs/explanation/processing-single-multi.png b/docs/explanation/images/processing-single-multi.png
similarity index 100%
rename from docs/explanation/processing-single-multi.png
rename to docs/explanation/images/processing-single-multi.png
diff --git a/docs/explanation/processing-single-single.png b/docs/explanation/images/processing-single-single.png
similarity index 100%
rename from docs/explanation/processing-single-single.png
rename to docs/explanation/images/processing-single-single.png
diff --git a/docs/explanation/processing-spawn.png b/docs/explanation/images/processing-spawn.png
similarity index 100%
rename from docs/explanation/processing-spawn.png
rename to docs/explanation/images/processing-spawn.png
diff --git a/docs/explanation/processing.png b/docs/explanation/images/processing.png
similarity index 100%
rename from docs/explanation/processing.png
rename to docs/explanation/images/processing.png
diff --git a/docs/explanation/reader-writer.png b/docs/explanation/images/reader-writer.png
similarity index 100%
rename from docs/explanation/reader-writer.png
rename to docs/explanation/images/reader-writer.png
diff --git a/docs/explanation/schema.png b/docs/explanation/images/schema.png
similarity index 100%
rename from docs/explanation/schema.png
rename to docs/explanation/images/schema.png
diff --git a/docs/explanation/schema_language.png b/docs/explanation/images/schema_language.png
similarity index 100%
rename from docs/explanation/schema_language.png
rename to docs/explanation/images/schema_language.png
diff --git a/docs/explanation/screenshot.png b/docs/explanation/images/screenshot.png
similarity index 100%
rename from docs/explanation/screenshot.png
rename to docs/explanation/images/screenshot.png
diff --git a/docs/explanation/stack.png b/docs/explanation/images/stack.png
similarity index 100%
rename from docs/explanation/stack.png
rename to docs/explanation/images/stack.png
diff --git a/docs/explanation/super_structure.png b/docs/explanation/images/super_structure.png
similarity index 100%
rename from docs/explanation/super_structure.png
rename to docs/explanation/images/super_structure.png
diff --git a/docs/explanation/oasis.md b/docs/explanation/oasis.md
index 8a7ae97ca840806e7e574b025a12746e9e8b47a5..df1f38b2e7854aec05d38b906825cd0f67fd2d15 100644
--- a/docs/explanation/oasis.md
+++ b/docs/explanation/oasis.md
@@ -1,9 +1,9 @@
 The software that runs NOMAD is Open-Source and can be used independently of the NOMAD
-*central installation* at [http://nomad-lab.eu](http://nomad-lab.eu).
+*central installation* at [http://nomad-lab.eu](http://nomad-lab.eu){:target="_blank"}.
 We call any NOMAD installation that is not the *central* one a NOMAD Oasis.
 
 <figure markdown>
-  ![oasis use-cases](oasis-use-cases.png){ width=700 }
+  ![oasis use-cases](images/oasis-use-cases.png){ width=700 }
   <figcaption>NOMAD Oasis use-cases</figcaption>
 </figure>
 
@@ -15,5 +15,5 @@ uses and hybrids are imaginable:
 - Industry: Use of Oasis to manage private data and full internal use of published data in compliance with strict privacy policies
 - FAIRmat: Use Oasis to form a network of repositories to build a federated data infrastructure
 for materials science.
-This is what we do in the [FAIRmat project](https://www.fair-di.eu/fairmat/consortium).
+This is what we do in the [FAIRmat project](https://www.fair-di.eu/fairmat/consortium){:target="_blank"}.
 
diff --git a/docs/explanation/processing.md b/docs/explanation/processing.md
index 5e8c919ef283bc085839e001c45e528c7ef24778..d0861c983b2cfcca4ee801c6af4f6b4a6640e2a1 100644
--- a/docs/explanation/processing.md
+++ b/docs/explanation/processing.md
@@ -16,7 +16,7 @@ Those matched files are now [*mainfiles*](../reference/glossary.md#mainfile) and
 3. **Persisting** (including indexing of) the extracted data.
 
 <figure markdown>
-  ![processing](processing.png)
+  ![processing](images/processing.png)
   <figcaption>Processing steps and how they interact with files, entries, and archives.</figcaption>
 </figure>
 
@@ -62,16 +62,19 @@ In most scenarios, entry processing is not triggered individually, but as part o
 processing. Many entries of one upload might be processed at the same time. Some order
 can be enforced through *processing levels*. Levels are part of the parser metadata and
 entries paired to parsers with a higher level are processed after entries with a
-parser of lower level. See als [how to write parser plugins](../plugins/parsers.md).
+parser of lower level. See also [how to write parser plugins](../howto/customization/plugins_dev.md#develop-a-parser-plugin).
 
 
 ## Customize processing
 
 NOMAD provides just the framework for processing. The actual work depends on plugins, parsers
-and schemas for specific file types. While NOMAD comes with a build-in set of plugins, you can build
-your own plugins to support new file types, ELNs, and workflows.
+and schemas for specific file types. While NOMAD comes with a built-in set of plugins, you can build your own plugins to support new file types, ELNs, and workflows.
 
-### Plugins, schemas, parsers
+### Schemas, parsers, plugins
+
+The primary function of a parser is to systematically analyze and organize the incoming data, ensuring adherence to the established schema.
+The interaction between a parser and a schema plays a crucial role in ensuring data consistency to a predefined structure. It takes raw data inputs and utilizes the schema as a guide to interpret and organize the information correctly. By connecting the parser to the schema, users can establish a framework for the expected data structure. The modular nature of the parser and schema relationship allows for flexibility, as the parser can be designed to accommodate various schemas, making it adaptable to different data models or updates in research requirements.
+This process ensures that the resulting filled template meets the compliance standards dictated by the schema.
 
 Processing is run on the NOMAD (Oasis) server as part of the NOMAD app or worker. In
 principle, executed processing code can access all files, all databases, the underlying
@@ -87,10 +90,9 @@ section, might indirectly use custom processing functionality.
 
 A parser plugin can define a new parser and therefore add to the *matching*, *parsing*, (and *normalizing*).
 A schema plugin defines new sections that can contain `normalize` functions that add to the *normalizing*.
-See also the how-tos on [plugins](../plugins/plugins.md), [parsers](../plugins/parsers.md), and [schema](../plugins/schemas.md).
-
+See also the how-tos on [plugins installation](../howto/oasis/plugins_install.md) and the development of [parsers and schemas](../howto/customization/plugins_dev.md).
 
-### Matching
+#### Matching
 
 All parsers have a `is_mainfile` function. This is its signature:
 
@@ -113,7 +115,7 @@ uses certain criteria, for example:
 - regular expressions on mimetypes
 - regular expressions on header content
 
-See [How to write a parser](../plugins/parsers.md) for more details.
+See [How to write a parser](../howto/customization/plugins_dev.md#develop-a-parser-plugin) for more details.
 
 The matching step of an upload's processing, will call this function for every file
 and on all parsers. There are some hidden optimizations and additional parameters, but
@@ -122,7 +124,7 @@ The first matched parser will be used and the order of configured parser is impo
 If no parser can be matched, the file is not considered for processing and no entry
 is created.
 
-### Parsing
+#### Parsing
 
 All parsers have a `parse` function. This is its signature:
 
@@ -142,7 +144,6 @@ to access the file system, open other files, open the archives of other entries,
 create or update files, spawn the processing of created or updated files.
 See also the [create files, spawn entries scenario](#creating-files-spawning-entries).
 
-
 ### Normalizing
 
 After parsing, entries are "normalized". We distinguish *normalizers* and `normalize`
@@ -176,7 +177,7 @@ function implementation.
 ### Single file, single entry
 
 <figure markdown>
-  ![processing](processing-single-single.png)
+  ![processing](images/processing-single-single.png)
 </figure>
 
 This is the "normal" case. A parser is matched to a mainfile. During processing,
@@ -185,7 +186,7 @@ only the mainfile is read to populate the `EntryArchive` with data.
 ### Multiple files, single entry
 
 <figure markdown>
-  ![processing](processing-multi-single.png)
+  ![processing](images/processing-multi-single.png)
 </figure>
 
 Same as above: a parser is matched to a mainfile. But, during processing,
@@ -195,15 +196,15 @@ to open and read other *auxiliary* files.
 
 A notable special case are ELNs with `normalize` functions and references to files.
 ELNs can be designed to link the ELN with uploaded files via `FileEditQuantities` (see
-also [How to define ELNs](../schemas/elns.md#example-eln-schema) or [ELN Annotations](../reference/annotations.md#eln-annotations)).
+also [How to define ELNs](../howto/customization/elns.md#example-eln-schema) or [ELN Annotations](../reference/annotations.md#eln-annotations)).
 The underlying ELN's schema usually defines `normalize` functions that open the referenced
-files for more data. Certain modes of the [tabular parser](../schemas/tabular.md), for example, use
+files for more data. Certain modes of the [tabular parser](../howto/customization/tabular.md), for example, use
 this.
 
 ### Single file, multiple entries
 
 <figure markdown>
-  ![processing](processing-single-multi.png)
+  ![processing](images/processing-single-multi.png)
 </figure>
 
 A parser can match a mainfile in two ways. It's `is_mainfile` function can simply return `True`,
@@ -223,7 +224,7 @@ the entry identity is internally locked to the mainfile (and the respective key)
 ### Creating files, spawning entries
 
 <figure markdown>
-  ![processing](processing-spawn.png)
+  ![processing](images/processing-spawn.png)
 </figure>
 
 During processing, parsers or `normalize` functions can also create new (or update existing)
@@ -315,7 +316,7 @@ occurs. *Readers* and *writers* might be a good strategy to re-use parts of a pa
 but *reader* and *writer* implementations remain strongly connected and inter-dependent.
 
 <figure markdown>
-  ![processing](reader-writer.png)
+  ![processing](images/reader-writer.png)
   <figcaption>Read and written data items might overlap but are rarely the same.</figcaption>
 </figure>
 
diff --git a/docs/schemas/base_sections.md b/docs/howto/customization/base_sections.md
similarity index 91%
rename from docs/schemas/base_sections.md
rename to docs/howto/customization/base_sections.md
index 5cd02e4cf3efe2c21ede3f5fdf3abc32a60d063e..9744520c609fab789388c163254216be0691101d 100644
--- a/docs/schemas/base_sections.md
+++ b/docs/howto/customization/base_sections.md
@@ -1,6 +1,4 @@
----
-title: Base sections
----
+# How to use base sections
 
 As previously mentioned in ["How to write a schema"](basics.md#base-sections-and-inheritance),
 base sections can be used when writing custom schemas to inherit properties, and more
@@ -16,9 +14,9 @@ model.
     In this part of the documentation we use UML Class diagrams to illustrate the
     inheritance, composition and association between the base sections.
     For more information on UML Class diagrams please see
-    [en.wikipedia.org/wiki/Class_diagram](https://en.wikipedia.org/wiki/Class_diagram).
+    [en.wikipedia.org/wiki/Class_diagram](https://en.wikipedia.org/wiki/Class_diagram){:target="_blank"}.
 
-![entity activity model](base-sections.svg)
+![entity activity model](images/base-sections.svg)
 
 All the base sections defined in this model are abstract in the sense that they cannot be
 instantiated in NOMAD directly.
@@ -50,17 +48,17 @@ Theses are:
     "An object that persists, endures, or continues to exist through time while maintaining
     its identity."
 
-    See [BFO_0000002](http://purl.obolibrary.org/obo/BFO_0000002) for semantic context.
+    See [BFO_0000002](http://purl.obolibrary.org/obo/BFO_0000002){:target="_blank"} for semantic context.
 
 The `Entity` section is currently subclassed by `System`, `Collection` and `Instrument`.
 
-![entity sections](entity-sections.svg)
+![entity sections](images/entity-sections.svg)
 
 #### `Collection`
 
 The `Collection` section should be inherited when attempting to group entities together.
 
-![collection sections](collection-sections.svg)
+![collection sections](images/collection-sections.svg)
 
 !!! example
     The user wants to write a data schema for a batch of substrates.
@@ -85,7 +83,7 @@ the `lab_id` or vice versa.
 The `Instrument` section should be inherited when describing any tools used for material
 creation or characterization.
 
-![instrument sections](instrument-sections.svg)
+![instrument sections](images/instrument-sections.svg)
 
 #### `System`
 
@@ -100,7 +98,7 @@ There are two specializations of `System` which differentiates
 between the theoretical concept of a pure material, `PureSubstance`, and an actual physical
 material combining several pure substances, `CompositeSystem`.
 
-![system](system-sections.svg)
+![system](images/system-sections.svg)
 
 ##### `PubChemPureSubstanceSection`
 
@@ -124,20 +122,20 @@ Otherwise a search query is made for the filled attributes in the following orde
 
     "An action that has a temporal extension and for some time depends on some entity."
 
-    See [BFO_0000015](http://purl.obolibrary.org/obo/BFO_0000015) for semantic context.
+    See [BFO_0000015](http://purl.obolibrary.org/obo/BFO_0000015){:target="_blank"} for semantic context.
 
 The `Activity` section is currently subclassed by `Process`, `Measurement`, `Analysis`,
 and `Experiment`.
 These subclasses are intended to cover all types of activities and should be used instead
 of inheriting directly from `Activity`.
 
-![activity sections](activity-sections.svg)
+![activity sections](images/activity-sections.svg)
 
 #### `Experiment`
 
 The `Experiment` section should be inherited when attempting to group activities together.
 
-![experiment sections](experiment-sections.svg)
+![experiment sections](images/experiment-sections.svg)
 
 !!! example
     In a sample centric view the activities are grouped together by the sample but if the
@@ -159,21 +157,21 @@ The `Experiment` section should be inherited when attempting to group activities
      - sample preparative method
      - material transformations"
 
-    See [OBI_0000094](http://purl.obolibrary.org/obo/OBI_0000094) for semantic context.
+    See [OBI_0000094](http://purl.obolibrary.org/obo/OBI_0000094){:target="_blank"} for semantic context.
 
 The `Process` section is the base for the `SynthesisMethod` section which in turn is
 specialized further in the [`nomad-material-processing`](#plugin-nomad-material-processing)
 plugin detailed below.
 The main feature of the `Process` section is that it adds `ProcessSteps` with a duration.
 
-![process sections](process-sections.svg)
+![process sections](images/process-sections.svg)
 
 !!! info
     By "SynthesisMethod" we mean:
 
     "A method used to synthesise a sample."
 
-    See [CHMO_0001301](http://purl.obolibrary.org/obo/CHMO_0001301) for semantic context.
+    See [CHMO_0001301](http://purl.obolibrary.org/obo/CHMO_0001301){:target="_blank"} for semantic context.
 
 #### `Measurement`
 
@@ -183,12 +181,12 @@ The main feature of the `Process` section is that it adds `ProcessSteps` with a
     "A planned process with the objective to produce information about the material entity
     that is the evaluant, by physically examining it or its proxies. [ obi : pppb ]"
 
-    See [OBI_0000070](http://purl.obolibrary.org/obo/OBI_0000070) for semantic context.
+    See [OBI_0000070](http://purl.obolibrary.org/obo/OBI_0000070){:target="_blank"} for semantic context.
 
 The `Measurement` section adds `samples` which are references to instances of (subclasses
 of) `CompositeSystem`.
 
-![measurement sections](measurement-sections.svg)
+![measurement sections](images/measurement-sections.svg)
 
 #### `Analysis`
 
@@ -202,14 +200,14 @@ of) `CompositeSystem`.
      - data processing
      - data analysis"
 
-    See [OBI_0200000](http://purl.obolibrary.org/obo/OBI_0200000) for semantic context.
+    See [OBI_0200000](http://purl.obolibrary.org/obo/OBI_0200000){:target="_blank"} for semantic context.
 
 The `Analysis` section provides `inputs` which are references to any section (including
 sub sections) of some archive.
 In addition, it provides the `outputs` which is a repeating section of `AnalysisResult`
 which are intended to be further specialized by the user.
 
-![analysis sections](analysis-sections.svg)
+![analysis sections](images/analysis-sections.svg)
 
 ### `ReadableIdentifiers`
 
@@ -272,4 +270,4 @@ mainfile.
 
 This plugin contains more specialized base sections for material processing, is
 maintained by FAIRmat and is currently hosted on
-[https://github.com/FAIRmat-NFDI](https://github.com/FAIRmat-NFDI/AreaA-data_modeling_and_schemas).
\ No newline at end of file
+[https://github.com/FAIRmat-NFDI](https://github.com/FAIRmat-NFDI/AreaA-data_modeling_and_schemas){:target="_blank"}.
\ No newline at end of file
diff --git a/docs/schemas/basics.md b/docs/howto/customization/basics.md
similarity index 99%
rename from docs/schemas/basics.md
rename to docs/howto/customization/basics.md
index 7f8dc4a538bfe8c8e67fe9d8ef4a0f45ac63c982..cf825b927e53ee324d8702b51083f277d99acf17 100644
--- a/docs/schemas/basics.md
+++ b/docs/howto/customization/basics.md
@@ -1,6 +1,8 @@
-# Write NOMAD Schemas in YAML
+# How to write a schema
 
-This guide explains how to write and upload NOMAD schemas in our `.archive.yaml` format. For more information on how an archive file is composed, visit the [learn section on schemas](../explanation/data.md).
+<!-- # Write NOMAD Schemas in YAML -->
+
+This guide explains how to write and upload NOMAD schemas in our `.archive.yaml` format. For more information on how an archive file is composed, visit the [learn section on schemas](../../explanation/data.md).
 
 ## Example data
 
@@ -359,7 +361,7 @@ above:
 --8<-- "examples/docs/references/multiple_files/data.archive.yaml"
 ```
 
-!!! attention
+!!! warning "Attention"
     You cannot create definitions that lead to circular loading of `*.archive.yaml` files.
     Each `definitions` section in an NOMAD entry represents a schema *package*. Each *package*
     needs to be fully loaded and analyzed before it can be used by other *packages* in other entries.
diff --git a/docs/schemas/elns.md b/docs/howto/customization/elns.md
similarity index 86%
rename from docs/schemas/elns.md
rename to docs/howto/customization/elns.md
index 64ed0b0bf7e6e0c4f88ab8c76bc1d73411b6f945..1c8046f6eb75b20a9445a59cea2d39255f840376 100644
--- a/docs/schemas/elns.md
+++ b/docs/howto/customization/elns.md
@@ -1,8 +1,10 @@
-# Schemas for ELNs
+# How to define and use ELNs in NOMAD
+
+## Schemas for ELNs
 
 A schema defines all possible data structures. With small editions to our schemas, we can instruct NOMAD to provide respective editors for data. This allows us to build Electronic Lab Notebooks (ELNs) as tools to acquire data in a formal and structured way. For schemas with ELN annotations, users can create new entries in NOMAD GUI and edit the archive (structured data) of these entries directly in the GUI.
 
-## Annotations
+### Annotations
 
 Definitions in a schema can have annotations. With these annotations you can provide additional information that NOMAD can use to alter its behavior around these definitions. Annotations are named blocks of key-value pairs:
 
@@ -16,10 +18,11 @@ definitions:
           key2: value
 ```
 
-Many annotations control the representation of data in the GUI. This can be for plots or data entry/editing capabilities. There are three main categories of annotations relevant to ELNs. You find a reference of all annotations [here](../reference/annotations.md).
+Many annotations control the representation of data in the GUI. This can be for plots or data entry/editing capabilities. There are three main categories of annotations relevant to ELNs. You find a reference of all annotations [here](../../reference/annotations.md).
 
-## Example ELN schema
+### Example ELN schema
 The is the commented ELN schema from our ELN example upload that can be created from NOMAD's upload page:
 ```yaml
 --8<-- "examples/data/eln/schema.archive.yaml"
 ```
+
diff --git a/docs/schemas/hdf5.md b/docs/howto/customization/hdf5.md
similarity index 96%
rename from docs/schemas/hdf5.md
rename to docs/howto/customization/hdf5.md
index 1218123e9314b3141ae003bb9bc8ead2e4e11452..5a54086d789d40e2653116e15dc8165cbce53d5f 100644
--- a/docs/schemas/hdf5.md
+++ b/docs/howto/customization/hdf5.md
@@ -1,6 +1,5 @@
----
-title: Large data
----
+# How to reference HDF5 files
+
 The NOMAD schemas and processed data system is designed to describe and manage
 intricate hierarchies of connected data. This is ideal for metadata and lots of small
 data quantities, but does not work for large quantities. Quantities are atomic and
@@ -32,12 +31,12 @@ HDF5 files. Structuring HDF5 files and processed data alike, might simplify late
 NOMAD clients (e.g. NOMAD UI) can pick up on these `HDF5Reference` quantities and
 provide respective functionality (e.g. showing a H5Web view).
 
-!!! attention
+!!! warning "Attention"
 
     This part of the documentation is still work in progress.
 
 ## Metadata for large quantities
 
-!!! attention
+!!! warning "Attention"
 
     This will be implemented and documented soon.
\ No newline at end of file
diff --git a/docs/schemas/2col.png b/docs/howto/customization/images/2col.png
similarity index 100%
rename from docs/schemas/2col.png
rename to docs/howto/customization/images/2col.png
diff --git a/docs/schemas/2col_notes.png b/docs/howto/customization/images/2col_notes.png
similarity index 100%
rename from docs/schemas/2col_notes.png
rename to docs/howto/customization/images/2col_notes.png
diff --git a/docs/schemas/activity-sections.svg b/docs/howto/customization/images/activity-sections.svg
similarity index 100%
rename from docs/schemas/activity-sections.svg
rename to docs/howto/customization/images/activity-sections.svg
diff --git a/docs/schemas/analysis-sections.svg b/docs/howto/customization/images/analysis-sections.svg
similarity index 100%
rename from docs/schemas/analysis-sections.svg
rename to docs/howto/customization/images/analysis-sections.svg
diff --git a/docs/schemas/base-sections.svg b/docs/howto/customization/images/base-sections.svg
similarity index 100%
rename from docs/schemas/base-sections.svg
rename to docs/howto/customization/images/base-sections.svg
diff --git a/docs/schemas/collection-sections.svg b/docs/howto/customization/images/collection-sections.svg
similarity index 100%
rename from docs/schemas/collection-sections.svg
rename to docs/howto/customization/images/collection-sections.svg
diff --git a/docs/schemas/columns.png b/docs/howto/customization/images/columns.png
similarity index 100%
rename from docs/schemas/columns.png
rename to docs/howto/customization/images/columns.png
diff --git a/docs/schemas/entity-sections.svg b/docs/howto/customization/images/entity-sections.svg
similarity index 100%
rename from docs/schemas/entity-sections.svg
rename to docs/howto/customization/images/entity-sections.svg
diff --git a/docs/schemas/example-workflow.png b/docs/howto/customization/images/example-workflow.png
similarity index 100%
rename from docs/schemas/example-workflow.png
rename to docs/howto/customization/images/example-workflow.png
diff --git a/docs/schemas/experiment-sections.svg b/docs/howto/customization/images/experiment-sections.svg
similarity index 100%
rename from docs/schemas/experiment-sections.svg
rename to docs/howto/customization/images/experiment-sections.svg
diff --git a/docs/schemas/instrument-sections.svg b/docs/howto/customization/images/instrument-sections.svg
similarity index 100%
rename from docs/schemas/instrument-sections.svg
rename to docs/howto/customization/images/instrument-sections.svg
diff --git a/docs/schemas/measurement-sections.svg b/docs/howto/customization/images/measurement-sections.svg
similarity index 100%
rename from docs/schemas/measurement-sections.svg
rename to docs/howto/customization/images/measurement-sections.svg
diff --git a/docs/schemas/process-sections.svg b/docs/howto/customization/images/process-sections.svg
similarity index 100%
rename from docs/schemas/process-sections.svg
rename to docs/howto/customization/images/process-sections.svg
diff --git a/docs/schemas/rows.png b/docs/howto/customization/images/rows.png
similarity index 100%
rename from docs/schemas/rows.png
rename to docs/howto/customization/images/rows.png
diff --git a/docs/schemas/rows_subsection.png b/docs/howto/customization/images/rows_subsection.png
similarity index 100%
rename from docs/schemas/rows_subsection.png
rename to docs/howto/customization/images/rows_subsection.png
diff --git a/docs/schemas/system-sections.svg b/docs/howto/customization/images/system-sections.svg
similarity index 100%
rename from docs/schemas/system-sections.svg
rename to docs/howto/customization/images/system-sections.svg
diff --git a/docs/schemas/workflow-schema.png b/docs/howto/customization/images/workflow-schema.png
similarity index 100%
rename from docs/schemas/workflow-schema.png
rename to docs/howto/customization/images/workflow-schema.png
diff --git a/docs/develop/normalizers.md b/docs/howto/customization/normalizers.md
similarity index 89%
rename from docs/develop/normalizers.md
rename to docs/howto/customization/normalizers.md
index 6451afb7c2d2bbb3e4604f9d1acbdb6b3f7fcb19..bcfd9f87212a82502804b9f6a926a1316e563612 100644
--- a/docs/develop/normalizers.md
+++ b/docs/howto/customization/normalizers.md
@@ -14,6 +14,13 @@ A normalizer is run in any case, but it might choose not to do anything. A norma
 can perform any operation on the archive, but in general it should only add more
 information, not alter existing information.
 
+<!-- TODO Everything below needs to be checked, as it is combining 2 distinct changes during rebase!! -->
+
+## Getting started
+
+Fork and clone the [normalizer example project](https://github.com/nomad-coe/nomad-normalizer-plugin-example) as described in [How to develop and publish plugins](plugins_dev.md). Follow the original [How to write a parser](parsers.md).
+
+{{pydantic_model('nomad.config.plugins.Normalizer', heading='### Normalizer plugin metadata')}}
 ## Starting example
 
 This is an example for a very simple normalizer that computes the unit cell volume from
diff --git a/docs/develop/parsers.md b/docs/howto/customization/parsers.md
similarity index 97%
rename from docs/develop/parsers.md
rename to docs/howto/customization/parsers.md
index a2c561c87c196defdb9341b59752c30eae6d8b87..f29a1c184fec2d3c3977c63110dc9233c92ab6df 100644
--- a/docs/develop/parsers.md
+++ b/docs/howto/customization/parsers.md
@@ -25,7 +25,7 @@ your fork accordingly.
 
 The project structure should be:
 
-```
+```txt
 ├── example
 │   ├── exampleparser
 │   │   ├── __init__.py
@@ -87,6 +87,12 @@ respective `program.name` as in the following:
 }
 ```
 
+## Match your raw file
+
+!!! warning "Attention"
+    This part of the documentation should be more substantiated. There will be a section
+    about this topic soon.
+
 ## Parsing test files
 
 We will now show you how to parse ASCII files containing some structure information, a
@@ -300,7 +306,7 @@ on which files. To accomplish this, specific parser attributes are matched to a
 file. These are specified by interfacing the parser with `MatchingParser`. There are a
 couple of ways to do this, first as a plug-in (`nomad.config.__init__.py::plugins`) and
 second, directly adding it to the list of parsers (`nomad.parsing.parsers.py::parsers`),
-the former being the preferred route. See [how to write parser plug-ins](../plugins/parsers.md)
+the former being the preferred route. See [how to write parser plug-ins](plugins_dev.md#develop-a-parser-plugin)
 to learn more.
 
 ```python
@@ -338,7 +344,7 @@ MatchingParserInterface(
 Not all of these attributes have to be used. Those that are given must all match in order
 to use the parser on a file.
 
-The NOMAD infrastructure keeps a [list of parser](../reference/parsers.md#supported-parsers) objects (in
+The NOMAD infrastructure keeps a [list of parser](../../reference/parsers.md#supported-parsers) objects (in
 `nomad/parsing/parsers.py::parsers`). These parsers are considered in the order they
 appear in the list. The first matching parser is used to parse a given file.
 
diff --git a/docs/plugins/schemas.md b/docs/howto/customization/plugins_dev.md
similarity index 85%
rename from docs/plugins/schemas.md
rename to docs/howto/customization/plugins_dev.md
index 535a783d19c50ccdd4dfe5d351431eecd33a8e23..2b8d72e91a3b8644909b22e562c81af27824715f 100644
--- a/docs/plugins/schemas.md
+++ b/docs/howto/customization/plugins_dev.md
@@ -1,15 +1,27 @@
-## Getting started
+# How to develop and publish plugins
 
-Fork and clone the [schema example project](https://github.com/nomad-coe/nomad-schema-plugin-example) as described in [before](plugins.md).
+We provide template projects on GitHub. You can fork these projects and follow the
+instructions in their `README.md`. These instructions will give you everything you
+need to run and test your plugin as a plugin developer.<br />
+The [How-to install a plugin](../oasis/plugins_install.md) section explains how to add plugins to a NOMAD installation.
+Dedicated Explanation sections provide more background information on [what is a schema](../../explanation/data.md#schema) and [what is a parser](../../explanation/data.md#parser).
 
-## Writing schemas in Python compared to YAML schemas
+- [schema plugin](https://github.com/nomad-coe/nomad-schema-plugin-example){:target="_blank"}
+- [parser plugin](https://github.com/nomad-coe/nomad-parser-plugin-example){:target="_blank"}
 
-In this [guide](../schemas/basics.md), we explain how to write and upload schemas in the `.archive.yaml` format. Writing and uploading such YAML schemas is a good way for NOMAD users to add schemas. But it has limitations. As a NOMAD developer or Oasis administrator you can add Python schemas to NOMAD. All build in NOMAD schemas (e.g. for electronic structure code data) are written an Python and are part of the NOMAD sources (`nomad.datamodel.metainfo.*`).
+## Develop a schema plugin
 
-There is a 1-1 translation between Python schemas (written in classes) and YAML (or JSON) schemas (written in objects). Both use the same fundamental concepts, like *section*, *quantity*, or *sub-section*, introduced in [YAML schemas](../schemas/basics.md).
+### Getting started
 
+Fork and clone the [schema example project](https://github.com/nomad-coe/nomad-schema-plugin-example){:target="_blank"} as described in [How-to install a plugin](../oasis/plugins_install.md).
 
-## Starting example
+### Writing schemas in Python compared to YAML schemas
+
+In this [guide](basics.md), we explain how to write and upload schemas in the `.archive.yaml` format. Writing and uploading such YAML schemas is a good way for NOMAD users to add schemas. But it has limitations. As a NOMAD developer or Oasis administrator you can add Python schemas to NOMAD. All built-in NOMAD schemas (e.g. for electronic structure code data) are written in Python and are part of the NOMAD sources (`nomad.datamodel.metainfo.*`).
+
+There is a 1-1 translation between Python schemas (written in classes) and YAML (or JSON) schemas (written in objects). Both use the same fundamental concepts, like *section*, *quantity*, or *sub-section*, introduced in [YAML schemas](basics.md).
+
+### Starting example
 
 ```python
 from nomad.metainfo import MSection, Quantity, SubSection, Units
@@ -90,12 +102,12 @@ This will convert the data into JSON:
 }
 ```
 
-## Definitions
+### Definitions
 
 The following describes the schema language (the sum of all possible definitions) and how it is expressed in Python.
 
 
-### Common attributes of Metainfo Definitions
+#### Common attributes of Metainfo Definitions
 
 In the example, you have already seen the basic Python interface to the Metainfo. *Sections* are
 represented in Python as objects. To define a section, you write a Python classe that inherits
@@ -116,7 +128,7 @@ from its Python property, etc.
 - `more`, a dictionary of custom information. Any additional `kwargs` set when creating a definition
     are added to `more`.
 
-### Sections
+#### Sections
 
 Sections are defined with Python classes that extend `MSection` (or other section classes).
 
@@ -127,7 +139,8 @@ quantities) from all base classes. If this is `True`, all definitions in this se
 will be added to the properties of the base class section. This allows the extension of existing
 sections with additional properties.
 
-### Quantities
+#### Quantities
+
 Quantity definitions are the main building block of meta-info schemas. Each quantity
 represents a single piece of data. Quantities can be defined by:
 
@@ -138,11 +151,12 @@ a reference type.
 - A `shape` that defines the dimensionality of the quantity. Examples are: `[]` (number),
 `['*']` (list), `[3, 3]` (3 by 3 matrix), `['n_elements']` (a vector of length defined by
 another quantity `n_elements`).
-- A physics `unit`. We use [Pint](https://pint.readthedocs.io/en/stable/) here. You can
+- A physics `unit`. We use [Pint](https://pint.readthedocs.io/en/stable/){:target="_blank"} here. You can
 use unit strings that are parsed by Pint, e.g. `meter`, `m`, `m/s^2`. As a convention the
 metainfo uses only SI units.
 
-### Sub-Section
+#### Sub-Section
+
 A sub-section defines a named property of a section that refers to another section. It
 allows to define that a section can contain another section.
 
@@ -151,7 +165,7 @@ be contained.
 - `repeats` is a boolean that determines whether the sub-section relationship allows multiple section
 or only one.
 
-### References and Proxies
+#### References and Proxies
 
 Beside creating hierarchies (e.g. tree structures) with subsections, the metainfo
 also allows to create cross references between sections and other sections or quantity
@@ -210,7 +224,7 @@ class Calculation(MSection):
 The strings given to `SectionProxy` are paths within the available definitions.
 The above example works, if `System` is eventually defined in the same package.
 
-### Categories
+#### Categories
 
 In the old meta-info this was known as *abstract types*.
 
@@ -224,7 +238,7 @@ class CategoryName(MCategory):
     m_def = Category(links=['http://further.explanation.eu'], categories=[ParentCategory])
 ```
 
-### Packages
+#### Packages
 
 Metainfo packages correspond to Python packages. Typically your metainfo Python files should follow this pattern:
 ```python
@@ -237,13 +251,13 @@ m_package = Package()
 m_package.__init_metainfo__()
 ```
 
-## Adding Python schemas to NOMAD
+### Adding Python schemas to NOMAD
 
 Now you know how to write a schema as a Python module, but how should you
 integrate new schema modules into the existing code and what conventions need to be
 followed?
 
-### Schema super structure
+#### Schema super structure
 
 You should follow the basic [developer's getting started](../develop/setup.md) to setup a development environment. This will give you all the necessary libraries and allows you
 to place your modules into the NOMAD code.
@@ -267,7 +281,7 @@ metadata. It also contains the root section `EntryArchive`.
 For example the section `run` with all the simulation definitions (computational material science definitions)
 that are shared among the respective parsers.
 
-### Extending existing sections
+#### Extending existing sections
 
 Parsers can provide their own definitions. By conventions, these are placed into a
 `metainfo` sub-module of the parser Python module. The definitions here can add properties
@@ -285,7 +299,7 @@ class MyCodeRun(Method)
         type=MEnum('hpc', 'parallel', 'single'), description='...')
 ```
 
-### Schema conventions
+#### Schema conventions
 
 - Use lower snake case for section properties; use upper camel case for section definitions.
 - Use a `_ref` suffix for references.
@@ -296,11 +310,11 @@ workflow quantities.
 short handle of a code name or other special method prefix.
 
 
-## Use Python schemas to work with data
+### Use Python schemas to work with data
 
-### Access structured data via API
+#### Access structured data via API
 
-The [API section](../apis/api.md#access-archives) demonstrates how to access an Archive, i.e.
+The [API section](../programmatic/api.md#access-archives) demonstrates how to access an Archive, i.e.
 retrieve the processed data from a NOAMD entry. This API will give you JSON data likes this:
 
 ```json title="https://nomad-lab.eu/prod/v1/api/v1/entries/--dLZstNvL_x05wDg2djQmlU_oKn/archive"
@@ -351,11 +365,11 @@ To learn what each key means, you need to look up its definition in the Metainfo
 {{ metainfo_data() }}
 
 
-### Wrap data with Python schema classes
+#### Wrap data with Python schema classes
 
 In Python, JSON data is typically represented as nested combinations of dictionaries
 and lists. Of course, you could work with this right away. To make it easier for Python
-programmers the [NOMAD Python package](../apis/pythonlib.md) allows you to use this
+programmers the [NOMAD Python package](../programmatic/pythonlib.md) allows you to use this
 JSON data with a higher level interface, which provides the following advantages:
 
 - code completion in dynamic coding environments like Jupyter notebooks
@@ -383,13 +397,13 @@ import json
 print(json.dumps(calc.m_to_dict(), indent=2))
 ```
 
-### Access structured data via the NOMAD Python package
+#### Access structured data via the NOMAD Python package
 
 The NOMAD Python package provides utilities to [query large amounts of
-archive data](/archive_query.md). This uses the built-in Python schema classes as
+archive data](../programmatic/archive_query.md). This uses the built-in Python schema classes as
 an interface to the data.
 
-## Custom normalizers
+### Custom normalizers
 
 For custom schemas, you might want to add custom normalizers. All files are parsed
 and normalized when they are uploaded or changed. The NOMAD metainfo Python interface
@@ -433,4 +447,12 @@ we will get a final normalized archive that contains our data like this:
 }
 ```
 
-{{pydantic_model('nomad.config.plugins.Schema', heading='## Schema plugin metadata')}}
\ No newline at end of file
+{{pydantic_model('nomad.config.plugins.Schema', heading='### Schema plugin metadata')}}
+
+## Develop a Parser plugin
+
+NOMAD uses parsers to convert raw code input and output files into NOMAD's common Archive format. This is the documentation on how to develop such a parser.
+
+Fork and clone the [parser example project](https://github.com/nomad-coe/nomad-parser-plugin-example){:target="_blank"} as described in [How-to install a plugin](../oasis/plugins_install.md). Follow the original [how-to on writing a parser](parsers.md).
+
+{{pydantic_model('nomad.config.plugins.Parser', heading='### Parser plugin metadata', hide=['code_name','code_category','code_homepage','metadata'])}}
\ No newline at end of file
diff --git a/docs/schemas/tabular.md b/docs/howto/customization/tabular.md
similarity index 92%
rename from docs/schemas/tabular.md
rename to docs/howto/customization/tabular.md
index 33ddeb704a98647996771454f240959b7c3daef2..15e0b7d63d8896b46adca90149e5e7938bc1afaf 100644
--- a/docs/schemas/tabular.md
+++ b/docs/howto/customization/tabular.md
@@ -1,11 +1,13 @@
-Refer to the [Reference guide](../reference/annotations.md) for the full list of annotations connected to this parser and to the [Tabular parser tutorial](../tutorial/custom.md#the-built-in-tabular-parser)  for a detailed description of each of them.
+# How to use tabular parser
+
+Refer to the [Reference guide](../../reference/annotations.md) for the full list of annotations connected to this parser and to the [Tabular parser tutorial](../../tutorial/custom.md#the-built-in-tabular-parser) for a detailed description of each of them.
 
 ## Preparing the tabular data file
 
-NOMAD and `Excel` support multiple-sheets data manipulations and imports. Each quantity in the schema will be annotated with a source path composed by sheet name and column header. The path to be used with the tabular data displayed below would be `Sheet1/My header 1` and it would be placed it the `tabular` annotation, see [Schema annotations](../tutorial/custom.md#to-be-an-entry-or-not-to-be-an-entry) section.
+NOMAD and `Excel` support multiple-sheets data manipulations and imports. Each quantity in the schema will be annotated with a source path composed by sheet name and column header. The path to be used with the tabular data displayed below would be `Sheet1/My header 1` and it would be placed in the `tabular` annotation, see [Schema annotations](../../tutorial/custom.md#to-be-an-entry-or-not-to-be-an-entry) section.
 
 <p align="center" width="100%">
-    <img width="30%" src="2col.png">
+    <img width="30%" src="images/2col.png">
 </p>
 
 In the case there is only one sheet in the Excel file, or when using a `.csv` file that is a single-sheet format, the sheet name is not required in the path.
@@ -16,14 +18,14 @@ The data sheets can be stored in one or more files depending on the user needs.
  each column contains an array of cells that we want to parse into one quantity. Example: time and temperature arrays to be plotted as x and y.
 
 <p align="center" width="100%">
-    <img width="30%" src="columns.png">
+    <img width="30%" src="images/columns.png">
 </p>
 
 2) Rows:<br />
  each row contains a set of cells that we want to parse into a section, i. e. a set of quantities. Example: an inventory tabular data file (for substrates, precursors, or more) where each column represents a property and each row corresponds to one unit stored in the inventory.
 
 <p align="center" width="100%">
-    <img width="30%" src="rows.png">
+    <img width="30%" src="images/rows.png">
 </p>
 
 3) Rows with repeated columns:<br />
@@ -32,13 +34,13 @@ The data sheets can be stored in one or more files depending on the user needs.
 in addition to the mode 2), whenever the parser detects the presence of multiple columns (or multiple sets of columns) with same headers, these are taken as multiple instances of a subsection. More explanations will be delivered when showing the schema for such a structure. Example: a crystal growth process where each row is a step of the crystal growth and the repeated columns describe the "precursor materials", that can be more than one during such processes and they are described by the same "precursor material" section.
 
 <p align="center" width="100%">
-    <img width="45%" src="rows_subsection.png">
+    <img width="45%" src="images/rows_subsection.png">
 </p>
 
 Furthermore, we can insert comments before our data, we can use a special character to mark one or more rows as comment rows. The special character is annotated within the schema in the [parsing options](#parsing-options) section:
 
 <p align="center" width="100%">
-    <img width="30%" src="2col_notes.png">
+    <img width="30%" src="images/2col_notes.png">
 </p>
 
 ## Inheriting the TableData base section
@@ -74,16 +76,16 @@ losing/overwriting your manually-entered data by the parser!
 After writing a schema file and creating a new upload in NOMAD (or using an existing upload), it is possible to upload the schema file. After creating a new Entry out of one section of the schema, the tabular data file must be dropped in the quantity designated by the `FileEditQuantity` annotation. After clicking save the parsing will start. In the Overview page of the NOMAD upload, new Entries are created and appended to the Processed data section. In the Entry page, clicking on DATA tab (on top of the screen) and in the Entry lane, the data is populated under the `data` subsection.
 ## Hands-on examples of all tabular parser modes
 
-In this section eight examples will be presented, containing all the features available in tabular parser. Refer to the [Tutorial](../tutorial/custom.md#to-be-an-entry-or-not-to-be-an-entry) for more comments on the implications of the structures generated by the following yaml files.
+In this section eight examples will be presented, containing all the features available in tabular parser. Refer to the [Tutorial](../../tutorial/custom.md#to-be-an-entry-or-not-to-be-an-entry) for more comments on the implications of the structures generated by the following yaml files.
 
 
 ### 1. Column mode, current Entry, parse to root
 
 <p align="center" width="100%">
-    <img width="100%" src="../tutorial/tabular-1.png">
+    <img width="100%" src="../tutorial/images/tabular-1.png">
 </p>
 
-The first case gives rise to the simplest data archive file. Here the tabular data file is parsed by columns, directly within the Entry where the `TableData` is inherited and filling the quantities in the root level of the schema (see dedicated how-to to learn [how to inherit tabular parser in your schema](../schemas/tabular.md#inheriting-the-tabledata-base-section)).
+The first case gives rise to the simplest data archive file. Here the tabular data file is parsed by columns, directly within the Entry where the `TableData` is inherited and filling the quantities in the root level of the schema (see dedicated how-to to learn [how to inherit tabular parser in your schema](tabular.md#inheriting-the-tabledata-base-section)).
 
 !!! important
     - `data_file` quantity, i.e. the tabular data file name, is located in the same Entry of the parsed quantities.
@@ -97,7 +99,7 @@ The first case gives rise to the simplest data archive file. Here the tabular da
 ### 2. Column mode, current Entry, parse to my path
 
 <p align="center" width="100%">
-    <img width="100%" src="../tutorial/tabular-2.png">
+    <img width="100%" src="../../tutorial/images/tabular-2.png">
 </p>
 
 The parsing mode presented here only differs from the previous for the `sections` annotations. In this case the section that we want to fill with tabular data can be nested arbitrarily deep in the schema and the `sections` annotation must be filled with a forward slash path to the desired section, e. g. `my_sub_section/my_sub_sub_section`.
@@ -115,7 +117,7 @@ The parsing mode presented here only differs from the previous for the `sections
 ### 3. Row mode, current Entry, parse to my path
 
 <p align="center" width="100%">
-    <img width="100%" src="../tutorial/tabular-3.png">
+    <img width="100%" src="../../tutorial/images/tabular-3.png">
 </p>
 
 The current is the first example of parsing in row mode. This means that every row of the excel file while be placed in one instance of the section that is defined in `sections`. This section must be decorated with `repeats: true` annotation, it will allow to generate multiple instances that will be appended in a list with sequential numbers. Instead of sequential numbers, the list can show specific names if `label_quantity` annotation is appended to the repeated section. This annotation is included in the how-to example. The section is written separately in the schema and it does not need the `EntryData` inheritance because the instances will be grafted directly in the current Entry. As explained [below](#91-row-mode-current-entry-parse-to-root), it is not possible for `row` and `current_entry` to parse directly in the root because we need to create multiple instances of the selected subsection and organize them in a list.
@@ -135,7 +137,7 @@ The current is the first example of parsing in row mode. This means that every r
 ### 4. Column mode, single new Entry, parse to my path
 
 <p align="center" width="100%">
-    <img width="100%" src="../tutorial/tabular-4.png">
+    <img width="100%" src="../../tutorial/images/tabular-4.png">
 </p>
 
 One more step of complexity is added here: the parsing is not performed in the current Entry, but a new Entry it automatically generated and filled.
@@ -155,7 +157,7 @@ This structure foresees a parent Entry where we collect one or more tabular data
 ### 5. Row mode, single new Entry, parse to my path
 
 <p align="center" width="100%">
-    <img width="100%" src="../tutorial/tabular-5.png">
+    <img width="100%" src="../../tutorial/images/tabular-5.png">
 </p>
 
 Example analogous to the previous, where the new created Entry contains now a repeated subsection with a list of instances made from each line of the tabular data file, as show in the [Row mode, current Entry, parse to my path](#3-row-mode-current-entry-parse-to-my-path) case.
@@ -176,7 +178,7 @@ Example analogous to the previous, where the new created Entry contains now a re
 ### 6. Row mode, multiple new entries, parse to root
 
 <p align="center" width="100%">
-    <img width="100%" src="../tutorial/tabular-6.png">
+    <img width="100%" src="../../tutorial/images/tabular-6.png">
 </p>
 
 The last feature available for tabular parser is now introduced: `multiple_new_entries`. It is only meaningful for `row` mode because each row of the tabular data file will be placed in a new Entry that is an instance of a class defined in the schema, this would not make sense for columns, though, as they usually need to be parsed all together in one class of the schema, for example the "timestamp" and "temperature" columns in a spreadsheet file would need to lie in the same class as they belong to the same part of experiment.
@@ -197,7 +199,7 @@ A further comment is needed to explain the combination of this feature with `roo
 ### 7. Row mode, multiple new entries, parse to my path
 
 <p align="center" width="100%">
-    <img width="100%" src="../tutorial/tabular-7.png">
+    <img width="100%" src="../../tutorial/images/tabular-7.png">
 </p>
 
 As anticipated in the previous example, `row` mode in connection to `multiple_new_entries` will produce a manyfold of instances of a specific class, each of them being a new Entry. In the present case, each instance will also automatically be placed in a `ReferenceEditQuantity` quantity lying in a subsection defined within the parent Entry, coloured in plum in the following example image.
@@ -218,7 +220,7 @@ As anticipated in the previous example, `row` mode in connection to `multiple_ne
 ### 8. The Sub-Subsection nesting schema
 
 <p align="center" width="100%">
-    <img width="100%" src="../tutorial/tabular-8.png">
+    <img width="100%" src="../../tutorial/images/tabular-8.png">
 </p>
 
 If the tabular data file contains multiple columns with exact same name, there is a way to parse them using `row` mode. As explained in previous examples, this mode creates an instance of a subsection of the schema for each row of the file. Whenever column with same name are found they are interpreted as multiple instances of a sub-subsection nested inside the subsection. To build a schema with such a feature it is enough to have two nested classes, each of them bearing a `repeats: true` annotation. This structure can be applied to each and every of the cases above with `row` mode parsing.
diff --git a/docs/schemas/workflows.md b/docs/howto/customization/workflows.md
similarity index 96%
rename from docs/schemas/workflows.md
rename to docs/howto/customization/workflows.md
index bd6c42b88a61c6f3a8c8a86a88e961cabbe41ab8..05cecdfad3b77502b25e577a8f443e7d568c02ef 100644
--- a/docs/schemas/workflows.md
+++ b/docs/howto/customization/workflows.md
@@ -1,6 +1,4 @@
----
-title: Workflows
----
+# How to define workflows
 
 ## The built-in abstract workflow schema
 
@@ -12,7 +10,7 @@ performed . This often is also referred to as *data provenance* or *provenance g
 The following shows the overall abstract schema for *worklows* that can be found
 in `nomad.datamodel.metainfo.workflow` (blue):
 
-![workflow schema](workflow-schema.png)
+![workflow schema](images/workflow-schema.png)
 
 The idea is that *workflows* are stored in a top-level archive section along-side other
 sections that contain the *inputs* and *outputs*. This way the *workflow* or *provenance graph*
@@ -27,7 +25,7 @@ top-level section.
 
 Here is a logical depiction of the workflow and all its tasks, inputs, and outputs.
 
-![example workflow](example-workflow.png)
+![example workflow](images/example-workflow.png)
 
 ### Simple workflow entry
 
diff --git a/docs/develop/code.md b/docs/howto/develop/code.md
similarity index 62%
rename from docs/develop/code.md
rename to docs/howto/develop/code.md
index e207674c34a61ff4d6a5fbabbb2fac2502ae514f..e0f9888063937880199d733202a8264822cdfdfd 100644
--- a/docs/develop/code.md
+++ b/docs/howto/develop/code.md
@@ -5,8 +5,8 @@ about the codebase and ideas about what to look at first.
 
 ## Git Projects
 
-There is one [main NOMAD project](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR)
-(and its [fork on GitHub](https://github.com/nomad-coe/nomad)). This project contains
+There is one [main NOMAD project](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR){:target="_blank"}
+(and its [fork on GitHub](https://github.com/nomad-coe/nomad){:target="_blank"}). This project contains
 all the framework and infrastructure code. It instigates all checks, builds, and
 deployments for the public NOMAD service, the NOMAD Oasis, and the `nomad-lab` Python
 package. All contributions to NOMAD have to go through this project eventually.
@@ -16,17 +16,17 @@ them all in the `dependencies` directory or its subdirectories) or they are
 listed as PyPI packages in the `pyproject.toml` of the main project (or one of its
 submodules).
 
-You can also have a look at the [list of parsers](../reference/parsers.md) and
-[built-in plugins](../reference/plugins.md) that constitute the majority of these
-projects. The only other projects are [MatID](https://github.com/nomad-coe/matid),
-[DOS fingerprints](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-dos-fingerprints),
+You can also have a look at the [list of parsers](../../reference/parsers.md) and
+[built-in plugins](../../reference/plugins.md) that constitute the majority of these
+projects. The only other projects are [MatID](https://github.com/nomad-coe/matid){:target="_blank"},
+[DOS fingerprints](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-dos-fingerprints){:target="_blank"},
 and the
-[NOMAD Remote Tools Hub](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-remote-tools-hub).
+[NOMAD Remote Tools Hub](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-remote-tools-hub){:target="_blank"}.
 
 !!! note
-    The GitLab organization [nomad-lab](https://gitlab.mpcdf.mpg.de/nomad-lab) and the
-    GitHub organizations for [FAIRmat](https://github.com/fairmat-nfdi) and the
-    [NOMAD CoE](https://github.com/nomad-coe) all represent larger infrastructure and
+    The GitLab organization [nomad-lab](https://gitlab.mpcdf.mpg.de/nomad-lab){:target="_blank"} and the
+    GitHub organizations for [FAIRmat](https://github.com/fairmat-nfdi){:target="_blank"} and the
+    [NOMAD CoE](https://github.com/nomad-coe){:target="_blank"} all represent larger infrastructure and
     research projects, and they include many other Git projects that are not related.
     When navigating the codebase, only follow the submodules.
 
@@ -36,7 +36,7 @@ There are three main directories with Python code:
 
 - `nomad`: The actual NOMAD code. It is structured into more subdirectories and modules.
 
-- `tests`: Tests ([pytest](https://docs.pytest.org)) for the NOMAD code.
+- `tests`: Tests ([pytest](https://docs.pytest.org){:target="_blank"}) for the NOMAD code.
   It follows the same module structure, but Python files are prefixed with `test_`.
 
 - `examples`: A few small Python scripts that might be linked in the documentation.
@@ -44,60 +44,60 @@ There are three main directories with Python code:
 The `nomad` directory contains the following "main" modules. This list is not extensive
 but should help you to navigate the codebase:
 
-- `app`: The [FastAPI](https://fastapi.tiangolo.com/) APIs: v1 and v1.2 NOMAD APIs,
-  [OPTIMADE](https://www.optimade.org/), [DCAT](https://www.w3.org/TR/vocab-dcat-2/),
-  [h5grove](https://github.com/silx-kit/h5grove), and more.
+- `app`: The [FastAPI](https://fastapi.tiangolo.com/){:target="_blank"} APIs: v1 and v1.2 NOMAD APIs,
+  [OPTIMADE](https://www.optimade.org/){:target="_blank"}, [DCAT](https://www.w3.org/TR/vocab-dcat-2/){:target="_blank"},
+  [h5grove](https://github.com/silx-kit/h5grove){:target="_blank"}, and more.
 
 - `archive`: Functionality to store and access archive files. This is the storage format
   for all processed data in NOMAD. See also the docs on
-  [structured data](../explanation/data.md).
+  [structured data](../../explanation/data.md).
 
-- `cli`: The command line interface (based on [Click](https://click.palletsprojects.com)).
+- `cli`: The command line interface (based on [Click](https://click.palletsprojects.com){:target="_blank"}).
   Subcommands are structured into submodules.
 
 - `config`: NOMAD is configured through the `nomad.yaml` file. This contains all the
-  ([Pydantic](https://docs.pydantic.dev/)) models and default config parameters.
+  ([Pydantic](https://docs.pydantic.dev/){:target="_blank"}) models and default config parameters.
 
 - `datamodel`: The built-in schemas (e.g. `nomad.datamodel.metainfo.simulation` used by
   all the theory parsers). The base sections and section for the shared entry structure.
-  See also the docs on the [datamodel](../explanation/data.md) and
-  [processing](../explanation/basics.md).
+  See also the docs on the [datamodel](../../explanation/data.md) and
+  [processing](../../explanation/basics.md).
 
 - `metainfo`: The Metainfo system, e.g. the schema language that NOMAD uses.
 
 - `normalizing`: All the normalizers. See also the docs on
-  [processing](../explanation/basics.md#normalizing).
+  [processing](../../explanation/basics.md#normalizing).
 
 - `parsing`: The base classes for parsers, matching functionality, parser initialization,
   some fundamental parsers like the *archive* parser. See also the docs on
-  [processing](../explanation/basics.md#parsing).
+  [processing](../../explanation/basics.md#parsing).
 
 - `processing`: It's all about processing uploads and entries. The interface to
-  [Celery](https://docs.celeryq.dev/en/stable/) and [MongoDB](https://www.mongodb.com).
+  [Celery](https://docs.celeryq.dev/en/stable/){:target="_blank"} and [MongoDB](https://www.mongodb.com).
 
 - `units`: The unit and unit conversion system based on
-  [Pint](https://pint.readthedocs.io).
+  [Pint](https://pint.readthedocs.io){:target="_blank"}.
 
 - `utils`: Utility modules, e.g. the structured logging system
-  ([structlog](https://www.structlog.org/)), id generation, and hashes.
+  ([structlog](https://www.structlog.org/){:target="_blank"}), id generation, and hashes.
 
 - `files.py`: Functionality to maintain the files for uploads in staging and published.
   The interface to the file system.
 
 - `search.py`: The interface to
-  [Elasticsearch](https://www.elastic.co/guide/en/enterprise-search/current/start.html).
+  [Elasticsearch](https://www.elastic.co/guide/en/enterprise-search/current/start.html){:target="_blank"}.
 
 ## GUI code
 
-The NOMAD UI is written as a [React](https://react.dev/) single-page application (SPA). It
-uses (among many other libraries) [MUI](https://mui.com/),
-[Plotly](https://plotly.com/python/), and [D3](https://d3js.org/). The GUI code is
+The NOMAD UI is written as a [React](https://react.dev/){:target="_blank"} single-page application (SPA). It
+uses (among many other libraries) [MUI](https://mui.com/){:target="_blank"},
+[Plotly](https://plotly.com/python/){:target="_blank"}, and [D3](https://d3js.org/){:target="_blank"}. The GUI code is
 maintained in the `gui` directory. Most relevant code can be found in
 `gui/src/components`. The application entry point is `gui/src/index.js`.
 
 ## Documentation
 
-The documentation is based on [MkDocs](https://www.mkdocs.org/). The important files
+The documentation is based on [MkDocs](https://www.mkdocs.org/){:target="_blank"}. The important files
 and directories are:
 
 - `docs`: Contains all the Markdown files that contribute to the documentation system.
@@ -106,7 +106,7 @@ and directories are:
   added here as well.
 
 - `nomad/mkdocs.py`: Python code that defines
-  [macros](https://mkdocs-macros-plugin.readthedocs.io/) which can be used in Markdown.
+  [macros](https://mkdocs-macros-plugin.readthedocs.io/){:target="_blank"} which can be used in Markdown.
 
 ## Other top-level directories
 
diff --git a/docs/develop/contrib.md b/docs/howto/develop/contrib.md
similarity index 91%
rename from docs/develop/contrib.md
rename to docs/howto/develop/contrib.md
index b172b7881f50af33ad1abccf1420cd236bda1079..0a16a3b6f27b4ab7f2ed8c4ed181074219e1c25d 100644
--- a/docs/develop/contrib.md
+++ b/docs/howto/develop/contrib.md
@@ -3,8 +3,8 @@
 !!! note
 
     The NOMAD source code is maintained in two synchronized projects on
-    [GitHub](https://github.com/nomad-coe/nomad) and a
-    [GitLab run by MPCDF](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR).
+    [GitHub](https://github.com/nomad-coe/nomad){:target="_blank"} and a
+    [GitLab run by MPCDF](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR){:target="_blank"}.
     Everyone can contribute on GitHub. The GitLab instance requires an account for active
     contribution.
     This not an ideal situation: there are historic reasons and there is
@@ -15,18 +15,18 @@
 
 ### Issue trackers
 
-Everyone can open a [new issue](https://github.com/nomad-coe/nomad/issues/new) in our main
-[GitHub project](https://github.com/nomad-coe/nomad).
+Everyone can open a [new issue](https://github.com/nomad-coe/nomad/issues/new){:target="_blank"} in our main
+[GitHub project](https://github.com/nomad-coe/nomad){:target="_blank"}.
 
 Use issues to ask questions, report bugs, or suggest features. If in doubt, use the main
 project to engage with us. If you address a specific plugin (e.g. parser), you can also
-post into the respective projects. See also the list of [parsers](../reference/parsers.md)
-and the list of [built-in plugins](../reference/plugins.md).
+post into the respective projects. See also the list of [parsers](../../reference/parsers.md)
+and the list of [built-in plugins](../../reference/plugins.md).
 
 If you are a member of FAIRmat, the NOMAD CoE, or are a close collaborator, you
 probably have an MPCDF GitLab account (or should ask us for one). Please use the
 issue tracker on our main
-[GitLab project](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR).
+[GitLab project](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR){:target="_blank"}.
 This is where most of the implementation work is planned and executed.
 
 ### Issue content
@@ -47,7 +47,7 @@ A few tips that will help us to solve your issues quicker:
     - Is there an upload or entry id that we can look at?
     - Example files or code snippets?
     - Don't screenshot code, copy and paste instead. Use
-      [code blocks](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks#syntax-highlighting).
+      [code blocks](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks#syntax-highlighting){:target="_blank"}.
 
 - **Features**: Augment your feature descriptions with a use case that helps us understand
   the feature and its scope.
@@ -81,7 +81,7 @@ See also the [documentation part](./code.md#documentation) in our code navigatio
 ## Plugins
 
 Also read the guide on
-[how to develop, publish, and distribute plugins](../plugins/plugins.md).
+[how to develop, publish, and distribute plugins](../customization/plugins_dev.md).
 
 ### Built-in plugins (and submodules)
 
@@ -93,8 +93,8 @@ in the build and therefore automatically distributed as part of the NOMAD docker
 and Python package.
 
 To contribute to these plugins, use the respective GitHub projects. See also the
-list of [parsers](../reference/parsers.md) and the list of
-[built-in plugins](../reference/plugins.md). The same rules apply there. A merge request
+list of [parsers](../../reference/parsers.md) and the list of
+[built-in plugins](../../reference/plugins.md). The same rules apply there. A merge request
 to the main project will also be required to update the submodule.
 
 All these submodules are placed in the `dependencies` directory. After merging or
@@ -115,7 +115,7 @@ potential built-in plugin (i.e. as part of the public NOMAD service).
 
 ## Branches and Tags
 
-On the [main GitLab project](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR) we use
+On the [main GitLab project](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR){:target="_blank"} we use
 *protected* and *feature* branches. You must not commit to protected branches directly
 (even if you have the rights).
 
@@ -131,7 +131,7 @@ On the [main GitLab project](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR) w
 - `vX.X.X` or `vX.X.XrcX`: *tags* for (pre-)releases.
 
 The `develop` branch and release tags are automatically synchronized to the
-[GitHub project](https://github.com/nomad-coe/nomad). Otherwise, this project is mostly
+[GitHub project](https://github.com/nomad-coe/nomad){:target="_blank"}. Otherwise, this project is mostly
 the target for [pull requests](#pull-requests-pr-github) and does not contain other relevant
 branches.
 
@@ -145,7 +145,7 @@ should be based on the `develop` branch and merge request should target `develop
 
 ### Commit
 
-Make sure you follow our [code guidelines](./guides.md) and
+Make sure you follow our [code guidelines](../../reference/code_guidelines.md) and
 [set up your IDE](./setup.md#set-up-your-ide) to enforce style checks, linting, and static
 analysis. You can also run [tests locally](./setup.md#running-tests). Try to keep a clean
 commit history and follow our [Git tips](#tips-for-a-clean-git-history).
@@ -185,9 +185,9 @@ The trailer value (`Fixed` in the example) has to be one of the following values
 - `Changed` for general improvements, e.g. updated documentation, refactoring,
 improving performance, etc.
 
-These categories are consistent with [keepachangelog.com](https://keepachangelog.com/).
+These categories are consistent with [keepachangelog.com](https://keepachangelog.com/){:target="_blank"}.
 For more information about the changelog generation read the
-[GitLab documentation](https://docs.gitlab.com/ee/api/repositories.html#add-changelog-data-to-a-changelog-file).
+[GitLab documentation](https://docs.gitlab.com/ee/api/repositories.html#add-changelog-data-to-a-changelog-file){:target="_blank"}.
 
 ### CI/CD pipeline and review
 
@@ -234,7 +234,7 @@ to the references put in by GitLab.
 
 ## Pull requests (PR, GitHub)
 
-You can fork the [main NOMAD project](https://github.com/nomad-coe/nomad) and create pull
+You can fork the [main NOMAD project](https://github.com/nomad-coe/nomad){:target="_blank"} and create pull
 requests following the usual GitHub flow. Make sure to target the `develop` branch. A team
 member will pick up your pull request and automatically copy it to GitLab to run the
 pipeline and potentially perform the merge. This process is made transparent in the pull
@@ -250,7 +250,7 @@ code. This can only be done if we keep a "clean" history.
 
 - Use descriptive commit messages. Use simple verbs (*added*, *removed*, *refactored*,
   etc.) name features and changed components.
-  [Include issue numbers](https://docs.gitlab.com/ee/user/project/issues/crosslinking_issues.html)
+  [Include issue numbers](https://docs.gitlab.com/ee/user/project/issues/crosslinking_issues.html){:target="_blank"}
   to create links in GitLab.
 
 - Learn how to *amend* to avoid lists of small related commits.
@@ -261,8 +261,8 @@ code. This can only be done if we keep a "clean" history.
 
 - Some videos on more advanced Git usage:
 
-    - [Tools & Concepts for Matering Version Control with Git](https://youtu.be/Uszj_k0DGsg)
-    - [Interactive Rebase, Cherry-Picking, Reflog, Submodules, and more](https://youtu.be/qsTthZi23VE)
+    - [Tools & Concepts for Mastering Version Control with Git](https://youtu.be/Uszj_k0DGsg){:target="_blank"}
+    - [Interactive Rebase, Cherry-Picking, Reflog, Submodules, and more](https://youtu.be/qsTthZi23VE){:target="_blank"}
 
 ### Amend
 
@@ -310,4 +310,4 @@ unexpected errors in CI/CD, because you needed a save point, etc. Again the goal
 have coherent commits, where each commit makes sense on its own.
 
 Squashing can also be applied on a selection of commits during an
-[interactive rebase](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History#_squashing).
+[interactive rebase](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History#_squashing){:target="_blank"}.
diff --git a/docs/develop/search.md b/docs/howto/develop/search.md
similarity index 98%
rename from docs/develop/search.md
rename to docs/howto/develop/search.md
index c57b94a219ba3d59b738b3de507a2f30e14244c8..7a7607865c3259713d3663cd1ee93583cd3bbd02 100644
--- a/docs/develop/search.md
+++ b/docs/howto/develop/search.md
@@ -99,4 +99,5 @@ quantity path, e.g. `mainfile.path`.
 
 ## The search web interface
 
-Coming soon ...
\ No newline at end of file
+!!! warning "Attention"
+    Coming soon ...
\ No newline at end of file
diff --git a/docs/develop/setup.md b/docs/howto/develop/setup.md
similarity index 90%
rename from docs/develop/setup.md
rename to docs/howto/develop/setup.md
index 2f37234b5730294b03d789fa13bffd7ea4dc8596..158eee3a295bd28fc5375e03a629daa995e7dfaa 100644
--- a/docs/develop/setup.md
+++ b/docs/howto/develop/setup.md
@@ -1,4 +1,4 @@
-# How to get started
+# How to get started in development
 
 This is a step-by-step guide to get started with NOMAD development. You will clone
 all sources, set up a *Python* and *Node.js* environment, install all necessary dependencies,
@@ -6,12 +6,12 @@ run the infrastructure in development mode, learn to run the test suites, and se
 *Visual Studio Code* for NOMAD development.
 
 This is not about working with the NOMAD Python package `nomad-lab`. You can find its
-documentation [here](../apis/pythonlib.md).
+documentation [here](../programmatic/pythonlib.md).
 
 ## Clone the sources
 
 If not already done, you should clone NOMAD. If you have an account at the
-[MPDCF Gitlab](https://gitlab.mpcdf.mpg.de/), you can clone with the SSH URL:
+[MPCDF GitLab](https://gitlab.mpcdf.mpg.de/){:target="_blank"}, you can clone with the SSH URL:
 
 ```shell
 git clone git@gitlab.mpcdf.mpg.de:nomad-lab/nomad-FAIR.git nomad
@@ -64,7 +64,7 @@ The NOMAD code currently targets Python 3.9. You should work in a Python virtual
 #### Pyenv
 
 If your host machine has an older version installed,
-you can use [pyenv](https://github.com/pyenv/pyenv) to use Python 3.9 in parallel with your
+you can use [pyenv](https://github.com/pyenv/pyenv){:target="_blank"} to use Python 3.9 in parallel with your
 system's Python.
 
 #### Virtualenv
@@ -72,7 +72,7 @@ system's Python.
 Create a virtual environment. It allows you
 to keep NOMAD and its dependencies separate from your system's Python installation.
 Make sure that the virtual environment is based on Python 3.9.
-Use either the built-in `venv` module (see example) or [virtualenv](https://pypi.org/project/virtualenv/).
+Use either the built-in `venv` module (see example) or [virtualenv](https://pypi.org/project/virtualenv/){:target="_blank"}.
 
 ```shell
 python3 -m venv .pyenv
@@ -116,7 +116,7 @@ brew install libmagic
 
 If you are using a Mac with Apple Silicon, we recommend that you use rosetta, homebrew
 for Intel, and install and use an Intel-based Python. The second answer in this
-[Stackoverflow post](https://stackoverflow.com/questions/64882584/how-to-run-the-homebrew-installer-under-rosetta-2-on-m1-macbook)
+[Stackoverflow post](https://stackoverflow.com/questions/64882584/how-to-run-the-homebrew-installer-under-rosetta-2-on-m1-macbook){:target="_blank"}
 describes how to use both the Apple and Intel homebrew simultaneously.
 
 ### Install NOMAD
@@ -183,9 +183,9 @@ all the tests, see below.
 
 ### Install Docker
 
-You need to install [Docker](https://docs.docker.com/engine/install/).
+You need to install [Docker](https://docs.docker.com/engine/install/){:target="_blank"}.
 Docker nowadays comes with Docker Compose (`docker compose`) built-in. Prior, you needed to
-install the standalone [Docker Compose (`docker-compose`)](https://docs.docker.com/compose/install/).
+install the standalone [Docker Compose (`docker-compose`)](https://docs.docker.com/compose/install/){:target="_blank"}.
 
 ### Run required 3rd party services
 
@@ -206,7 +206,7 @@ The default virtual memory for Elasticsearch will likely be too low. On Linux, y
 sysctl -w vm.max_map_count=262144
 ```
 
-To set this value permanently, see [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html). Then you can run all services with:
+To set this value permanently, see [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html){:target="_blank"}. Then you can run all services with:
 
 ```shell
 cd ops/docker-compose/infrastructure
@@ -214,7 +214,7 @@ docker compose up -d elastic mongo rabbitmq
 cd ../../..
 ```
 
-If your system almost ran out of disk space, Elasticsearch enforces a read-only index block ([read more](https://www.elastic.co/guide/en/elasticsearch/reference/6.2/disk-allocator.html)), but
+If your system almost ran out of disk space, Elasticsearch enforces a read-only index block ([read more](https://www.elastic.co/guide/en/elasticsearch/reference/6.2/disk-allocator.html){:target="_blank"}), but
 after clearing up the disk space you need to reset it manually using the following command:
 
 ```shell
@@ -224,7 +224,7 @@ curl -XPUT -H "Content-Type: application/json" http://localhost:9200/_all/_setti
 Note that the Elasticsearch service has a known problem in quickly hitting the
 virtual memory limits of your OS. If you experience issues with the
 Elasticsearch container not running correctly or crashing, try increasing the
-virtual memory limits as shown [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html).
+virtual memory limits as shown [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html){:target="_blank"}.
 
 To shut down everything, just `ctrl-c` the running output. If you started everything
 in *deamon* mode (`-d`) use:
@@ -284,7 +284,7 @@ Or both together in one process:
 nomad admin run appworker
 ```
 
-On MacOS you might run into multiprocessing errors. That can be solved as described [here](https://stackoverflow.com/questions/50168647/multiprocessing-causes-python-to-crash-and-gives-an-error-may-have-been-in-progr).
+On MacOS you might run into multiprocessing errors. That can be solved as described [here](https://stackoverflow.com/questions/50168647/multiprocessing-causes-python-to-crash-and-gives-an-error-may-have-been-in-progr){:target="_blank"}.
 
 The app will run at port 8000 by default.
 
@@ -295,8 +295,8 @@ celery -A nomad.processing worker -l info
 ```
 
 If you run the GUI on its own (e.g. with the React dev server below), you also need to start
-the app manually. The GUI and its dependencies run on [Node.js](https://nodejs.org) and
-the [Yarn](https://yarnpkg.com/) dependency manager. Read their documentation on how to
+the app manually. The GUI and its dependencies run on [Node.js](https://nodejs.org){:target="_blank"} and
+the [Yarn](https://yarnpkg.com/){:target="_blank"} dependency manager. Read their documentation on how to
 install them for your platform.
 
 ```shell
@@ -336,10 +336,10 @@ north:
 - You have to generate a `crypt key` with `openssl rand -hex 32`.
 
 - You might need to install
-  [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy).
+  [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy){:target="_blank"}.
 
 The `configurable-http-proxy` comes as a Node.js package. See
-[Node.js](https://nodejs.org) for how to install `npm`. The proxy can be globally
+[Node.js](https://nodejs.org){:target="_blank"} for how to install `npm`. The proxy can be globally
 installed with:
 
 ```shell
@@ -385,15 +385,25 @@ pytest -sv tests
 ```
 
 !!! note
-    If you excluded plugins in your [NOMAD config](### `nomad.yaml`), then those tests
-    will fail.
+    Some of these tests will fail because a few large files are not included in the Git
+    repository. You may ignore these for local testing, they are still checked by the
+    CI/CD pipeline:
+
+    ```text
+    FAILED tests/archive/test_archive.py::test_read_springer - AttributeError: 'NoneType' object has no attribute 'seek'
+    FAILED tests/normalizing/test_material.py::test_material_bulk - assert None
+    FAILED tests/normalizing/test_system.py::test_springer_normalizer - IndexError: list index out of range
+    ```
 
-We use Ruff and Mypy to maintain code quality. Additionally, we recommend installing the Ruff [plugins](https://docs.astral.sh/ruff/integrations/) for your code editor to streamline the process. To execute Ruff and Mypy from the command line, you can utilize the following command:
+    If you excluded plugins in your NOMAD config (`nomad.yaml`), then those tests
+    will also fail.
 
+We use Ruff and Mypy to maintain code quality. Additionally, we recommend installing the Ruff [plugins](https://docs.astral.sh/ruff/integrations/){:target="_blank"} for your code editor to streamline the process. To execute Ruff and Mypy from the command line, you can utilize the following command:
 ```shell
 nomad dev qa --skip-tests
 ```
 
+
 To run all tests and code QA:
 
 ```shell
@@ -405,9 +415,9 @@ This mimics the tests and checks that the GitLab CI/CD will perform.
 ### Frontend tests
 
 We use
-[`testing-library`](https://testing-library.com/docs/react-testing-library/intro/)
+[`testing-library`](https://testing-library.com/docs/react-testing-library/intro/){:target="_blank"}
 to implement our GUI tests and `testing-library` itself uses
-[`Jest`](https://jestjs.io/) to run the tests. Tests are written in `*.spec.js`
+[`Jest`](https://jestjs.io/){:target="_blank"} to run the tests. Tests are written in `*.spec.js`
 files that accompany the implementation. Tests should focus on functionality,
 not on implementation details: `testing-library` is designed to enforce this kind
 of testing.
@@ -415,7 +425,7 @@ of testing.
 !!! note
 
     When testing HTML output, the elements are rendered using
-    [jsdom](https://github.com/jsdom/jsdom): this is not completely identical
+    [jsdom](https://github.com/jsdom/jsdom){:target="_blank"}: this is not completely identical
     to using an actual browser (e.g. does not support WebGL), but in practice
     is realistic enough for the majority of the test.
 
@@ -427,18 +437,18 @@ utilities that are relevant for testing the code in that particular folder.
 These utilities can usually be placed into the following categories:
 
 - Custom renders: When testing React components, the
-  [`render`](https://testing-library.com/docs/react-testing-library/api/#render) function
+  [`render`](https://testing-library.com/docs/react-testing-library/api/#render){:target="_blank"} function
   is used to display them on the test DOM. Typically your components require
   some parts of the infrastructure to work properly, which is achieved by
   wrapping your component with other components that provide a context. Custom
   render functions can do this automatically for you, e.g. the default render
   as exported from `src/components/conftest.js` wraps your components with an
   infrastructure that is very similar to the production app. See
-  [here](https://testing-library.com/docs/react-testing-library/setup/#custom-render)
+  [here](https://testing-library.com/docs/react-testing-library/setup/#custom-render){:target="_blank"}
   for more information.
 
 - Custom queries: See
-  [here](https://testing-library.com/docs/react-testing-library/setup/#add-custom-queries)
+  [here](https://testing-library.com/docs/react-testing-library/setup/#add-custom-queries){:target="_blank"}
   for more information.
 
 - Custom expects: These are reusable functions that perform actual tests using
@@ -560,7 +570,7 @@ configuration. To do this, follow these steps:
 ## Build the Docker image
 
 Normally the Docker image is build via a CI/CD pipeline that is run when pushing commits
-to [NOMAD's GitLab at MPCDF](https://gitlab.mpcdf.mpg.de/). These images are distributed
+to [NOMAD's GitLab at MPCDF](https://gitlab.mpcdf.mpg.de/){:target="_blank"}. These images are distributed
 via NOMAD's GitLab container registry. For most purposes you would use these
 automatically-built images.
 
@@ -603,7 +613,7 @@ guidelines, we recommend to use a proper IDE for development and ditch any Vim/E
 
 We strongly recommend that all developers use *Visual Studio Code (VS Code)*. (This is a
 completely different product than *Visual Studio*.) It is available for free
-for all major platforms [here](https://code.visualstudio.com/download).
+for all major platforms [here](https://code.visualstudio.com/download){:target="_blank"}.
 
 You should launch and run VS Code directly from the project's root directory. The source
 code already contains settings for VS Code in the `.vscode` directory. The settings
diff --git a/docs/tutorial/third_party.md b/docs/howto/manage/eln.md
similarity index 73%
rename from docs/tutorial/third_party.md
rename to docs/howto/manage/eln.md
index 7d6598a572cd978dadf13a32d513d7c444fbb35a..867fee3267d9b2168c2cea64c687277d0ccc7124 100644
--- a/docs/tutorial/third_party.md
+++ b/docs/howto/manage/eln.md
@@ -1,20 +1,61 @@
-!!! attention
+# How to use ELNs
+
+This guide describes how to manually create entries and enter information
+via ELNs (electronic lab notebooks). NOMAD ELNs allow you to acquire
+consistently structured data from users to augment uploaded files.
+
+!!! warning "Attention"
+
+    This part of the documentation is still work in progress.
+
+## Create a basic ELN entry
+
+Go to `PUBLISH` / `Uploads`. Here you can create an upload with the `CREATE A NEW UPLOAD`
+button. This will bring you to the upload page.
+
+Click the `CREATE ENTRY` button. This will bring up a dialog to choose an ELN schema.
+All ELNs (as any entry in NOMAD) need to follow a schema. You can choose from uploaded
+custom schemas or NOMAD built-in schemas. You can choose the `Basic ELN` to create a
+simple ELN entry.
+
+The name of your ELN entry will be the filename for your ELN without the `.archive.json`
+ending that will be added automatically. You can always find and download your ELNs
+on the `FILES` tab.
+
+The `Basic ELN` offers you simple fields for a *name*, *tags*, a *date/time*, and a rich text
+editor to enter your notes.
+
+## Add your own ELN schema
+
+To make NOMAD ELNs more useful, you can define your own schema to create your own data
+fields, create more sub-sections, reference other entries, and much more.
+
+You should have a look at our ELN example upload. Go to `PUBLISH` / `Uploads` and
+click the `ADD EXAMPLE UPLOADS` button. The `Electronic Lab Notebook` example will
+contain a schema and entries that instantiate different parts of the schema.
+The *ELN example sample* (`sample.archive.json`) demonstrates what you can do.
+
+Follow the [How-to write a schema](../customization/basics.md) and [How-to define ELN](../customization/elns.md)
+guides to create your own customized ELNs.
+
+## Integration of third-party ELNs
+!!! warning "Attention"
 
     This part of the documentation is still work in progress.
 
 
 NOMAD offers integration with third-party ELN providers, simplifying the process of connecting
 and interacting with external platforms. Three main external ELN solutions that are integrated into NOMAD
-are: [elabFTW](https://www.elabftw.net/), [Labfolder](https://labfolder.com/) and [chemotion](https://chemotion.net/).
+are: [elabFTW](https://www.elabftw.net/){:target="_blank"}, [Labfolder](https://labfolder.com/){:target="_blank"} and [chemotion](https://chemotion.net/){:target="_blank"}.
 The process of data retrieval and data mapping onto NOMAD's schema
 varies for each of these third-party ELN provider as they inherently allow for certain ways of communicating with their
 database. Below you can find a <b>How-to</b> guide on importing your data from each of these external
 repositories.
 
 
-## elabFTW integration
+### elabFTW integration
 
-elabFTW is part of [the ELN Consortium](https://github.com/TheELNConsortium)
+elabFTW is part of [the ELN Consortium](https://github.com/TheELNConsortium){:target="_blank"}
 and supports exporting experimental data in ELN file format. ELNFileFormat is a zipped file
 that contains <b>metadata</b> of your elabFTW project along with all other associated data of
 your experiments.
@@ -39,7 +80,7 @@ links to external resources and extra fields. <b>experiment_files</b> section is
 containing metadata and additional info of the files associated with the experiment.
 
 
-## Labfolder integration
+### Labfolder integration
 
 Labfolder provides API endpoints to interact with your ELN data. NOMAD makes API calls to
 retrieve, parse and map the data from your Labfolder instance/database to a NOMAD's schema.
@@ -75,7 +116,7 @@ a special Labfolder element where the data is structured in JSON format. Every d
 `IMAGE` element contains information of any image stored in your Labfolder project. `TEXT` element
 contains data of any text field in your Labfodler project.
 
-## Chemotion integration
+### Chemotion integration
 
 NOMAD supports importing your data from Chemotion repository via `chemotion` parser. The parser maps
 your data that is structured under chemotion schema, into a predefined NOMAD schema. From your Chemotion
diff --git a/docs/data/explore.md b/docs/howto/manage/explore.md
similarity index 78%
rename from docs/data/explore.md
rename to docs/howto/manage/explore.md
index 4912780c7d26026afe7afaef928684ae81a8f5d3..a089469e5a90c867c6cdc8445a253d0bf97ddc6b 100644
--- a/docs/data/explore.md
+++ b/docs/howto/manage/explore.md
@@ -1,6 +1,7 @@
-!!! attention
+# How to explore data
 
-    This part of the documentation is still work in progress.
+!!! warning "Attention"
+    We are currently working to update this content.
 
 While we are still working on this, please use our video tutorial as a starting point:
 
diff --git a/docs/data/north.md b/docs/howto/manage/north.md
similarity index 58%
rename from docs/data/north.md
rename to docs/howto/manage/north.md
index d0ff3d97ff0f06bb885954d14b2af9ac9f46206a..9e389275703e5afeb02567f6d9b943b3c0902464 100644
--- a/docs/data/north.md
+++ b/docs/howto/manage/north.md
@@ -1,3 +1,5 @@
-!!! attention
+# How to use NORTH
+
+!!! warning "Attention"
 
     This part of the documentation is still work in progress.
diff --git a/docs/data/upload.md b/docs/howto/manage/upload.md
similarity index 94%
rename from docs/data/upload.md
rename to docs/howto/manage/upload.md
index b97773c0fa0d565990bda80647e2537c713b4c5d..60edbc6708463e5d0584ad672ba654d39e4e9c23 100644
--- a/docs/data/upload.md
+++ b/docs/howto/manage/upload.md
@@ -1,4 +1,6 @@
-This guide describes how to upload data in NOMAD [supported file formats](../reference/parsers.md). You find a
+# How to upload and publish data for supported formats
+
+This guide describes how to upload data in NOMAD [supported file formats](../../reference/parsers.md). You find a
 list of supported formats on top of each upload page, see below.
 
 ## Preparing files
@@ -10,7 +12,7 @@ NOMAD will simply extract them and consider the whole directory structure within
 
 ## Create an upload and add files
 
-Open [NOMAD](https://nomad-lab.eu/prod/v1) and log in; if you don't have a NOMAD account, please create one.
+Open [NOMAD](https://nomad-lab.eu/prod/v1){:target="_blank"} and log in; if you don't have a NOMAD account, please create one.
 
 Go to `PUBLISH` / `Uploads`. Here you can create an upload with the `CREATE A NEW UPLOAD`
 button. This will bring you to the upload page.
@@ -96,7 +98,7 @@ not remove entries.
 
 - One upload cannot exceed **32 GB** in size.
 - Only **10 non published uploads** are allowed per user.
-- Only uploads with at least one recognized entry can be published. See also [supported codes/formats](../reference/parsers.md) below.
+- Only uploads with at least one recognized entry can be published. See also [supported codes/formats](../../reference/parsers.md) below.
 
 
 ## Strategies for large amounts of data
@@ -106,7 +108,7 @@ and small subset of your data. Use this to simulate a larger upload that you can
 in the normal way. You do not have to publish this test upload; simply delete it before publish,
 once you are satisfied with the results.
 
-Ask for assistance and [Contact us](https://nomad-lab.eu/about/support) in advance. This will
+Ask for assistance and [Contact us](https://nomad-lab.eu/about/support){:target="_blank"} in advance. This will
 allow us to react to your specific situation and eventually prepare additional measures.
 Allow enough time before you need your data to be published. Adding multiple hundreds of
 GBs to NOMAD isn't a trivial feat and will take some time and effort on all sides.
diff --git a/docs/oasis/admin.md b/docs/howto/oasis/admin.md
similarity index 98%
rename from docs/oasis/admin.md
rename to docs/howto/oasis/admin.md
index 3dcd1cf3bc6b98045d39d8b376afdcec962961be..ccd27b5eae690875c501aec5325dc80cb7f7dc13 100644
--- a/docs/oasis/admin.md
+++ b/docs/howto/oasis/admin.md
@@ -1,3 +1,5 @@
+# How to perform admin tasks
+
 ## Backups
 
 To backup your Oasis at least the file data and mongodb data needs to be saved. You determined the path to your file data (your uploads) during the installation. By
@@ -62,7 +64,7 @@ Here are a few examples:
 - `--outdated` Select published uploads with older NOMAD versions than the current
 - `--processing-failure` Uploads with processing failures.
 
-For a complete list refer to the [CLI reference documentation](../reference/cli.md#nomad-admin-uploads).
+For a complete list refer to the [CLI reference documentation](../../reference/cli.md#nomad-admin-uploads).
 
 Alternatively, you can use a list of upload ids at the end of the command, e.g.:
 
diff --git a/docs/oasis/apps.md b/docs/howto/oasis/apps.md
similarity index 95%
rename from docs/oasis/apps.md
rename to docs/howto/oasis/apps.md
index 43d83e7fc1948572c19342aad751f062dec0dd76..3adb24ac85caf67876ba5a1cdd2333dd7c6b7305 100644
--- a/docs/oasis/apps.md
+++ b/docs/howto/oasis/apps.md
@@ -1,3 +1,5 @@
+# How to configure custom apps
+
 Apps provide customized views of data for specific domains, making it easier for
 the users to navigate and understand the data. This typically means that certain
 domain-specific properties are highlighted, different units may be used for
@@ -7,8 +9,8 @@ mixture of experiments and simulations, different techniques, and physical
 properties spanning different time and length scales.
 
 Apps only affect the way data is *displayed* for the user: if you wish to affect
-the underlying data structure, you will need to define a custom [Python schema](../plugins/schemas.md)
-or [YAML schema](../schemas/basics.md). It is common that a custom schema has
+the underlying data structure, you will need to define a custom [Python schema](../customization/plugins_dev.md#develop-a-schema-plugin)
+or [YAML schema](../customization/basics.md). It is common that a custom schema has
 an app associated with it, but apps can also provide different views of the same
 underlying data.
 
@@ -80,7 +82,7 @@ dashboard:
 ## Customizing default apps in a NOMAD installation
 
 Each NOMAD installation has a set of built-in apps, which are controlled through
-the [ui.apps](../reference/config.md#ui) field in the `nomad.yaml` configuration file. These are
+the [ui.apps](../../reference/config.md#ui) field in the `nomad.yaml` configuration file. These are
 the apps that are defined by default in a NOMAD installation:
 
 {{ default_apps_list()}}
diff --git a/docs/howto/oasis/customize.md b/docs/howto/oasis/customize.md
new file mode 100644
index 0000000000000000000000000000000000000000..c6eea560d959eb3dd4dbd6115d2c3f1cf8e88408
--- /dev/null
+++ b/docs/howto/oasis/customize.md
@@ -0,0 +1,17 @@
+# How to customize an Oasis
+
+!!! warning "Attention"
+
+    This part of the documentation is still work in progress.
+
+This is an incomplete list of potential customizations. Please read the respective
+guides to learn more.
+
+- Installation specific changes (domain, path-prefix): [How to install an Oasis](install.md)
+- [Restricting user access](admin.md#restricting-access-to-your-oasis)
+- [Configure custom apps](apps.md)
+- Write .yaml based [schemas](../customization/basics.md) and [ELNs](../customization/elns.md)
+- Learn how to use the [tabular parser](../customization/tabular.md) to manage data from .xls or .csv
+- Develop a [schema plugin](../customization/plugins_dev.md#develop-a-schema-plugin) for more powerful schemas and ELNs
+- Use and develop [parser plugins](../customization/plugins_dev.md#develop-a-parser-plugin) for specific file formats
+- Add specialized [NORTH tools](../manage/north.md)
\ No newline at end of file
diff --git a/docs/oasis/install.md b/docs/howto/oasis/install.md
similarity index 95%
rename from docs/oasis/install.md
rename to docs/howto/oasis/install.md
index 60998a7da16bbd1bed8a7368fdb02dae85bf6810..f581cdd1b8dd1868b91e9fa00c7018f48a0fcea3 100644
--- a/docs/oasis/install.md
+++ b/docs/howto/oasis/install.md
@@ -1,4 +1,6 @@
-# Operating an OASIS
+# How to install an Oasis
+
+<!-- # Operating an OASIS -->
 
 Originally, NOMAD Central Repository is a service run at Max-Planck's computing facility in Garching, Germany.
 However, the NOMAD software is Open-Source, and everybody can run it. Any service that
@@ -13,15 +15,15 @@ central NOMAD installation.
     If you installed (or even just plan to install) a NOMAD Oasis, please take
     the time to register your Oasis with FAIRmat. This will help us to assist
     you in an problems and keep you updated on new releases. You can register
-    by filling out this [simple form](https://www.fairmat-nfdi.eu/fairmat/oasis_registration).
+    by filling out this [simple form](https://www.fairmat-nfdi.eu/fairmat/oasis_registration){:target="_blank"}.
 
 ## Quick-start
 
 - Find a linux computer.
-- Make sure you have [docker](https://docs.docker.com/engine/install/) installed.
+- Make sure you have [docker](https://docs.docker.com/engine/install/){:target="_blank"} installed.
 Docker nowadays comes with `docker compose` build in. Prior, you needed to
-install the stand alone [docker-compose](https://docs.docker.com/compose/install/).
-- Download our basic configuration files [nomad-oasis.zip](../assets/nomad-oasis.zip)
+install the stand alone [docker-compose](https://docs.docker.com/compose/install/){:target="_blank"}.
+- Download our basic configuration files [nomad-oasis.zip](../../assets/nomad-oasis.zip)
 - Run the following commands (skip `chown` on MacOS and Windows computers)
 
 
@@ -34,7 +36,7 @@ docker compose up -d
 curl localhost/nomad-oasis/alive
 ```
 
-- Open [http://localhost/nomad-oasis](http://localhost/nomad-oasis) in your browser.
+- Open [http://localhost/nomad-oasis](http://localhost/nomad-oasis){:target="_blank"} in your browser.
 
 To run NORTH (the NOMAD Remote Tools Hub), the `hub` container needs to run docker and
 the container has to be run under the docker group. You need to replace the default group
@@ -131,14 +133,14 @@ NOMAD software is distributed as a set of docker containers and there are also o
 Further, we use docker-compose to setup all necessary containers in the simplest way possible.
 
 You will need a single computer, with **docker** and **docker-compose** installed. Refer
-to the official [docker](https://docs.docker.com/engine/install/) (and [docker-compose](https://docs.docker.com/compose/install/))
+to the official [docker](https://docs.docker.com/engine/install/){:target="_blank"} (and [docker-compose](https://docs.docker.com/compose/install/){:target="_blank"})
 documentation for installation instructions. Newer version of docker have a re-implementation
 of docker-compose integrated as the `docker compose` sub-command. This should be fully
 compatible and you might chose to can replace `docker compose` with `docker-compose` in this tutorial.
 
 The following will run all necessary services with docker. These comprise: a **mongo**
 database, an **elasticsearch**, a **rabbitmq** distributed task queue, the NOMAD **app**,
-NOMAD **worker**, and NOMAD **gui**. In this [introduction](../index.md#architecture),
+NOMAD **worker**, and NOMAD **gui**. In this [introduction](../../index.md#architecture),
 you will learn what each service does and why it is necessary.
 
 ### Configuration
@@ -153,7 +155,7 @@ There are three files to configure:
 - `configs/nginx.conf`
 
 In this example, we have all files in the same directory (the directory we are also working in).
-You can download minimal example files [here](../assets/nomad-oasis.zip).
+You can download minimal example files [here](../../assets/nomad-oasis.zip).
 
 #### docker-compose.yaml
 
@@ -176,7 +178,7 @@ A few things to notice:
 
 - The app, worker, and north service use the NOMAD docker image. Here we use the `latest` tag, which
 gives you the latest *beta* version of NOMAD. You might want to change this to `stable`,
-a version tag (format is `vX.X.X`, you find all releases [here](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR/-/tags)), or a specific [branch tag](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR/-/branches).
+a version tag (format is `vX.X.X`, you find all releases [here](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR/-/tags){:target="_blank"}), or a specific [branch tag](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR/-/branches){:target="_blank"}.
 - All services use docker volumes for storage. This could be changed to host mounts.
 - It mounts two configuration files that need to be provided (see below): `nomad.yaml`, `nginx.conf`.
 - The only exposed port is `80` (proxy service). This could be changed to a desired port if necessary.
@@ -229,7 +231,7 @@ A few things to notice:
 - `client_max_body_size` sets a limit to the possible upload size.
 
 You can add an additional reverse proxy in front or modify the nginx in the docker-compose.yaml
-to [support https](http://nginx.org/en/docs/http/configuring_https_servers.html).
+to [support https](http://nginx.org/en/docs/http/configuring_https_servers.html){:target="_blank"}.
 If you operate the GUI container behind another proxy, keep in mind that your proxy should
 not buffer requests/responses to allow streaming of large requests/responses for `api/v1/uploads` and `api/v1/.*/download`.
 An nginx reverse proxy location on an additional reverse proxy, could have these directives
@@ -313,7 +315,7 @@ If you want to report problems with your OASIS. Please provide the logs for
 
 ### Provide and connect your own user management
 
-NOMAD uses [keycloak](https://www.keycloak.org/) for its user management. NOMAD uses
+NOMAD uses [keycloak](https://www.keycloak.org/){:target="_blank"} for its user management. NOMAD uses
 keycloak in two ways. First, the user authentication uses the OpenID Connect/OAuth interfaces provided by keycloak.
 Second, NOMAD uses the keycloak realm-management API to get a list of existing users.
 Keycloak is highly customizable and numerous options to connect keycloak to existing
@@ -330,7 +332,7 @@ installation above. There are just a three changes.
 - The `nomad.yaml` has modifications to tell nomad to use your and not the official NOMAD keycloak.
 
 You can start with the regular installation above and manually adopt the config or
-download the already updated configuration files: [nomad-oasis-with-keycloak.zip](../assets/nomad-oasis-with-keycloak.zip).
+download the already updated configuration files: [nomad-oasis-with-keycloak.zip](../../assets/nomad-oasis-with-keycloak.zip).
 The download also contains an additional `configs/nomad-realm.json` that allows you
 to create an initial keycloak realm that is configured for NOMAD automatically.
 
@@ -505,5 +507,7 @@ This should give you a working OASIS at `http://<your-host>/<your-path-prefix>`.
 
 ## Kubernetes
 
-*This is not yet documented.*
+!!! warning "Attention"
+
+    This is not yet documented.
 
diff --git a/docs/oasis/migrate.md b/docs/howto/oasis/migrate.md
similarity index 94%
rename from docs/oasis/migrate.md
rename to docs/howto/oasis/migrate.md
index 9abbc6b712da45e77effabc6b261771b1b2c4f87..6ef3acfaf055cc6b28011688d047133918d1eb1b 100644
--- a/docs/oasis/migrate.md
+++ b/docs/howto/oasis/migrate.md
@@ -1,7 +1,9 @@
+# How to migrate Oasis versions
+
 ## Software versions
 
 We distribute NOMAD as docker images that are available in our
-[public docker registry](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR/container_registry/36).
+[public docker registry](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR/container_registry/36){:target="_blank"}.
 The a NOMAD image names looks like this:
 
 ```
@@ -21,7 +23,7 @@ Our semantic interpretation of **"minor"** is the following:
 - minor version might introduce new features that are only available after certain
 actions [migration steps](#migration-steps).
 
-A road-map for major features can be found on our homepage [here](https://nomad-lab.eu/nomad-lab/features.html). You'll find a detailed change log in the source code [here](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR/-/blob/develop/CHANGELOG.md).
+A road-map for major features can be found on our homepage [here](https://nomad-lab.eu/nomad-lab/features.html){:target="_blank"}. You'll find a detailed change log in the source code [here](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR/-/blob/develop/CHANGELOG.md){:target="_blank"}.
 
 
 ## Configuration versions
diff --git a/docs/plugins/plugins.md b/docs/howto/oasis/plugins_install.md
similarity index 86%
rename from docs/plugins/plugins.md
rename to docs/howto/oasis/plugins_install.md
index d08ea1c53c0202ed858a3c17bd09da2306adf234..f1b36a12d9da0b1e264f7ef939a93571d5b9ab64 100644
--- a/docs/plugins/plugins.md
+++ b/docs/howto/oasis/plugins_install.md
@@ -1,3 +1,5 @@
+# How to install plugins
+
 Plugins allow you to add Python-based functionality to NOMAD without a custom build
 NOMAD image or release. Plugins can be installed at NOMAD start-up time. Therefore, you can
 configure each NOMAD (Oasis) with a different custom set of plugins or disable unnecessary
@@ -15,20 +17,22 @@ We support different kinds of plugins:
 We provide template projects on GitHub. You can fork these projects and follow the
 instructions in their `README.md`. These instructions will give you everything you
 need to run and test your plugin as a plugin developer.
-The following sections here contain more background information and explain how to
-add plugins to a NOMAD installation.
 
-- [schema plugin](https://github.com/nomad-coe/nomad-schema-plugin-example)
-- [parser plugin](https://github.com/nomad-coe/nomad-parser-plugin-example)
-- [normalizer plugin](https://github.com/nomad-coe/nomad-normalizer-plugin-example.git)
+The following sections explain how to add plugins to a NOMAD installation.<br />
+Dedicated Explanation sections provide more background information on [what is a schema](../../explanation/data.md#schema) and [what is a parser](../../explanation/processing.md#schemas-parsers-plugins).
+
+- [schema plugin](https://github.com/nomad-coe/nomad-schema-plugin-example){:target="_blank"}
+- [parser plugin](https://github.com/nomad-coe/nomad-parser-plugin-example){:target="_blank"}
+- [normalizer plugin](https://github.com/nomad-coe/nomad-normalizer-plugin-example.git){:target="_blank"}
+
 
-### Plugin anatomy
+## Plugin anatomy
 
 A plugin usually consist of the *plugin code* (a Python package) and
 *plugin metadata*. The installation independent *plugin metadata* (e.g. name, description, python package, etc.)
 can be defined in a `nomad_plugin.yaml` that is part of the *plugin code*.
 The installation dependent *plugin metadata* (e.g. plugin key, order and priority, parser matching rules, etc.)
-is added to the `nomad.yaml` of the NOMAD installation.
+is added to the [`nomad.yaml` file](../develop/setup.md#nomadyaml) of the NOMAD installation.
 
 Here is the project layout of the schema example:
 
@@ -48,17 +52,17 @@ my-nomad-schema
 └── requirements.txt
 ```
 
-### Plugin code
+## Plugin code
 
 The directory `nomadschemaexample` is our Python package *plugin code*. In this case,
-it contains a simple `schema.py`. Read the [Schema plugin documentation](schemas.md)
+it contains a simple `schema.py`. Read the [Schema plugin documentation](../customization/plugins_dev.md#develop-a-schema-plugin)
 for more details:
 
 ```python
 {{ file_contents('examples/plugins/schema/nomadschemaexample/schema.py') }}
 ```
 
-### Plugin metadata
+## Plugin metadata
 
 The file `nomad_plugin.yaml` contains the installation independent *plugin metadata*:
 
@@ -73,7 +77,7 @@ metadata that is necessary to use them. See below for a reference of the *plugin
 models.
 
 The file `nomad.yaml` shows how to add the plugin to a nomad installation. As a plugin
-developer you have [installed our Python package](../apis/pythonlib.md) and can run the `nomad parse`
+developer you have [installed our Python package](../programmatic/pythonlib.md) and can run the `nomad parse`
 command as your "installation" to try your schema:
 
 ```yaml
@@ -96,18 +100,18 @@ code. This also means that the package has to be in your `PYTHONPATH` (see below
 
 Now follow the instructions for one of our examples and try for yourself:
 
-- [schema plugin](https://github.com/nomad-coe/nomad-schema-plugin-example)
-- [parser plugin](https://github.com/nomad-coe/nomad-parser-plugin-example)
-- [normalizer plugin](https://github.com/nomad-coe/nomad-normalizer-plugin-example)
+- [schema plugin](https://github.com/nomad-coe/nomad-schema-plugin-example){:target="_blank"}
+- [parser plugin](https://github.com/nomad-coe/nomad-parser-plugin-example){:target="_blank"}
+- [normalizer plugin](https://github.com/nomad-coe/nomad-normalizer-plugin-example){:target="_blank"}
 
 
-## Publish a plugin
+# Publish a plugin
 
-!!! attention
+!!! warning "Attention"
     The processes around publishing plugins and using plugins of others are still
     worked on. The "best" practices mentioned here are preliminary.
 
-### Create a (GitHub) project
+## Create a (GitHub) project
 
 If you forked from our examples, you already have a GitHub project. Otherwise, you
 should create one. This allows others to get your plugin sources or initiate communication
@@ -122,12 +126,12 @@ plugins in a project (a project can contain multiple modules with multiple
 
 !!! note
     If you develop a plugin in the context of **FAIRmat** or the **NOMAD CoE**, put your
-    plugin projects in the respective GitHub organization for [FAIRmat](https://github.com/fairmat-nfdi)
-    and the [NOMAD CoE](https://github.com/nomad-coe). Here, the naming convention above is binding.
+    plugin projects in the respective GitHub organization for [FAIRmat](https://github.com/fairmat-nfdi){:target="_blank"}
+    and the [NOMAD CoE](https://github.com/nomad-coe){:target="_blank"}. Here, the naming convention above is binding.
 
 Your plugin projects should follow the layout of our example projects.
 
-### Different forms of plugin distribution
+## Different forms of plugin distribution
 
 - **source code**: Mounting plugin code into a NOMAD (Oasis) installation. This is described above and only
 the plugin source code is needed.
@@ -141,9 +145,9 @@ when building a customized docker images (see [below](#pypipip-package)).
 Independent of the form of distribution, you'll still need to add the plugin to
 your configuration as explained above.
 
-### PyPI/pip package
+## PyPI/pip package
 
-Learn from the PyPI documentation how to [create a package for PyPI](https://packaging.python.org/en/latest/tutorials/packaging-projects/).
+Learn from the PyPI documentation how to [create a package for PyPI](https://packaging.python.org/en/latest/tutorials/packaging-projects/){:target="_blank"}.
 We recommend to use the `pyproject.toml`-based approach. Here is an example `pyproject.toml` file:
 
 ```toml
@@ -156,7 +160,7 @@ pip install build
 python -m build --sdist
 ```
 
-Learn from the PyPI documentation how to [publish a package to PyPI](https://packaging.python.org/en/latest/tutorials/packaging-projects/#uploading-the-distribution-archives).
+Learn from the PyPI documentation how to [publish a package to PyPI](https://packaging.python.org/en/latest/tutorials/packaging-projects/#uploading-the-distribution-archives){:target="_blank"}.
 If you have access to the MPCDF GitLab and NOMAD's presence there, you can also
 use the `nomad-FAIR` registry:
 
@@ -168,34 +172,34 @@ twine upload \
     dist/nomad-example-schema-plugin-*.tar.gz
 ```
 
-### Register your plugin
+## Register your plugin
 
-!!! attention
+!!! warning "Attention"
     This is work in progress. We plan to provide a plugin registry that allows you to
     publish your plugin's *metadata*. This will then be used to simplify plugin management
     within a NOMAD installation.
 
-    The built-in plugins can already be found in the [documentation reference](../reference/plugins.md).
+    The built-in plugins can already be found in the [documentation reference](../../reference/plugins.md).
 
-## Add a plugin to your NOMAD
+# Add a plugin to your NOMAD
 
 Adding a plugin, depends on the form of plugin distribution and how you run NOMAD.
 Eventually, you need to add the *plugin metadata* to `nomad.yaml` (see above) and you need
 to add the *plugin code* to the `PYTHONPATH`. The `nomad.yaml` needs to be
 edited manually in the usual ways. There are several ways to add *plugin code*.
 
-### Built-in plugins
+## Built-in plugins
 
 Those are already part of the NOMAD sources or NOMAD docker images. You only need
 to configure them in your `nomad.yaml`.
 
-### Add to Python path
+## Add to Python path
 
 When you run NOMAD as a developer, simply add the plugin directory to the `PYTHONPATH` environment variable.
 When you start the application (e.g. `nomad admin run appworker`), Python will find your code when NOMAD
 imports the `python_package` given in the `plugins.options` of your `nomad.yaml`.
 
-### Mount into a NOMAD Oasis
+## Mount into a NOMAD Oasis
 
 The NOMAD docker image adds the folder `/app/plugins` to the `PYTHONPATH`. You simply have
 to add the *plugin metadata* to your Oasis' `nomad.yaml` and mount your code into the `/app/plugins`
@@ -228,7 +232,7 @@ export COMPOSE_FILE=docker-compose.yaml:docker-compose.plugins.yaml
 docker compose up -d
 ```
 
-Here is a complete Oasis setup [nomad-oasis-with-plugins.zip](../assets/nomad-oasis-with-plugins.zip).
+Here is a complete Oasis setup [nomad-oasis-with-plugins.zip](../../assets/nomad-oasis-with-plugins.zip).
 Simply download, extract, and start like any other Oasis:
 
 ```sh
@@ -244,7 +248,7 @@ docker compose up -d
 curl localhost/nomad-oasis/alive
 ```
 
-!!! attention
+!!! warning "Attention"
     It is important to set up the correct user rights for your volumes and
     plugins. Our default `docker-compose` setup uses the user `1000` in group
     `1000` to run the services, this is the reason for the `chown` commands
@@ -252,9 +256,9 @@ curl localhost/nomad-oasis/alive
     volumes and in the plugins. If you use another user/group to run the docker
     services, update the commands accordingly.
 
-Read the [Oasis install guide](../oasis/install.md) for more details.
+Read the [Oasis install guide](install.md) for more details.
 
-### Install PyPI/pip package
+## Install PyPI/pip package
 
 If the plugin is published on PyPI, you can simply install it with pip. If the
 plugin was published to our MPCDF GitLab registry, you have to use the `--index-url`
@@ -268,7 +272,7 @@ Installing via pip works for NOMAD developers, but how to pip install into an Oa
 The package could either be installed when NOMAD is started or via
 a customized docker image.
 
-!!! attention
+!!! warning "Attention"
     We still need to implement that configured plugins, if not already installed,
     get automatically installed during NOMAD start.
 
diff --git a/docs/howto/overview.md b/docs/howto/overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..3068085e4f4acc1d099a63c0715be59aeb48bc10
--- /dev/null
+++ b/docs/howto/overview.md
@@ -0,0 +1,90 @@
+---
+hide: [toc]
+---
+
+# NOMAD How-to guides
+
+
+## Users
+These how-to guides target NOMAD users and cover data management, exploration, and analysis with NOMAD's graphical web interface and APIs.
+
+<div markdown="block" class="home-grid">
+<div markdown="block">
+
+### Manage and find data
+
+Use NOMAD to manage, explore, and analyze data.
+
+- [Upload and publish data for supported formats](manage/upload.md)
+- [Use ELNs](manage/eln.md)
+- [Explore data](manage/explore.md)
+- [Use NORTH](manage/north.md)
+
+</div>
+<div markdown="block">
+
+### Programmatic use
+
+Use NOMAD's functions programmatically and via its APIs.
+
+- [Use the API](programmatic/api.md)
+- [Publish data using python](programmatic/publish_python.md)
+- [Install nomad-lab](programmatic/pythonlib.md)
+- [Access processed data](programmatic/archive_query.md)
+- [Run a parser](programmatic/local_parsers.md)
+
+</div>
+</div>
+
+## Data stewards, administrators, and developers
+These how-to guides allow advanced users, NOMAD administrators, data stewards, and developers to customize and operate NOMAD and NOMAD Oasis or contribute to NOMAD's development.
+
+<div markdown="block" class="home-grid">
+<div markdown="block">
+
+### NOMAD Oasis
+
+Host NOMAD for your lab or institution.
+
+- [Install an Oasis](oasis/install.md)
+- [Customize an Oasis](oasis/customize.md)
+- [Install plugins](oasis/plugins_install.md)
+- [Configure custom apps](oasis/apps.md)
+- [Migrate Oasis versions](oasis/migrate.md)
+- [Administrate and maintain an Oasis](oasis/admin.md)
+
+</div>
+<div markdown="block">
+
+### Customization
+
+Customize NOMAD, write plugins, and tailor NOMAD Oasis.
+
+- [Write a schema](customization/basics.md)
+- [Define ELNs](customization/elns.md)
+- [Base sections](customization/base_sections.md)
+- [Tabular parser](customization/tabular.md)
+- [Define workflows](customization/workflows.md)
+- [Reference hdf5 files](customization/hdf5.md)
+- [Develop and publish plugins](customization/plugins_dev.md)
+- [Write a parser](customization/parsers.md)
+- [Write a normalizer](customization/normalizers.md)
+
+</div>
+<div markdown="block">
+
+### Development
+
+Become a NOMAD developer and contribute to the source code.
+
+- [Get started](develop/setup.md)
+- [Navigate the code](develop/code.md)
+- [Contribute](develop/contrib.md)
+- [Extend the search](develop/search.md)
+
+</div>
+</div>
+
+<h2>One last thing</h2>
+
+If you can't find what you're looking for in our guides, [contact our team](mailto:support@nomad-lab.eu) for personalized help and assistance. Don't worry, we're here to help and learn what we're doing wrong!
diff --git a/docs/apis/api.md b/docs/howto/programmatic/api.md
similarity index 94%
rename from docs/apis/api.md
rename to docs/howto/programmatic/api.md
index 1d57adcafe62000dc36790c13d7d75fce4b5487e..e74317bdcf29d5fbf5042f571016f5e3bb94583a 100644
--- a/docs/apis/api.md
+++ b/docs/howto/programmatic/api.md
@@ -1,15 +1,17 @@
+# How to use the API
+
 This guide is about using NOMAD's REST APIs directly, e.g. via Python's *request*.
 
 To access the processed data with our client library `nomad-lab` follow
-[How to access the processed data](archive_query.md). You watch our
-[video tutorial on the API](../tutorial/access_api.md#access-data-via-api).
+[How to access processed data](archive_query.md). You can also watch our
+[video tutorial on the API](../../tutorial/access_api.md#access-data-via-api).
 
 ## Different options to use the API
 
 NOMAD offers all its functionality through application
-programming interfaces (APIs). More specifically [RESTful HTTP APIs](https://en.wikipedia.org/wiki/Representational_state_transfer) that allows you
+programming interfaces (APIs). More specifically [RESTful HTTP APIs](https://en.wikipedia.org/wiki/Representational_state_transfer){:target="_blank"} that allow you
 to use NOMAD as a set of resources (think data) that can be uploaded, accessed, downloaded,
-searched for, etc. via [HTTP requests](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol).
+searched for, etc. via [HTTP requests](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol){:target="_blank"}.
 
 You can get an overview on all NOMAD APIs on the [API page]({{ nomad_url() }}../../gui/analyze/apis).
 We will focus here on NOMAD's main API (v1). In fact, this API is also used by
@@ -44,7 +46,7 @@ API functions that allows you to try these functions in the browser.
 
 <h4>Use NOMAD's Python package</h4>
 
-Install the [NOMAD Python client library](pythonlib.md) and use it's `ArchiveQuery`
+Install the [NOMAD Python client library](./pythonlib.md) and use its `ArchiveQuery`
 functionality for a more convenient query based access of archive data following the
 [How-to access the processed data](archive_query.md) guide.
 
@@ -221,9 +223,9 @@ The result will look like this:
 ```
 
 You can work with the results in the given JSON (or respective Python dict/list) data already.
-If you have [NOMAD's Python library](pythonlib.md) installed ,
+If you have [NOMAD's Python library](./pythonlib.md) installed,
 you can take the archive data and use the Python interface.
-The [Python interface](../plugins/schemas.md#wrap-data-with-python-schema-classes) will help with code-completion (e.g. in notebook environments),
+The [Python interface](../customization/plugins_dev.md#wrap-data-with-python-schema-classes) will help with code-completion (e.g. in notebook environments),
 resolve archive references (e.g. from workflow to calculation to system), and allow unit conversion:
 ```py
 from nomad.datamodel import EntryArchive
@@ -250,7 +252,7 @@ the API:
 - Raw files, the files as they were uploaded to NOMAD.
 - Archive data, all of the extracted data for an entry.
 
-There are also different entities (see also [Datamodel](../explanation/basics.md)) with different functions in the API:
+There are also different entities (see also [Datamodel](../../explanation/basics.md)) with different functions in the API:
 
 - Entries
 - Uploads
@@ -373,7 +375,7 @@ response = requests.get(
 uploads = response.json()['data']
 ```
 
-If you have the [NOMAD Python package](pythonlib) installed. You can use its `Auth`
+If you have the [NOMAD Python package](./pythonlib.md) installed. You can use its `Auth`
 implementation:
 
 ```py
diff --git a/docs/apis/archive_query.md b/docs/howto/programmatic/archive_query.md
similarity index 98%
rename from docs/apis/archive_query.md
rename to docs/howto/programmatic/archive_query.md
index d32f9c3afc514b8b70adc71781453e8f18737679..d40aa823003f5c5ba9d3648e8bb1cb7a17719fc3 100644
--- a/docs/apis/archive_query.md
+++ b/docs/howto/programmatic/archive_query.md
@@ -1,7 +1,9 @@
+# How to access processed data
+
 The `ArchiveQuery` allows you to search for entries and access their parsed and processed *archive* data
 at the same time. Furthermore, all data is accessible through a convenient Python interface
 based on the schema rather than plain JSON. See also this guide on using
-[NOMAD's Python schemas](../plugins/schemas.md#use-python-schemas-to-work-with-data)
+[NOMAD's Python schemas](../customization/plugins_dev.md#use-python-schemas-to-work-with-data)
 to work with processed data.
 
 As a requirement, you have to install the `nomad-lab` Python package. Follow the
@@ -206,7 +208,7 @@ The following arguments are acceptable for `ArchiveQuery`.
 
 ## The complete example
 
-!!! attention
+!!! warning "Attention"
     This examples uses the new `workflow2` workflow system. This is still under development
     and this example might not yet produce results on the public nomad data.
 
diff --git a/docs/apis/local_parsers.md b/docs/howto/programmatic/local_parsers.md
similarity index 88%
rename from docs/apis/local_parsers.md
rename to docs/howto/programmatic/local_parsers.md
index e4e2575bcd9675503e9b645694f2554f2cde33b6..6d7f052f9a3ec54f6fadf823ac63795c2632f5aa 100644
--- a/docs/apis/local_parsers.md
+++ b/docs/howto/programmatic/local_parsers.md
@@ -1,6 +1,6 @@
 # How to run a parser
 
-You can find a [list of all parsers](../reference/parsers.md) and supported files in the reference.
+You can find a [list of all parsers](../../reference/parsers.md) and supported files in the reference.
 
 First you need to have the `nomad-lab` pypi package installed. You find more detailed
 instructions [here](pythonlib.md):
@@ -11,7 +11,7 @@ pip install nomad-lab
 
 ## From the command line
 
-You can run NOMAD parsers from the [command line interface](../reference/cli.md) (CLI).
+You can run NOMAD parsers from the [command line interface](../../reference/cli.md) (CLI).
 The parse command will automatically match the right parser to your file and run the parser.
 There are two output formats:
 
@@ -30,7 +30,7 @@ nomad parse --show-archive <path-to-your-mainfile-code-output-file>
 
 To skip the parser matching, i.e. the process that determined which parser fits to
 the given file, and state the parser directly, you can use the `--parser` argument
-to provide a [parser name](../reference/parsers.md).
+to provide a [parser name](../../reference/parsers.md).
 
 ```
 nomad parser --parser parsers/vasp <path-to-your-mainfile-code-output-file>
diff --git a/docs/howto/programmatic/publish_python.md b/docs/howto/programmatic/publish_python.md
new file mode 100644
index 0000000000000000000000000000000000000000..6f7e7d76908e04095f0d6cd7c6d3c97939ebdc9a
--- /dev/null
+++ b/docs/howto/programmatic/publish_python.md
@@ -0,0 +1,224 @@
+# How to publish data using python
+
+## Uploading, changing metadata, and publishing via python API
+
+The [NOMAD API](https://nomad-lab.eu/prod/rae/docs/api.html){:target="_blank"} allows uploading, publishing, etc. using a local python environment, as an alternative to the NOMAD GUI. An overview of all API functionalities is provided in [How to use the API](api.md).
+
+We have prepared some simple Python functions to facilitate use of this API. For use as demonstrated below, copy the following code into a file called NOMAD_API.py:
+
+```python
+import requests
+
+def get_authentication_token(nomad_url, username, password):
+    '''Get the token for accessing your NOMAD unpublished uploads remotely'''
+    try:
+        response = requests.get(
+            nomad_url + 'auth/token', params=dict(username=username, password=password), timeout=10)
+        token = response.json().get('access_token')
+        if token:
+            return token
+
+        print('response is missing token: ')
+        print(response.json())
+        return
+    except Exception:
+        print('something went wrong trying to get authentication token')
+        return
+
+
+def create_dataset(nomad_url, token, dataset_name):
+    '''Create a dataset to group a series of NOMAD entries'''
+    try:
+        response = requests.post(
+            nomad_url + 'datasets/',
+            headers={'Authorization': f'Bearer {token}', 'Accept': 'application/json'},
+            json={"dataset_name": dataset_name},
+            timeout=10
+            )
+        dataset_id = response.json().get('dataset_id')
+        if dataset_id:
+            return dataset_id
+
+        print('response is missing dataset_id: ')
+        print(response.json())
+        return
+    except Exception:
+        print('something went wrong trying to create a dataset')
+        return
+
+def upload_to_NOMAD(nomad_url, token, upload_file):
+    '''Upload a single file for NOMAD upload, e.g., zip format'''
+    with open(upload_file, 'rb') as f:
+        try:
+            response = requests.post(
+                nomad_url + 'uploads',
+                headers={'Authorization': f'Bearer {token}', 'Accept': 'application/json'},
+                data=f, timeout=30)
+            upload_id = response.json().get('upload_id')
+            if upload_id:
+                return upload_id
+
+            print('response is missing upload_id: ')
+            print(response.json())
+            return
+        except Exception:
+            print('something went wrong uploading to NOMAD')
+            return
+
+def check_upload_status(nomad_url, token, upload_id):
+    '''
+    # upload success => returns 'Process process_upload completed successfully'
+    # publish success => 'Process publish_upload completed successfully'
+    '''
+    try:
+        response = requests.get(
+            nomad_url + 'uploads/' + upload_id,
+            headers={'Authorization': f'Bearer {token}'}, timeout=30)
+        status_message = response.json().get('data').get('last_status_message')
+        if status_message:
+            return status_message
+
+        print('response is missing status_message: ')
+        print(response.json())
+        return
+    except Exception:
+        print('something went wrong trying to check the status of upload' + upload_id)
+        # upload gets deleted from the upload staging area once published...or in this case something went wrong
+        return
+
+def edit_upload_metadata(nomad_url, token, upload_id, metadata):
+    '''
+    Example of new metadata:
+    upload_name = 'Test_Upload_Name'
+    metadata = {
+        "metadata": {
+        "upload_name": upload_name,
+        "references": ["https://doi.org/xx.xxxx/xxxxxx"],
+        "datasets": dataset_id,
+        "embargo_length": 0,
+        "coauthors": ["coauthor@affiliation.de"],
+        "comment": 'This is a test upload...'
+        },
+    }
+    '''
+
+    try:
+        response = requests.post(
+            nomad_url+'uploads/' + upload_id + '/edit',
+            headers={'Authorization': f'Bearer {token}', 'Accept': 'application/json'},
+            json=metadata, timeout=30)
+        return response
+    except Exception:
+        print('something went wrong trying to add metadata to upload' + upload_id)
+        return
+
+def publish_upload(nomad_url, token, upload_id):
+    '''Publish an upload'''
+    try:
+        response = requests.post(
+            nomad_url+'uploads/' + upload_id + '/action/publish',
+            headers={'Authorization': f'Bearer {token}', 'Accept': 'application/json'},
+            timeout=30)
+        return response
+    except Exception:
+        print('something went wrong trying to publish upload: ' + upload_id)
+        return
+```
+
+Now, we will demonstrate how to use these functions. Within a notebook or python script, import the above functions:
+
+```python
+from NOMAD_API import *
+```
+
+Define the following user information:
+```python
+username = 'nomad_email@affiliation.edu'
+password = 'password'
+```
+
+Define the NOMAD API endpoint:
+```python
+# nomad_url = 'https://nomad-lab.eu/prod/v1/api/v1/'  # production nomad
+nomad_url = 'https://nomad-lab.eu/prod/v1/test/api/v1/'  # test nomad (deleted occasionally)
+```
+
+Get a token for accessing your unpublished uploads:
+
+```python
+token = get_authentication_token(nomad_url, username, password)
+```
+
+Create a dataset for grouping uploads that belong to, e.g., a publication:
+
+```python
+dataset_id = create_dataset(nomad_url, token, 'Test_Dataset')
+```
+
+Upload some test data to NOMAD:
+
+```python
+upload_id = upload_to_NOMAD(nomad_url, token, 'test_data.zip')
+```
+
+Check the status to make sure the upload was processed correctly:
+
+```python
+last_status_message = check_upload_status(nomad_url, token, upload_id)
+print(last_status_message)
+```
+
+The immediate result may be:
+
+    'Waiting for results (level 0)'
+
+After some time you will get:
+
+    'Process process_upload completed successfully'
+
+??? tip
+
+    Some data, e.g., large systems or molecular dynamics trajectories, take some time to process. In this case, you can call the above function intermittently, e.g., in a while loop with a sleep call in between, waiting for `last_status_message` to be "Process process_upload completed successfully".
+
+
+Now that the upload processing is complete, we can add coauthors, references, and other comments, as well as link to a dataset and provide a proper name for the upload:
+
+```python
+metadata = {
+    "metadata": {
+    "upload_name": 'Test_Upload',
+    "references": ["https://doi.org/xx.xxxx/x.xxxx"],
+    "datasets": dataset_id,
+    "embargo_length": 0,
+    "coauthors": ["coauthor@affiliation.de"],
+    "comment": 'This is a test upload...',
+},
+}
+response = edit_upload_metadata(nomad_url, token, upload_id, metadata)
+```
+
+Check the upload again to make sure that the metadata was changed:
+
+```python
+last_status_message = check_upload_status(nomad_url, token, upload_id)
+print(last_status_message)
+```
+
+    'Process edit_upload_metadata completed successfully'
+
+
+Now, we are ready to publish:
+
+```python
+response = publish_upload(nomad_url, token, upload_id)
+```
+
+Once again check the status:
+
+```python
+last_status_message = check_upload_status(nomad_url, token, upload_id)
+print(last_status_message)
+```
+
+    'Process publish_upload completed successfully'
+
diff --git a/docs/apis/pythonlib.md b/docs/howto/programmatic/pythonlib.md
similarity index 95%
rename from docs/apis/pythonlib.md
rename to docs/howto/programmatic/pythonlib.md
index af8a60d29f39de70e20080678dd67107ef99a985..af0cf3da37bb2771bab045b624e6c8e250631424 100644
--- a/docs/apis/pythonlib.md
+++ b/docs/howto/programmatic/pythonlib.md
@@ -1,10 +1,10 @@
-# Install the Python library
+# How to install nomad-lab
 
 We provide a Python package called `nomad-lab`. The package can be used to run
 certain NOMAD features within local Python programming environments. It includes
 the NOMAD parsers and normalizers, or convenience functions to query the processed data on NOMAD.
 
-Released version of the package are hosted on [pypi](https://pypi.org/project/nomad-lab/)
+Released versions of the package are hosted on [pypi](https://pypi.org/project/nomad-lab/){:target="_blank"}
 and you can install it with *pip* (or conda).
 
 To install the newest pypi release, simply use pip:
@@ -12,7 +12,7 @@ To install the newest pypi release, simply use pip:
 pip install nomad-lab
 ```
 
-!!! attention
+!!! warning "Attention"
     The latest develop versions might still be considered beta and might not be published to
     pypi. If you require specific new features you might need to install `nomad-lab`
     from our GitLab package registry. To use features of a specific commit or
diff --git a/docs/index.md b/docs/index.md
index 1b0631715ee9805ed079f60a372fd609d62e5810..94703dac2e27f89ef7f7a6fd9804f5601ab8ef79 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -26,13 +26,10 @@ A series of tutorials will guide you through the main functionality of NOMAD.
 - [Upload and publish your own data](tutorial/upload_publish.md)
 - [Use the search interface to identify interesting data](tutorial/explore.md)
 - [Use the API to search and access processed data for analysis](tutorial/access_api.md)
-- [Find and use the automations of the built-in schemas available in NOMAD](tutorial/builtin.md)
 - [Create and use custom schemas in NOMAD](tutorial/custom.md)
-- [Customization at its best: user-defined schema and automation](tutorial/plugins.md)
-- [Third-party ELN integration](tutorial/third_party.md)
 
-- [Example data and exercises](https://www.fairmat-nfdi.eu/events/fairmat-tutorial-1/tutorial-1-materials)
-- [More videos and tutorials on YouTube](https://youtube.com/playlist?list=PLrRaxjvn6FDW-_DzZ4OShfMPcTtnFoynT)
+- [Example data and exercises](https://www.fairmat-nfdi.eu/events/fairmat-tutorial-1/tutorial-1-materials){:target="_blank"}
+- [More videos and tutorials on YouTube](https://youtube.com/playlist?list=PLrRaxjvn6FDW-_DzZ4OShfMPcTtnFoynT){:target="_blank"}
 
 
 </div>
@@ -40,12 +37,15 @@ A series of tutorials will guide you through the main functionality of NOMAD.
 
 ### How-to guides
 
-These docs provides step-by-step instructions for a wide range of tasks. For example:
+How-to guides provide step-by-step instructions for a wide range of tasks, organized under the following overarching topics:
 
-- [How to upload and publish data](data/upload.md)
-- [How to write a custom ELN](schemas/elns.md)
-- [How to run a parser locally](apis/local_parsers.md)
-- [How to install NOMAD Oasis](oasis/install.md)
+- Manage and find data
+- Programmatic data access
+- Oasis
+- Customization
+- Development
+
+[Open the how-to guides](howto/overview.md){.md-button .nomad-button .nomad-button--card-action}
 
 </div>
 
@@ -53,7 +53,7 @@ These docs provides step-by-step instructions for a wide range of tasks. For exa
 
 ### Explanation
 
-This section provides background knowledge on what are
+The explanation section provides background knowledge on what are
 schemas and structured data, how does processing work, the NOMAD architecture, and more.
 
 </div>
@@ -73,10 +73,10 @@ NOMAD is an open source project that warmly welcomes community projects, contrib
 NOMAD is developed by FAIRmat, an open NFDI consortium of over 30 partners building a shared
 data structure of for materials science together.
 
-- [Get support](https://nomad-lab.eu/nomad-lab/support.html)
-- [Join our online forum](https://matsci.org/c/nomad/32)
-- [Contribute](develop/contrib.md)
-- [View our roadmap](https://nomad-lab.eu/nomad-lab/features.html)
-- [Code guidelines](develop/guides.md)
+- [Get support](https://nomad-lab.eu/nomad-lab/support.html){:target="_blank"}
+- [Join our online forum](https://matsci.org/c/nomad/32){:target="_blank"}
+- [Contribute](howto/develop/contrib.md)
+- [View our roadmap](https://nomad-lab.eu/nomad-lab/features.html){:target="_blank"}
+- [Code guidelines](reference/code_guidelines.md)
 
 Thinking about using NOMAD for your next project? Get in touch!
diff --git a/docs/oasis/customize.md b/docs/oasis/customize.md
deleted file mode 100644
index 10f9335a3fdc73a71b306d99233b090889509acf..0000000000000000000000000000000000000000
--- a/docs/oasis/customize.md
+++ /dev/null
@@ -1,15 +0,0 @@
-!!! attention
-
-    This part of the documentation is still work in progress.
-
-This is an incomplete list of potential customizations. Please read the respective
-guides to learn more.
-
-- Installation specific changes (domain, path-prefix): [How to install an Oasis](install.md)
-- [Restricting user access](admin.md#restricting-access-to-your-oasis)
-- [Configure custom apps](apps.md)
-- Write .yaml based [schemas](../schemas/basics.md) and [ELNs](../schemas/elns.md)
-- Learn how to use the [tabular parser](../schemas/tabular.md) to manage data from .xls or .csv
-- Develop a [schema plugin](../plugins/schemas.md) for more powerful schemas and ELNs
-- Use and develop [parser plugins](../plugins/parsers.md) for specific file formats
-- Add specialized [NORTH tools](../data/north.md)
\ No newline at end of file
diff --git a/docs/plugins/normalizers.md b/docs/plugins/normalizers.md
deleted file mode 100644
index 7891a5ed5514e11295ab369729fdfde61f27026f..0000000000000000000000000000000000000000
--- a/docs/plugins/normalizers.md
+++ /dev/null
@@ -1,17 +0,0 @@
-A normalizer can be any Python algorithm that takes the archive of an entry as input
-and manipulates (usually expands) the given archive. This way, a normalizer can add
-additional sections and quantities based on the information already available in the
-archive.
-
-All normalizers are executed after parsing. Normalizers are run for each entry (i.e. each
-set of files that represent a code run). Normalizers are run in a particular order, and
-you can make assumptions about the availability of data created by other normalizers.
-A normalizer is run in any case, but it might choose not to do anything. A normalizer
-can perform any operation on the archive, but in general it should only add more
-information, not alter existing information.
-
-## Getting started
-
-Fork and clone the [normalizer example project](https://github.com/nomad-coe/nomad-normalizer-plugin-example) as described in [before](plugins.md). Follow the original [how-to on writing a parser](../develop/normalizers.md).
-
-{{pydantic_model('nomad.config.plugins.Normalizer', heading='### Normalizer plugin metadata')}}
\ No newline at end of file
diff --git a/docs/plugins/parsers.md b/docs/plugins/parsers.md
deleted file mode 100644
index 24206ff4367a6e9d7f198466a89cb2d1bea68822..0000000000000000000000000000000000000000
--- a/docs/plugins/parsers.md
+++ /dev/null
@@ -1,7 +0,0 @@
-NOMAD uses parsers to convert raw code input and output files into NOMAD's common Archive format. This is the documentation on how to develop such a parser.
-
-## Getting started
-
-Fork and clone the [parser example project](https://github.com/nomad-coe/nomad-parser-plugin-example) as described in [before](plugins.md). Follow the original [how-to on writing a parser](../develop/parsers.md).
-
-{{pydantic_model('nomad.config.plugins.Parser', heading='### Parser plugin metadata', hide=['code_name','code_category','code_homepage','metadata'])}}
\ No newline at end of file
diff --git a/docs/reference/annotations.md b/docs/reference/annotations.md
index fd666175f2ec6c40548eaedcfdb47d85b634bbbc..fd92de53f9c37ad43a7f3277618bd314ee33836e 100644
--- a/docs/reference/annotations.md
+++ b/docs/reference/annotations.md
@@ -52,7 +52,7 @@ MySection:
 {{ pydantic_model('nomad.datamodel.metainfo.annotations.TabularAnnotation', heading='### `tabular`') }}
 
 Each and every quantity to be filled with data from tabular data files should be annotated as the following example.
-A practical example is provided in [How To](../schemas/tabular.md#preparing-the-tabular-data-file) section.
+A practical example is provided in [How To](../howto/customization/tabular.md#preparing-the-tabular-data-file) section.
 
 ```yaml
 my_quantity:
@@ -78,18 +78,18 @@ One special quantity will be dedicated to host the tabular data file. In the fol
 
 |Tutorial ref.|`file_mode`|`mapping_mode`|`sections`|How to ref.|
 |---|---|---|---|---|
-|1|`current_entry`|`column`|`root`|[HowTo](../schemas/tabular.md#1-column-mode-current-entry-parse-to-root)|
-|2|`current_entry`|`column`|my path|[HowTo](../schemas/tabular.md#2-column-mode-current-entry-parse-to-my-path)|
+|1|`current_entry`|`column`|`root`|[HowTo](../howto/customization/tabular.md#1-column-mode-current-entry-parse-to-root)|
+|2|`current_entry`|`column`|my path|[HowTo](../howto/customization/tabular.md#2-column-mode-current-entry-parse-to-my-path)|
 |<span style="color:red">np1</span>|`current_entry`|`row`|`root`|<span style="color:red">Not possible</span>|
-|3|`current_entry`|`row`|my path|[HowTo](../schemas/tabular.md#3-row-mode-current-entry-parse-to-my-path)|
+|3|`current_entry`|`row`|my path|[HowTo](../howto/customization/tabular.md#3-row-mode-current-entry-parse-to-my-path)|
 |<span style="color:red">np2</span>|`single_new_entry`|`column`|`root`|<span style="color:red">Not possible</span>|
-|4|`single_new_entry`|`column`|my path|[HowTo](../schemas/tabular.md#4-column-mode-single-new-entry-parse-to-my-path)|
+|4|`single_new_entry`|`column`|my path|[HowTo](../howto/customization/tabular.md#4-column-mode-single-new-entry-parse-to-my-path)|
 |<span style="color:red">np3</span>|`single_new_entry`|`row`|`root`|<span style="color:red">Not possible</span>|
-|5|`single_new_entry`|`row`|my path|[HowTo](../schemas/tabular.md#5-row-mode-single-new-entry-parse-to-my-path)|
+|5|`single_new_entry`|`row`|my path|[HowTo](../howto/customization/tabular.md#5-row-mode-single-new-entry-parse-to-my-path)|
 |<span style="color:red">np4</span>|`multiple_new_entries`|`column`|`root`|<span style="color:red">Not possible</span>|
 |<span style="color:red">np5</span>|`multiple_new_entries`|`column`|my path|<span style="color:red">Not possible</span>|
-|6|`multiple_new_entries`|`row`|`root`|[HowTo](../schemas/tabular.md#6-row-mode-multiple-new-entries-parse-to-root)|
-|7|`multiple_new_entries`|`row`|my path|[HowTo](../schemas/tabular.md#7-row-mode-multiple-new-entries-parse-to-my-path)|
+|6|`multiple_new_entries`|`row`|`root`|[HowTo](../howto/customization/tabular.md#6-row-mode-multiple-new-entries-parse-to-root)|
+|7|`multiple_new_entries`|`row`|my path|[HowTo](../howto/customization/tabular.md#7-row-mode-multiple-new-entries-parse-to-my-path)|
 
 ```yaml
 data_file:
diff --git a/docs/develop/guides.md b/docs/reference/code_guidelines.md
similarity index 90%
rename from docs/develop/guides.md
rename to docs/reference/code_guidelines.md
index 6c5ce9e7d090a1882044a1026c584d80de66043c..f4dc08541300990903983c402ca52df504cc1582 100644
--- a/docs/develop/guides.md
+++ b/docs/reference/code_guidelines.md
@@ -18,9 +18,9 @@ them carefully.
 The are some *rules* or better strong *guidelines* for writing code. The following
 applies to all Python code (and where applicable, also to Javascript and other code):
 
-- Use an IDE (e.g. [VS Code](https://code.visualstudio.com/)) or otherwise automatically
+- Use an IDE (e.g. [VS Code](https://code.visualstudio.com/){:target="_blank"}) or otherwise automatically
   enforce
-  [code formatting and linting](https://code.visualstudio.com/docs/python/linting).
+  [code formatting and linting](https://code.visualstudio.com/docs/python/linting){:target="_blank"}.
 
 - Use `nomad qa` before committing. This will run all tests, static type checks, linting,
   etc.
@@ -28,22 +28,22 @@ applies to all Python code (and where applicable, also to Javascript and other c
 - Test the public interface of each submodule (i.e. Python file).
 
 - There is a style guide to Python. Write
-  [PEP 8](https://www.python.org/dev/peps/pep-0008/)-compliant Python code. An exception
+  [PEP 8](https://www.python.org/dev/peps/pep-0008/){:target="_blank"}-compliant Python code. An exception
   is the line cap at 79, which can be broken but keep it 90-ish.
 
-- Be [Pythonic](https://docs.python-guide.org/writing/style/) and watch
-  [this talk about best practices](https://www.youtube.com/watch?v=wf-BqAjZb8M).
+- Be [Pythonic](https://docs.python-guide.org/writing/style/){:target="_blank"} and watch
+  [this talk about best practices](https://www.youtube.com/watch?v=wf-BqAjZb8M){:target="_blank"}.
 
 - Add docstrings to the *public* interface of each submodule (i.e. Python file). This
   includes APIs that are exposed to other submodules (i.e. other Python files).
 
 - The project structure follows
-  [this guide](https://docs.python-guide.org/writing/structure/). Keep it!
+  [this guide](https://docs.python-guide.org/writing/structure/){:target="_blank"}. Keep it!
 
 - Write tests for all contributions.
 
 - Adopt *Clean Code* practices. Here is a good
-  [introductory talk to Clean Code](https://youtu.be/7EmboKQH8lM).
+  [introductory talk to Clean Code](https://youtu.be/7EmboKQH8lM){:target="_blank"}.
 
 ## Enforcing rules with CI/CD
 
@@ -51,12 +51,13 @@ These *guidelines* are partially enforced by CI/CD. As part of CI all tests are
 branches; further we run a *linter*, *PEP 8* checker, and *mypy* (static type checker).
 You can run `nomad qa` to run all these tests and checks before committing.
 
-See [the contributing guide](./contrib.md) for more details on how to work with issues,
+See [the contributing guide](../howto/develop/contrib.md) for more details on how to work with issues,
 branches, merge requests, and CI/CD.
 
+
 ## Documenting code
 
-Write [Clean Code](https://youtu.be/7EmboKQH8lM) that is easy to comprehend.
+Write [Clean Code](https://youtu.be/7EmboKQH8lM){:target="_blank"} that is easy to comprehend.
 
 However, you should document the whole publicly exposed interface of a module. For Python
 this includes most classes and functions that you will write, for React its exported
@@ -65,15 +66,15 @@ components and their props.
 For all functionality that is exposed to clients (APIs, CLI, schema base classes and
 annotations, UI functionality), you must consider to add explanations, tutorials, and
 examples to the documentation system (i.e. the `docs` folder). This is built with
-[mkdocs](https://www.mkdocs.org/) and published as part of each NOMAD installation.
+[mkdocs](https://www.mkdocs.org/){:target="_blank"} and published as part of each NOMAD installation.
 Also mind `nomad/mkdocs.py` and `mkdocs.yaml` and have a look at used plugins and extra
 functions, e.g. this includes generation of Markdown from `examples` or Pydantic models.
 
 To document Python functions and classes, use Google
-[docstrings](https://github.com/NilsJPWerner/autoDocstring/blob/HEAD/docs/google.md).
+[docstrings](https://github.com/NilsJPWerner/autoDocstring/blob/HEAD/docs/google.md){:target="_blank"}.
 Use Markdown if you need to add markup but try to reduce this to a minimum.
 You can use VS Code plugins like
-[autoDocstring](https://github.com/NilsJPWerner/autoDocstring/tree/f7bc9f427d5ebcd87e6f5839077a87ecd1cbb404)
+[autoDocstring](https://github.com/NilsJPWerner/autoDocstring/tree/f7bc9f427d5ebcd87e6f5839077a87ecd1cbb404){:target="_blank"}
 to help.
 Always use single quotes, pad single-line docstrings with spaces and start multi-line ones
 on a new line.
@@ -198,7 +199,7 @@ Please follow the following rules when logging:
 - If a logger is not already provided, only use :py:func:`nomad.utils.get_logger` to
   acquire a new logger. Never use the built-in logging directly. These loggers work like
   the system loggers, but allow you to pass keyword arguments with additional context
-  data. See also the [structlog docs](https://structlog.readthedocs.io/en/stable/).
+  data. See also the [structlog docs](https://structlog.readthedocs.io/en/stable/){:target="_blank"}.
 
 - In many context, a logger is already provided (e.g. API, processing, parser,
   normalizer). This provided logger has already context information bounded. So it is
@@ -252,13 +253,13 @@ Keys that are present for events related to exceptions:
 
 - `digest`: If an exception was raised, the last 256 characters of the message are stored
   automatically into this key. If you wish to search for exceptions in
-  [Kibana](https://www.elastic.co/de/kibana), you will want to use this value as it will
+  [Kibana](https://www.elastic.co/de/kibana){:target="_blank"}, you will want to use this value as it will
   be indexed unlike the full exception object.
 
 ## Copyright notices
 
 We follow this
-[recommendation of the Linux Foundation](https://www.linuxfoundation.org/blog/2020/01/copyright-notices-in-open-source-software-projects/)
+[recommendation of the Linux Foundation](https://www.linuxfoundation.org/blog/2020/01/copyright-notices-in-open-source-software-projects/){:target="_blank"}
 for the copyright notice that is placed on top of each source code file.
 
 It is intended to provide a broad generic statement that allows all authors/contributors
@@ -266,12 +267,12 @@ of the NOMAD project to claim their copyright, independent of their organization
 individual ownership.
 
 You can simply copy the notice from another file. From time to time we can use a tool
-like [licenseheaders](https://pypi.org/project/licenseheaders/) to ensure correct
+like [licenseheaders](https://pypi.org/project/licenseheaders/){:target="_blank"} to ensure correct
 notices. In addition we keep a purely informative AUTHORS file.
 
 ## Git submodules and other "in-house" dependencies
 
 As the NOMAD ecosystem grows, you might develop libraries that are used by NOMAD instead
 of being part of its main codebase. The same guidelines should apply. You can use
-[GitHub Actions](https://github.com/features/actions) if your library is hosted on Github
+[GitHub Actions](https://github.com/features/actions){:target="_blank"} if your library is hosted on Github
 to ensure automated linting and tests.
diff --git a/docs/reference/config.md b/docs/reference/config.md
index 9172d0e007028b00f9bed8ddd7756d5b4bd5ef9c..fcf72f178551acd2116e9f20d53f6fea54021e93 100644
--- a/docs/reference/config.md
+++ b/docs/reference/config.md
@@ -100,7 +100,7 @@ The following is a reference of all configuration sections and attributes.
 
 ## User Interface
 
-These settings affect the behaviour of the user interface. Note that the configuration of apps is documented in more detail in the guide on [how to define apps](../oasis/apps.md).
+These settings affect the behaviour of the user interface. Note that the configuration of apps is documented in more detail in the guide on [how to define apps](../howto/oasis/apps.md).
 
 {{ config_models(['ui'])}}
 
diff --git a/docs/reference/glossary.md b/docs/reference/glossary.md
index 7fda59e53bd8a1d9b5b765da78540d1c370af624..90f3bdea2a26fc6d291e83179794391c9a6b18b3 100644
--- a/docs/reference/glossary.md
+++ b/docs/reference/glossary.md
@@ -18,7 +18,7 @@ out the application and this documentation.
 
 *Annotations* are part of data [schemas](#schema) and they describe aspects that are not
 directly defining the type or shape of data. They often allow to alter how certain data is
-managed, represented, or edited. See [annotations in the schema documentation](../schemas/elns.md#annotations).
+managed, represented, or edited. See [annotations in the schema documentation](../howto/customization/elns.md#annotations).
 
 ### Archive
 
diff --git a/docs/reference/parsers.md b/docs/reference/parsers.md
index 53904ab601882ce760bb8081ae9edb4948e76b40..4e39953e51965573ba9a4bb67405c6490d1c7895 100644
--- a/docs/reference/parsers.md
+++ b/docs/reference/parsers.md
@@ -4,8 +4,8 @@
 
     You might also want to read:
 
-    - [How to run parsers locally](../apis/local_parsers.md)
-    - [How to develop a parser plugin](../plugins/parsers.md)
+    - [How to run parsers locally](../howto/programmatic/local_parsers.md)
+    - [How to develop a parser plugin](../howto/customization/plugins_dev.md#develop-a-parser-plugin)
 
 This is a list of all available parsers and supported file formats:
 
diff --git a/docs/reference/plugins.md b/docs/reference/plugins.md
index 9bdb7687bfdbff42e0b9f7891f42e8e010845a00..84ec301010fe41810301321aff06f6c25b2a9d02 100644
--- a/docs/reference/plugins.md
+++ b/docs/reference/plugins.md
@@ -2,7 +2,7 @@
 
 !!! note
 
-    You might also want to read [the plugin how-tos](../plugins/plugins.md)
+    You might also want to read [the plugin how-tos](../howto/customization/plugins_dev.md)
 
 This is a list of all built-in plugins:
 
diff --git a/docs/reference/tutorials.md b/docs/reference/tutorials.md
new file mode 100644
index 0000000000000000000000000000000000000000..eb41b01fe8939d7e8d88c1d08ea76e56780308fb
--- /dev/null
+++ b/docs/reference/tutorials.md
@@ -0,0 +1,19 @@
+# List of NOMAD tutorials
+
+<!--
+Please follow the format:
+- Date(dd.mm.yyyy) Event: Tutorial Title
+    + short list of topics covered
+ -->
+
+!!! warning "Attention"
+    This page is still being updated with older NOMAD tutorials.
+
+- 29.09.2023 [CECAM workshop: An Introduction to the NOMAD repository for soft matter simulators](https://fairmat-nfdi.github.io/AreaC-Tutorial-CECAM-2023/){:target="_blank"}
+    + Basic NOMAD usage with molecular dynamics simulations, custom workflows, Python API
+
+- 14.06.2023 [FAIRmat Tutorial 10: FAIR electronic-structure data in NOMAD](https://fairmat-nfdi.github.io/AreaC-Tutorial10_2023/){:target="_blank"}
+    + Basic NOMAD usage for computational data, numerical precision filtering, custom workflows, knowledge-based XC functionals exploration
+
+- 15.02.2023 [FAIRmat Tutorial 7: Molecular Dynamics Trajectories and Workflows in NOMAD](https://www.fairmat-nfdi.eu/events/fairmat-tutorial-7/tutorial-7-materials){:target="_blank"}
+    + Uploading MD data, examining metadata, overview page, workflow visualizer, extracting MD data for trajectory analysis
\ No newline at end of file
diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css
index 321e087066b2696d28bf0d1c05085d2afca25a85..9f4674dcdeed56fd36da2c91c623fdc4d9fd1124 100644
--- a/docs/stylesheets/extra.css
+++ b/docs/stylesheets/extra.css
@@ -1,9 +1,13 @@
-
 .md-header__button.md-logo :where(img,svg) {
     width: 100%;
     height: 30px;
 }
 
+/* .md-nav--primary .md-nav__title[for=__drawer] {
+    background-color: var(--md-default-fg-color--lightest);
+    color: var(--md-default-fg-color--light);
+} */
+
 .md-header, .md-header__inner {
     background-color: #fff;
     color: #2A4CDF;
@@ -66,4 +70,56 @@
 .home-grid h3 {
     margin-top: 0;
     font-weight: 700;
+}
+
+.image-container p {
+    width: 100%;
+    display: flex;
+    flex-direction: row;
+    flex-wrap: nowrap;
+    gap: 1em;
+    margin-top: 1em;
+    margin-bottom: 1em;
+    align-content: stretch;
+}
+
+.image-container img {
+    flex-grow: 1;
+    width: 100%;
+}
+
+.image-container::after {
+    content: "";
+    display: table;
+    clear: both;
+}
+
+.screenshot {
+    /* border: 1px solid var(--md-primary-fg-color--dark); */
+    box-shadow: var(--md-shadow-z3);
+    margin: 1em;
+    max-width: calc(100% - 2em) !important;
+}
+
+.nomad-button {
+    display: inline-block;
+    /* font-size: 14px; */
+    font-weight: 700;
+    text-align: center;
+    white-space: nowrap;
+    vertical-align: middle;
+    cursor: pointer;
+    border-radius: 4px !important;
+    background-color: #2A4CDF;
+    color: #ffffff !important;
+    border: none !important;
+    margin: 0;
+
+    /* Additional styles from the original SCSS */
+    letter-spacing: normal;
+    text-transform: none;
+}
+
+.nomad-button--card-action {
+    margin: 1em 0 0 0 !important;
 }
\ No newline at end of file
diff --git a/docs/tutorial/access_api.md b/docs/tutorial/access_api.md
index 140c28795fd6587b5d1502ead4b0e61b5cf1f8fe..eeb1411ca1dd31f8e6b8da4da2f4c4fe6c6467b0 100644
--- a/docs/tutorial/access_api.md
+++ b/docs/tutorial/access_api.md
@@ -1,3 +1,6 @@
+!!! warning "Attention"
+    We are currently working to update this content.
+
 This video tutorial explains the basics of API and shows how to do simple requests
 against the NOMAD API.
 
diff --git a/docs/tutorial/builtin.md b/docs/tutorial/builtin.md
deleted file mode 100644
index d0ff3d97ff0f06bb885954d14b2af9ac9f46206a..0000000000000000000000000000000000000000
--- a/docs/tutorial/builtin.md
+++ /dev/null
@@ -1,3 +0,0 @@
-!!! attention
-
-    This part of the documentation is still work in progress.
diff --git a/docs/tutorial/custom.md b/docs/tutorial/custom.md
index 964dcdaf97ff32c7c250a47a2e79be0c004ec120..bb7191db82323a381dfd7e3e6212f68161b1530d 100644
--- a/docs/tutorial/custom.md
+++ b/docs/tutorial/custom.md
@@ -1,6 +1,6 @@
 ## What is a custom schema
 
-!!! attention
+!!! warning "Attention"
 
     This part of the documentation is still work in progress.
 
@@ -24,22 +24,22 @@ definitions:
           shape: ['*']
 ```
 
-## The base sections
+### The base sections
 
-!!! attention
+!!! warning "Attention"
 
     This part of the documentation is still work in progress.
 
-## Use of YAML files
+### Use of YAML files
 
-!!! attention
+!!! warning "Attention"
 
     This part of the documentation is still work in progress.
 
-## The built-in tabular parser
+### The built-in tabular parser
 
-NOMAD provides a standard parser to import your data from a spreadsheet file (`Excel` file with .xlsx extension) or from a CSV file (a Comma-Separated Values file with .csv extension). There are several ways to parse a tabular data file into a structured [data file](../explanation/data.md#data), depending on which structure we want to give our data. Therefore, the tabular parser can be set very flexibly, directly from the [schema file](../explanation/data.md#schema) through [annotations](../schemas/elns.md#annotations).
-In this tutorial we will focus on most common modes of the tabular parser. A complete description of all modes is given in the [Reference](../reference/annotations.md#tabular_parser) section. You can also follow the dedicated [How To](../schemas/tabular.md) to see practical examples of the NOMAD tabular parser, in each section you can find a commented sample schema with a step-by-step guide on how to set it to obtain the desired final structure of your parsed data.
+NOMAD provides a standard parser to import your data from a spreadsheet file (`Excel` file with .xlsx extension) or from a CSV file (a Comma-Separated Values file with .csv extension). There are several ways to parse a tabular data file into a structured [data file](../explanation/data.md#data), depending on which structure we want to give our data. Therefore, the tabular parser can be set very flexibly, directly from the [schema file](../explanation/data.md#schema) through [annotations](../howto/customization/elns.md#annotations).
+In this tutorial we will focus on the most common modes of the tabular parser. A complete description of all modes is given in the [Reference](../reference/annotations.md#tabular_parser) section. You can also follow the dedicated [How To](../howto/customization/tabular.md) to see practical examples of the NOMAD tabular parser, in each section you can find a commented sample schema with a step-by-step guide on how to set it to obtain the desired final structure of your parsed data.
 We will make use of the tabular parser in a custom yaml schema. To obtain some structured data in NOMAD with this parser:<br />
 
 1) the schema files should follow the NOMAD [archive files](../explanation/data.md#archive-files-a-shared-entry-structure) naming convention (i.e. `.archive.json` or `.archive.yaml` extension)<br />
@@ -47,34 +47,34 @@ We will make use of the tabular parser in a custom yaml schema. To obtain some s
 
    [comment]: <> (--> a link to the part upload etc should be inserted)
 
-3) a tabular data file must be dragged in the annotated [quantity](../schemas/basics.md#quantities) in order for NOMAD to parse it (the quantity is called `data_file` in the following examples)
+3) a tabular data file must be dragged in the annotated [quantity](../howto/customization/basics.md#quantities) in order for NOMAD to parse it (the quantity is called `data_file` in the following examples)
 
-### To be an Entry or not to be an Entry
+#### To be an Entry or not to be an Entry
 
 To use this parser, three kinds of annotation must be included in the schema: `tabular`, `tabular_parser`, `label_quantity`. Refer to the dedicated [Reference](../reference/annotations.md#tabular-data) section for the full list of options.
 
-!!! important
+!!! tip "important"
     The ranges of the three `mapping_options`, namely `file_mode`, `mapping_mode`, and `sections` can give rise to twelve different combinations (see table in [Reference](../reference/annotations.md#available-combinations)). It is worth to analyze each of them to understand which is the best choice to pursue from case to case.
     Some of them give rise to "not possible" data structures but are still listed for completeness, a brief explanation of why it is not possible to implement them is also provided.
     The main bring-home message is that a tabular data file can be parsed in one or more entries in NOMAD, giving rise to diverse and arbitrarily complex structures.
 
-In the following sections, two examples will be illustrated. A [tabular data file](../schemas/tabular.md#preparing-the-tabular-data-file) is parsed into one or more [data archive files](../explanation/data.md#data), their structure is based on a [schema archive file](../explanation/data.md#schema). NOMAD archive files are denoted as Entries.
+In the following sections, two examples will be illustrated. A [tabular data file](../howto/customization/tabular.md#preparing-the-tabular-data-file) is parsed into one or more [data archive files](../explanation/data.md#data), their structure is based on a [schema archive file](../explanation/data.md#schema). NOMAD archive files are denoted as Entries.
 
 !!! note
-    From the NOMAD point of view, a schema file and a data file are the same kind of file where different sections have been filled (see [archive files description](../explanation/data.md#archive-files-a-shared-entry-structure)). Specifically, a schema file has its `definitions` section filled while a data file will have its `data` section filled. See [How to write a schema](../schemas/basics.md#uploading-schemas) for a more complete description of an archive file.
+    From the NOMAD point of view, a schema file and a data file are the same kind of file where different sections have been filled (see [archive files description](../explanation/data.md#archive-files-a-shared-entry-structure)). Specifically, a schema file has its `definitions` section filled while a data file will have its `data` section filled. See [How to write a schema](../howto/customization/basics.md#uploading-schemas) for a more complete description of an archive file.
 
-### Example 1
+#### Example 1
 
 We want instantiate an object created from the schema already shown in the first [Tutorial section](#what-is-a-custom-schema) and populate it with the data contained in the following excel file.
 
 <p align="center" width="100%">
-    <img width="30%" src="../schemas/2col.png">
+    <img width="30%" src="../howto/customization/images/2col.png">
 </p>
 
-The two columns in the file will be stored in a NOMAD Entry archive within two array quantities, as shown in the image below. In the case where the section to be filled is not in the root level of our schema but nested inside, it is useful to check the dedicated [How-to](../schemas/tabular.md#2-column-mode-current-entry-parse-to-my-path).
+The two columns in the file will be stored in a NOMAD Entry archive within two array quantities, as shown in the image below. In the case where the section to be filled is not in the root level of our schema but nested inside, it is useful to check the dedicated [How-to](../howto/customization/tabular.md#2-column-mode-current-entry-parse-to-my-path).
 
 <p align="center" width="100%">
-    <img width="100%" src="../tutorial/tabular-1.png">
+    <img width="100%" src="images/tabular-1.png">
 </p>
 
 The schema will be decorated by the annotations mentioned at the beginning of this section  and will look like this:
@@ -120,19 +120,19 @@ definitions:
               name: "My header 2"
 ```
 
-Here the tabular data file is parsed by columns, directly within the Entry where the `TableData` is inherited and filling the quantities in the root level of the schema (see dedicated how-to to learn [how to inherit tabular parser in your schema](../schemas/tabular.md#inheriting-the-tabledata-base-section)).
+Here the tabular data file is parsed by columns, directly within the Entry where the `TableData` is inherited and filling the quantities in the root level of the schema (see dedicated how-to to learn [how to inherit tabular parser in your schema](../howto/customization/tabular.md#inheriting-the-tabledata-base-section)).
 
 !!! note
     In yaml files a dash character indicates a list element. `mapping_options` is a list because it is possible to parse multiple tabular sheets from the same schema with different parsing options. `sections` in turn is a list because multiple sections of the schema can be parsed with same parsing options.
 
-### Example 2
+#### Example 2
 
 <p align="center" width="100%">
-    <img width="100%" src="../tutorial/tabular-6.png">
+    <img width="100%" src="images/tabular-6.png">
 </p>
 
 In this example, each row of the tabular data file will be placed in a new Entry that is an instance of a class defined in the schema. This would make sense for, say, an inventory spreadsheet where each row can be a separate entity such as a sample, a substrate, etc.
-In this case, a manyfold of Entries will be generated based on the only class available in the schema. These Entries will not be bundled together by a parent Entry but just live in our NOMAD Upload as a spare list, to bundle them together it is useful to check the dedicated [How-to](../schemas/tabular.md#7-row-mode-multiple-new-entries-parse-to-my-path). They might still be referenced manually inside an overarching Entry, such as an experiment Entry, from the ELN with `ReferenceEditQuantity`.
+In this case, a manyfold of Entries will be generated based on the only class available in the schema. These Entries will not be bundled together by a parent Entry but just live in our NOMAD Upload as a spare list, to bundle them together it is useful to check the dedicated [How-to](../howto/customization/tabular.md#7-row-mode-multiple-new-entries-parse-to-my-path). They might still be referenced manually inside an overarching Entry, such as an experiment Entry, from the ELN with `ReferenceEditQuantity`.
 
 ```yaml
 definitions:
@@ -173,4 +173,47 @@ definitions:
           m_annotations:
             tabular:
               name: "My header 2"
+```
+
+!!! warning "Attention"
+    This part of the documentation is still work in progress.
+
+## Custom normalizers
+
+For custom schemas, you might want to add custom normalizers. All files are parsed
+and normalized when they are uploaded or changed. The NOMAD metainfo Python interface
+allows you to add functions that are called when your data is normalized.
+
+Here is an example:
+
+```python
+--8<-- "examples/archive/custom_schema.py"
+```
+
+To add a `normalize` function, your section has to inherit from `ArchiveSection` which
+provides the base for this functionality. Now you can overwrite the `normalize` function
+and add your own behavior. Make sure to call the `super` implementation properly to
+support schemas with multiple inheritance.
+
+If we parse an archive like this:
+
+```yaml
+--8<-- "examples/archive/custom_data.archive.yaml"
+```
+
+we will get a final normalized archive that contains our data like this:
+
+```json
+{
+  "data": {
+    "m_def": "examples.archive.custom_schema.SampleDatabase",
+    "samples": [
+      {
+        "added_date": "2022-06-18T00:00:00+00:00",
+        "formula": "NaCl",
+        "sample_id": "2022-06-18 00:00:00+00:00--NaCl"
+      }
+    ]
+  }
+}
 ```
\ No newline at end of file
diff --git a/docs/tutorial/explore.md b/docs/tutorial/explore.md
index 72ccb162ca8c5a0c778834cf8c366d473e3fd2f1..5affa36d4290bcaa898e79791fda71c6db5c6f4a 100644
--- a/docs/tutorial/explore.md
+++ b/docs/tutorial/explore.md
@@ -1,11 +1,14 @@
+!!! warning "Attention"
+    We are currently working to update this content.
+
 This tutorial shows how to use NOMAD's search interface and structured data browsing to explore available data.
 
 !!! note
     The NOMAD seen in the tutorials is an older version with a different color theme,
     but all the demonstrated functionality is still available on the current version.
     You'll find the NOMAD test installation mentioned in the first video
-    [here](https://nomad-lab.eu/prod/v1/test/gui/search/entries).
+    [here](https://nomad-lab.eu/prod/v1/test/gui/search/entries){:target="_blank"}.
 
 <div class="youtube">
 <iframe src="https://www.youtube-nocookie.com/embed/38S2U-TIvxE" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
-</div>
+</div>
\ No newline at end of file
diff --git a/docs/tutorial/images/AI_toolkit.png b/docs/tutorial/images/AI_toolkit.png
new file mode 100644
index 0000000000000000000000000000000000000000..254eec7927f1f16232ca266b4262d4a250018ba5
Binary files /dev/null and b/docs/tutorial/images/AI_toolkit.png differ
diff --git a/docs/tutorial/images/api.png b/docs/tutorial/images/api.png
new file mode 100644
index 0000000000000000000000000000000000000000..0697c825c36c1cb230df096f328572058b8dd1bc
Binary files /dev/null and b/docs/tutorial/images/api.png differ
diff --git a/docs/tutorial/images/beta1.png b/docs/tutorial/images/beta1.png
new file mode 100644
index 0000000000000000000000000000000000000000..dc08adaed045d03c3655a01db84f8438bd78076c
Binary files /dev/null and b/docs/tutorial/images/beta1.png differ
diff --git a/docs/tutorial/images/beta2.png b/docs/tutorial/images/beta2.png
new file mode 100644
index 0000000000000000000000000000000000000000..65534535e8294349ba6d3dfed938b9215b20dcaf
Binary files /dev/null and b/docs/tutorial/images/beta2.png differ
diff --git a/docs/tutorial/images/explore.png b/docs/tutorial/images/explore.png
new file mode 100644
index 0000000000000000000000000000000000000000..5922ba1e47cc0140d823ed233de1b3c025435af9
Binary files /dev/null and b/docs/tutorial/images/explore.png differ
diff --git a/docs/tutorial/images/materials.png b/docs/tutorial/images/materials.png
new file mode 100644
index 0000000000000000000000000000000000000000..5774b8c1a36abe554c484c09da1b4e7b741c2c6c
Binary files /dev/null and b/docs/tutorial/images/materials.png differ
diff --git a/docs/tutorial/images/navigate_AI_toolkit.png b/docs/tutorial/images/navigate_AI_toolkit.png
new file mode 100644
index 0000000000000000000000000000000000000000..be7c9a14457fa8fb630c5475aadce5ffb1a22318
Binary files /dev/null and b/docs/tutorial/images/navigate_AI_toolkit.png differ
diff --git a/docs/tutorial/images/sc1_alternate_stack.png b/docs/tutorial/images/sc1_alternate_stack.png
new file mode 100644
index 0000000000000000000000000000000000000000..df2f13460d37af381cae5d7c04933eed36d93ccb
Binary files /dev/null and b/docs/tutorial/images/sc1_alternate_stack.png differ
diff --git a/docs/tutorial/images/sc1_fullquery_entrieslist.png b/docs/tutorial/images/sc1_fullquery_entrieslist.png
new file mode 100644
index 0000000000000000000000000000000000000000..68d3357858533654cf237f6642e6e37d39bec453
Binary files /dev/null and b/docs/tutorial/images/sc1_fullquery_entrieslist.png differ
diff --git a/docs/tutorial/images/sc1_fullquery_rawdata.png b/docs/tutorial/images/sc1_fullquery_rawdata.png
new file mode 100644
index 0000000000000000000000000000000000000000..5a4be1276551cc912c2afffe97d3554429fbf180
Binary files /dev/null and b/docs/tutorial/images/sc1_fullquery_rawdata.png differ
diff --git a/docs/tutorial/images/sc1_fullquery_sidepane.png b/docs/tutorial/images/sc1_fullquery_sidepane.png
new file mode 100644
index 0000000000000000000000000000000000000000..19ccf38faae7221110c66c5004d2f8edcf41f76f
Binary files /dev/null and b/docs/tutorial/images/sc1_fullquery_sidepane.png differ
diff --git a/docs/tutorial/images/sc1_gromacs_selected.png b/docs/tutorial/images/sc1_gromacs_selected.png
new file mode 100644
index 0000000000000000000000000000000000000000..391653d11ee553048fcd91fd57bf6f9054fde308
Binary files /dev/null and b/docs/tutorial/images/sc1_gromacs_selected.png differ
diff --git a/docs/tutorial/images/sc2_bandgap_filter.png b/docs/tutorial/images/sc2_bandgap_filter.png
new file mode 100644
index 0000000000000000000000000000000000000000..354a13325f797a1cdc73ee3b3a81623cc008f3ac
Binary files /dev/null and b/docs/tutorial/images/sc2_bandgap_filter.png differ
diff --git a/docs/tutorial/images/sc2_chip_Cquery.png b/docs/tutorial/images/sc2_chip_Cquery.png
new file mode 100644
index 0000000000000000000000000000000000000000..21729ff1359871062ffbfe1da08a9b20ebf6f31f
Binary files /dev/null and b/docs/tutorial/images/sc2_chip_Cquery.png differ
diff --git a/docs/tutorial/images/sc2_column_layout.png b/docs/tutorial/images/sc2_column_layout.png
new file mode 100644
index 0000000000000000000000000000000000000000..f0480049f74c9740d1eefba82b812d3f2b2596df
Binary files /dev/null and b/docs/tutorial/images/sc2_column_layout.png differ
diff --git a/docs/tutorial/images/sc2_column_selection.png b/docs/tutorial/images/sc2_column_selection.png
new file mode 100644
index 0000000000000000000000000000000000000000..8089572865a274420cda7bd3a97dac3bc365d2ab
Binary files /dev/null and b/docs/tutorial/images/sc2_column_selection.png differ
diff --git a/docs/tutorial/images/sc2_composition_complete.png b/docs/tutorial/images/sc2_composition_complete.png
new file mode 100644
index 0000000000000000000000000000000000000000..048ebe4328b545df7a2f3dd75dfea8fad6f4712a
Binary files /dev/null and b/docs/tutorial/images/sc2_composition_complete.png differ
diff --git a/docs/tutorial/images/sc2_fullquery.png b/docs/tutorial/images/sc2_fullquery.png
new file mode 100644
index 0000000000000000000000000000000000000000..e5af69d97a4b5fba706cd00f25c5c0bcf8ae41a3
Binary files /dev/null and b/docs/tutorial/images/sc2_fullquery.png differ
diff --git a/docs/tutorial/images/sc2_fullquery_old.png b/docs/tutorial/images/sc2_fullquery_old.png
new file mode 100644
index 0000000000000000000000000000000000000000..400d498e03982871d786d485659ba5cf2d3086e5
Binary files /dev/null and b/docs/tutorial/images/sc2_fullquery_old.png differ
diff --git a/docs/tutorial/images/sc2_fullquery_selection.png b/docs/tutorial/images/sc2_fullquery_selection.png
new file mode 100644
index 0000000000000000000000000000000000000000..6f783b7db98dbbbe6fbabaf07a11008bc99ebcfe
Binary files /dev/null and b/docs/tutorial/images/sc2_fullquery_selection.png differ
diff --git a/docs/tutorial/images/sc2_fullquery_selection_old.png b/docs/tutorial/images/sc2_fullquery_selection_old.png
new file mode 100644
index 0000000000000000000000000000000000000000..709744dbde59b52024c1d6c5c5233a033dd99cda
Binary files /dev/null and b/docs/tutorial/images/sc2_fullquery_selection_old.png differ
diff --git a/docs/tutorial/images/sc2_method_complete.png b/docs/tutorial/images/sc2_method_complete.png
new file mode 100644
index 0000000000000000000000000000000000000000..0e50a8cb87c63af7be53b9c8ff696f73ecf65ee7
Binary files /dev/null and b/docs/tutorial/images/sc2_method_complete.png differ
diff --git a/docs/tutorial/images/sc2_searchbar_Cquery.png b/docs/tutorial/images/sc2_searchbar_Cquery.png
new file mode 100644
index 0000000000000000000000000000000000000000..9e933df4a4630da27c0c1f97044ef33f2e1b0694
Binary files /dev/null and b/docs/tutorial/images/sc2_searchbar_Cquery.png differ
diff --git a/docs/tutorial/images/sc3_dashboard.png b/docs/tutorial/images/sc3_dashboard.png
new file mode 100644
index 0000000000000000000000000000000000000000..c305eefeefffa2f44c59acc1aa0235d8156d4b9b
Binary files /dev/null and b/docs/tutorial/images/sc3_dashboard.png differ
diff --git a/docs/tutorial/images/sc3_dashboard_2uploadtimes.png b/docs/tutorial/images/sc3_dashboard_2uploadtimes.png
new file mode 100644
index 0000000000000000000000000000000000000000..cc99841559d27b1f127e8482eab1fe47c815d191
Binary files /dev/null and b/docs/tutorial/images/sc3_dashboard_2uploadtimes.png differ
diff --git a/docs/tutorial/images/sc3_dashboard_uploadtime1.png b/docs/tutorial/images/sc3_dashboard_uploadtime1.png
new file mode 100644
index 0000000000000000000000000000000000000000..e710260e307de65ed6c3ac2d46201db5f36531f0
Binary files /dev/null and b/docs/tutorial/images/sc3_dashboard_uploadtime1.png differ
diff --git a/docs/tutorial/images/sc3_dashboard_uploadtime2.png b/docs/tutorial/images/sc3_dashboard_uploadtime2.png
new file mode 100644
index 0000000000000000000000000000000000000000..47660aefc815ef31894c868827ba72618fe44ac3
Binary files /dev/null and b/docs/tutorial/images/sc3_dashboard_uploadtime2.png differ
diff --git a/docs/tutorial/images/sc3_uploadtime_widget.png b/docs/tutorial/images/sc3_uploadtime_widget.png
new file mode 100644
index 0000000000000000000000000000000000000000..67750d3e881401dd93f05bb8d0e20665b841b1b3
Binary files /dev/null and b/docs/tutorial/images/sc3_uploadtime_widget.png differ
diff --git a/docs/tutorial/tabular-0.png b/docs/tutorial/images/tabular-0.png
similarity index 100%
rename from docs/tutorial/tabular-0.png
rename to docs/tutorial/images/tabular-0.png
diff --git a/docs/tutorial/tabular-1.png b/docs/tutorial/images/tabular-1.png
similarity index 100%
rename from docs/tutorial/tabular-1.png
rename to docs/tutorial/images/tabular-1.png
diff --git a/docs/tutorial/tabular-2.png b/docs/tutorial/images/tabular-2.png
similarity index 100%
rename from docs/tutorial/tabular-2.png
rename to docs/tutorial/images/tabular-2.png
diff --git a/docs/tutorial/tabular-3.png b/docs/tutorial/images/tabular-3.png
similarity index 100%
rename from docs/tutorial/tabular-3.png
rename to docs/tutorial/images/tabular-3.png
diff --git a/docs/tutorial/tabular-4.png b/docs/tutorial/images/tabular-4.png
similarity index 100%
rename from docs/tutorial/tabular-4.png
rename to docs/tutorial/images/tabular-4.png
diff --git a/docs/tutorial/tabular-5.png b/docs/tutorial/images/tabular-5.png
similarity index 100%
rename from docs/tutorial/tabular-5.png
rename to docs/tutorial/images/tabular-5.png
diff --git a/docs/tutorial/tabular-6.png b/docs/tutorial/images/tabular-6.png
similarity index 100%
rename from docs/tutorial/tabular-6.png
rename to docs/tutorial/images/tabular-6.png
diff --git a/docs/tutorial/tabular-7.png b/docs/tutorial/images/tabular-7.png
similarity index 100%
rename from docs/tutorial/tabular-7.png
rename to docs/tutorial/images/tabular-7.png
diff --git a/docs/tutorial/tabular-8.png b/docs/tutorial/images/tabular-8.png
similarity index 100%
rename from docs/tutorial/tabular-8.png
rename to docs/tutorial/images/tabular-8.png
diff --git a/docs/tutorial/nomad_repo.md b/docs/tutorial/nomad_repo.md
new file mode 100644
index 0000000000000000000000000000000000000000..16cd6e14b504c1e39baa6e791d7a92477aa29f2a
--- /dev/null
+++ b/docs/tutorial/nomad_repo.md
@@ -0,0 +1,15 @@
+# Navigating to the NOMAD repository
+
+There are several access points to the NOMAD repository.
+The general [_landing page_](https://nomad-lab.eu/nomad-lab/){:target="_blank"} will give you a quick rundown of NOMAD's usage and features, and provides several links to documentation, tutorials, and the history behind the project.
+
+From this page, we can navigate to the NOMAD repository, where we can upload, manage, and explore data.
+There are 2 public versions available:
+
+1. [stable](https://nomad-lab.eu/prod/v1/gui/search/entries){:target="_blank"}, which is accessed by clicking the "Open NOMAD" button at the top of the landing page (highlighted <span style="color:orange">orange</span> in images below).
+2. [beta/staging](https://nomad-lab.eu/prod/v1/staging/gui/search/entries){:target="_blank"}, which has the latest release and updates much more frequently, but may also harbor unstable or untested features. You can navigate to this version via two distinct links: 1. at the bottom-right corner of the landing page and 2. under "SOLUTIONS" > "NOMAD" > "Try and Test" in the top navigation menu (highlighted <span style="color:red">red</span> in images below).
+
+<div class="image-container" markdown="block">
+![NOMAD Beta at the bottom of the website](images/beta1.png){.screenshot}
+![NOMAD Beta inside solutions](images/beta2.png){.screenshot}
+</div>
diff --git a/docs/tutorial/plugins.md b/docs/tutorial/plugins.md
deleted file mode 100644
index ff9ce6b1fdefac1dd73830d8d8701d3b85ded7e9..0000000000000000000000000000000000000000
--- a/docs/tutorial/plugins.md
+++ /dev/null
@@ -1,45 +0,0 @@
-!!! attention
-
-    This part of the documentation is still work in progress.
-
-
-
-## Custom normalizers
-
-For custom schemas, you might want to add custom normalizers. All files are parsed
-and normalized when they are uploaded or changed. The NOMAD metainfo Python interface
-allows you to add functions that are called when your data is normalized.
-
-Here is an example:
-
-```python
---8<-- "examples/archive/custom_schema.py"
-```
-
-To add a `normalize` function, your section has to inherit from `ArchiveSection` which
-provides the base for this functionality. Now you can overwrite the `normalize` function
-and add you own behavior. Make sure to call the `super` implementation properly to
-support schemas with multiple inheritance.
-
-If we parse an archive like this:
-
-```yaml
---8<-- "examples/archive/custom_data.archive.yaml"
-```
-
-we will get a final normalized archive that contains our data like this:
-
-```json
-{
-  "data": {
-    "m_def": "examples.archive.custom_schema.SampleDatabase",
-    "samples": [
-      {
-        "added_date": "2022-06-18T00:00:00+00:00",
-        "formula": "NaCl",
-        "sample_id": "2022-06-18 00:00:00+00:00--NaCl"
-      }
-    ]
-  }
-}
-```
\ No newline at end of file
diff --git a/docs/tutorial/upload_publish.md b/docs/tutorial/upload_publish.md
index 1d71de3d906e4568131d628bd492e3b5fbabd17c..69bc0e35c47f17dfc1e7d9bc08c0215e2b1d2a4d 100644
--- a/docs/tutorial/upload_publish.md
+++ b/docs/tutorial/upload_publish.md
@@ -1,3 +1,6 @@
+!!! warning "Attention"
+    We are currently working to update this content.
+
 This tutorial guides you through the basics of going from files on your computer
 to a published dataset with DOI.
 
@@ -10,7 +13,7 @@ We will perform these steps with NOMAD's graphical user interface and its APIs.
     The NOMAD seen in the tutorials is an older version with a different color theme,
     but all the demonstrated functionality is still available on the current version.
     You'll find the NOMAD test installation mentioned in the first video
-    [here](https://nomad-lab.eu/prod/v1/test/gui/search/entries).
+    [here](https://nomad-lab.eu/prod/v1/test/gui/search/entries){:target="_blank"}.
 
 <div class="youtube">
 <iframe src="https://www.youtube-nocookie.com/embed/3rVvfYoUbO0" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
diff --git a/docs/writing_guide.md b/docs/writing_guide.md
new file mode 100644
index 0000000000000000000000000000000000000000..976b30ae071d2104d226b7a5a8f070d219288490
--- /dev/null
+++ b/docs/writing_guide.md
@@ -0,0 +1,27 @@
+# Writing Guide
+
+This is a guide for best practices when contributing to the NOMAD documentation.
+
+## Images and Data
+
+All assets specific to an individual markdown file should be stored within an immediate sub-directory of the file, labeled accordingly. Please use `images/` and `data/` for the image and data files, respectively.
+
+## Sections Hierarchy
+
+Single "#" (level-one) headings should only be used once, at the beginning of the md file.
+
+## External Links
+
+Use [](){:target="_blank"} for external links, so that they open in a new browser tab.
+
+## Admonitions
+
+Here is a list of currently used admonitions within the docs:
+
+- !!! warning "Attention"
+
+- !!! note
+
+- !!! tip
+
+- !!! tip "Important"
\ No newline at end of file
diff --git a/gui/src/config.js b/gui/src/config.js
index 78f1a06539215e706f88182d8997ae966cc7266f..9219bc27b62552e78c600f033589f0c1807d4348 100644
--- a/gui/src/config.js
+++ b/gui/src/config.js
@@ -146,7 +146,7 @@ export const nomadTheme = createTheme({
   },
   overrides: {
     // This is used to inject global css styles through the CssBaseline
-    // component, see: https://v4.mui.com/customization/globals/#global-css
+    // component, see: https://v4.mui.com/customization/globals/#global-css
     MuiCssBaseline: {
       '@global': {
         '.react-grid-item.react-grid-placeholder': {
diff --git a/mkdocs.yml b/mkdocs.yml
index 0665e1b23199e7abdf6eb825d352be985e68e410..a40b4da58be2b914c4c9d01d6fc90dbb5e36074f 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -2,55 +2,59 @@ site_name: Documentation
 site_description: |
   The documentation for NOMAD v1.
 site_author: The NOMAD Authors
+repo_url: https://github.com/nomad-coe/nomad
+edit_uri: ''
 nav:
   - Home: index.md
   - Tutorial:
+    - Navigating to NOMAD: tutorial/nomad_repo.md
     - Uploading and publishing data: tutorial/upload_publish.md
-    - Exploring data on: tutorial/explore.md
+    - Exploring data: tutorial/explore.md
     - Access data via API: tutorial/access_api.md
-    - Built in schemas: tutorial/builtin.md
-    - Schemas: tutorial/custom.md
-    - Plugins: tutorial/plugins.md
-    - Third-party integration: tutorial/third_party.md
+    - Schemas and plugins: tutorial/custom.md
   - How-to guides:
-    - Data Management:
-      - How to upload/publish data for supported formats: data/upload.md
-      - How to use ELNs: data/eln.md
-      - How to explore data: data/explore.md
-      - How to use NORTH: data/north.md
-    - Customize Schemas:
-      - How to write a schema: schemas/basics.md
-      - How to define ELNs: schemas/elns.md
-      - How to use base sections: schemas/base_sections.md
-      - How to use tabular parser: schemas/tabular.md
-      - How to define workflows: schemas/workflows.md
-      - How to reference hdf5: schemas/hdf5.md
-    - Programming interfaces:
-      - How to use the API: apis/api.md
-      - How to install nomad-lab: apis/pythonlib.md
-      - How to access processed data: apis/archive_query.md
-      - How to run a parser: apis/local_parsers.md
-    - Plugins:
-      - How to develop, publish, and install plugins: plugins/plugins.md
-      - How to write schema plugins: plugins/schemas.md
-      - How to write parser plugins: plugins/parsers.md
+    - Overview: howto/overview.md
+    - Manage and find data:
+      - Upload and publish data for supported formats: howto/manage/upload.md
+      - Use ELNs: howto/manage/eln.md
+      - Explore data: howto/manage/explore.md
+      - Use NORTH: howto/manage/north.md
+    - Programmatic use:
+      - Use the API: howto/programmatic/api.md  # TODO separate into How-to and Explanation/Reference
+      - Publish data using Python: howto/programmatic/publish_python.md
+      - Install nomad-lab: howto/programmatic/pythonlib.md
+      - Access processed data: howto/programmatic/archive_query.md
+      - Run a parser: howto/programmatic/local_parsers.md
+    - NOMAD Oasis:
+      - Install an Oasis: howto/oasis/install.md
+      - Customize an Oasis: howto/oasis/customize.md
+      - Install plugins: howto/oasis/plugins_install.md
+      - Configure custom apps: howto/oasis/apps.md
+      - Migrate Oasis versions: howto/oasis/migrate.md
+      - Perform admin tasks: howto/oasis/admin.md
+    - Customization:
+      - Write a schema: howto/customization/basics.md
+      - Define ELNs: howto/customization/elns.md
+      - Use base sections: howto/customization/base_sections.md
+      - Use tabular parser: howto/customization/tabular.md
+      - Define workflows: howto/customization/workflows.md
+      - Reference hdf5 files: howto/customization/hdf5.md
+      - Develop and publish plugins: howto/customization/plugins_dev.md
+      - Write a parser: howto/customization/parsers.md
+      - Write a normalizer: howto/customization/normalizers.md
     - Development:
-      - How to get started: develop/setup.md
-      - How to navigate the code: develop/code.md
-      - How to contribute: develop/contrib.md
-      - Code guidelines: develop/guides.md
-      - How to extend the search: develop/search.md
-      - How to write a parser: develop/parsers.md
-      - How to write a normalizer: develop/normalizers.md
-    - Oasis:
-      - How to install an Oasis: oasis/install.md
-      - How to customize an Oasis: oasis/customize.md
-      - How to configure custom apps: oasis/apps.md
-      - How to migrate Oasis versions: oasis/migrate.md
-      - Administrative tasks: oasis/admin.md
+      - Get started: howto/develop/setup.md
+      - Navigate the code: howto/develop/code.md
+      - Contribute: howto/develop/contrib.md
+      - Extend the search: howto/develop/search.md
+  - Domain-specific examples:
+    - Overview: examples/overview.md
+    - Computational data:
+      - Quick start: examples/computational_data/uploading.md
+      - Workflows: examples/computational_data/workflows.md
   - Explanation:
     - From files to data: explanation/basics.md
-    - Structured data: explanation/data.md
+    - Data structure: explanation/data.md
     - Processing: explanation/processing.md
     - Architecture: explanation/architecture.md
     - Why you need an Oasis: explanation/oasis.md
@@ -60,7 +64,9 @@ nav:
     - reference/cli.md
     - reference/plugins.md
     - reference/parsers.md
+    - reference/code_guidelines.md
     - reference/glossary.md
+    - reference/tutorials.md
 theme:
   name: material
   palette:
@@ -72,6 +78,8 @@ theme:
   favicon: assets/favicon.png
   features:
     - navigation.instant
+  icon:
+    repo: fontawesome/brands/github
   custom_dir: docs/theme
 # repo_url: https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR/
 markdown_extensions:
@@ -83,14 +91,25 @@ markdown_extensions:
       anchor_linenums: true
   - pymdownx.inlinehilite
   - pymdownx.snippets
-  - pymdownx.superfences
+  - mkdocs-click
+  - pymdownx.extra
   - toc:
       permalink: True
   - pymdownx.arithmatex:
       generic: true
-  - attr_list
-  - mkdocs-click
-  - pymdownx.extra
+  - pymdownx.emoji: # TODO make sure this isn't used!!
+      # below 2 lines works for mkdocs-material<=9.3, but they produce a deprecated warning
+      emoji_index: !!python/name:materialx.emoji.twemoji
+      emoji_generator: !!python/name:materialx.emoji.to_svg
+      # below 2 lines are supposed to work for mkdocs-material>=9.4
+      # emoji_index: !!python/name:material.extensions.emoji.twemoji
+      # emoji_generator: !!python/name:material.extensions.emoji.to_svg
+  - pymdownx.superfences:
+      custom_fences:
+        - name: mermaid
+          class: mermaid
+          format: !!python/name:pymdownx.superfences.fence_code_format
+
 extra:
   generator: false
   homepage: https://nomad-lab.eu
@@ -101,10 +120,14 @@ plugins:
         module_name: nomad/mkdocs
     - redirects:
         redirect_maps:
-          'plugins.md': 'plugins/plugins.md'
-          'pythonlib.md': 'apis/pythonlib.md'
-          'oasis.md': 'oasis/install.md'
-          'develop/gitlab.md': 'develop/contrib.md'
+          'pythonlib.md': 'howto/programmatic/pythonlib.md'
+          'oasis.md': 'howto/oasis/install.md'
+          'develop/gitlab.md': 'howto/develop/contrib.md'
+    - glightbox
+    # - git-revision-date-localized
+    # TODO Fix error in pipeline when this plugin is included
+    # (with 'mkdocs-git-revision-date-localized-plugin==1.2.1' in pyproject.toml)
+    # see pipeline error here: https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR/-/jobs/2473217
 extra_css:
   - stylesheets/extra.css
 extra_javascript:
diff --git a/pyproject.toml b/pyproject.toml
index dc7fabbc2723372e24b076a9e6705d5e4452ccf5..c491655ce1268600476d5439282797b6df8905b2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -132,16 +132,19 @@ dev = [
     'twine==3.4.2',
     'python-gitlab==2.10.1',
     'devtools==0.8.0',
-    'mkdocs==1.2.3',
-    'mkdocs-material==8.1.1',
-    'mkdocs-material-extensions==1.0.3',
+    'mkdocs==1.3.0',
+    'mkdocs-material==8.2.8',
+    'mkdocs-material-extensions==1.3.1',
     'mkdocs-macros-plugin==0.6.3',
+    'mkdocs-glightbox==0.3.5',
     'aiosmtpd',
     'mkdocs-click==0.8.0',
     'mkdocs-redirects==1.2.0',
+    'mkdocs-git-revision-date-localized-plugin==1.2.1',
     'ruff==0.1.4'
 ]
 
+
 [project.scripts]
 nomad = "nomad.cli:run_cli"