diff --git a/nomad/app/dcat/mapping.py b/nomad/app/dcat/mapping.py
index 26c47495853e7237176759dbb0dcb6b0dd13a70b..d22399a7628fba69adf3d0309b1b1cf1f63072dc 100644
--- a/nomad/app/dcat/mapping.py
+++ b/nomad/app/dcat/mapping.py
@@ -118,8 +118,9 @@ class Mapping:
                 dataset,
                 DCAT.landingPage,
                 URIRef(
-                    '%s/entry/id/%s/%s'
-                    % (config.gui_url(), entry['upload_id'], entry['entry_id'])
+                    '{}/entry/id/{}/{}'.format(
+                        config.gui_url(), entry['upload_id'], entry['entry_id']
+                    )
                 ),
             )
         )
@@ -175,7 +176,7 @@ class Mapping:
         self.g.add((person, FOAF.givenName, Literal(user.first_name)))
         self.g.add((person, FOAF.familyName, Literal(user.last_name)))
         self.g.add((person, FOAF.nick, Literal(user.username)))
-        self.g.add((person, FOAF.mbox, URIRef('mailto:%s' % (user.email))))
+        self.g.add((person, FOAF.mbox, URIRef(f'mailto:{user.email}')))
 
         self.persons[user.user_id] = person
 
diff --git a/nomad/app/optimade/__init__.py b/nomad/app/optimade/__init__.py
index c10f5c65ca70489080833e924e65d579edf26078..c9e5f0ad5c5efc340629c4f6e0a09149da81a57e 100644
--- a/nomad/app/optimade/__init__.py
+++ b/nomad/app/optimade/__init__.py
@@ -104,7 +104,7 @@ from nomad import utils  # nopep8
 from nomad.config import config
 from optimade.server.config import CONFIG  # nopep8
 
-CONFIG.root_path = '%s/optimade' % config.services.api_base_path
+CONFIG.root_path = f'{config.services.api_base_path}/optimade'
 CONFIG.base_url = '{}://{}'.format(
     'https' if config.services.https else 'http',
     config.services.api_host.strip('/'),
diff --git a/nomad/app/optimade/elasticsearch.py b/nomad/app/optimade/elasticsearch.py
index 6c8836638faacb847ab7a83f7c0cabfcc3baca12..910306b4199e4808c0f3d4268f8ab3deb4721c42 100644
--- a/nomad/app/optimade/elasticsearch.py
+++ b/nomad/app/optimade/elasticsearch.py
@@ -247,10 +247,10 @@ class StructureCollection(EntryCollection):
         sort, order = criteria.get('sort', (('chemical_formula_reduced', 1),))[0]
         sort_quantity = datamodel.OptimadeEntry.m_def.all_quantities.get(sort, None)
         if sort_quantity is None:
-            raise BadRequest(detail='Unable to sort on field %s' % sort)
+            raise BadRequest(detail=f'Unable to sort on field {sort}')
         sort_quantity_a_optimade = sort_quantity.m_get_annotations('optimade')
         if not sort_quantity_a_optimade.sortable:
-            raise BadRequest(detail='Unable to sort on field %s' % sort)
+            raise BadRequest(detail=f'Unable to sort on field {sort}')
 
         search_query = self._base_search_query()
 
diff --git a/nomad/app/optimade/filterparser.py b/nomad/app/optimade/filterparser.py
index 75cec2fd210ebd7665df4007a270362419ca8e84..c24b6c540071f11b06f95e60e5aa484a0e5807c5 100644
--- a/nomad/app/optimade/filterparser.py
+++ b/nomad/app/optimade/filterparser.py
@@ -45,7 +45,7 @@ def _get_transformer(without_prefix, **kwargs):
     quantities: dict[str, Quantity] = {
         q.name: Quantity(
             q.name,
-            backend_field='optimade.%s' % q.name,
+            backend_field=f'optimade.{q.name}',
             elastic_mapping_type=q.a_elasticsearch.mapping['type'],
         )
         for q in OptimadeEntry.m_def.all_quantities.values()
@@ -101,12 +101,12 @@ def parse_filter(filter_str: str, without_prefix=False) -> Q:
     try:
         parse_tree = _parser.parse(filter_str)
     except Exception as e:
-        raise FilterException('Syntax error: %s' % str(e))
+        raise FilterException(f'Syntax error: {str(e)}')
 
     try:
         query = transformer.transform(parse_tree)
     except Exception as e:
-        raise FilterException('Semantic error: %s' % str(e))
+        raise FilterException(f'Semantic error: {str(e)}')
 
     return query
 
@@ -155,7 +155,7 @@ class ElasticTransformer(OPTElasticTransformer):
             quantity = quantities[0]
 
             if quantity.length_quantity is None:
-                raise Exception('HAS ONLY is not supported by %s' % quantity.name)
+                raise Exception(f'HAS ONLY is not supported by {quantity.name}')
 
             has_all = super()._has_query_op(quantities, 'HAS ALL', predicate_zip_list)
             has_length = Q(
diff --git a/nomad/app/v1/models/graph/utils.py b/nomad/app/v1/models/graph/utils.py
index f7d59b63dab1452064ffb718be455eb8ce7189fd..6d420a6e1002c51bca54bd55ccbb815ddac4c34d 100644
--- a/nomad/app/v1/models/graph/utils.py
+++ b/nomad/app/v1/models/graph/utils.py
@@ -223,7 +223,7 @@ def _get_response_type(type_hint: Any, ns: ModelNamespace) -> Any:
     raise NotImplementedError(type_hint)
 
 
-ModelNamespace = dict[str, Union[type[BaseModel], ForwardRef]]
+ModelNamespace = dict[str, type[BaseModel] | ForwardRef]
 
 
 def _generate_model(
@@ -297,7 +297,7 @@ def _generate_model(
 
         if field_name == 'm_errors':
             if suffix == response_suffix:
-                fields[field_name] = (Optional[Union[type_hint]], None)  # type: ignore
+                fields[field_name] = (Optional[type_hint], None)  # type: ignore
             continue
 
         if field_name.startswith('m_') and field_name not in ['m_def', 'm_def_id']:
diff --git a/nomad/app/v1/models/models.py b/nomad/app/v1/models/models.py
index 8921dfe90b2e28e0cac42be4eaca444e102b43bb..4513d7b03e1f3d136364f408db12446779825e7e 100644
--- a/nomad/app/v1/models/models.py
+++ b/nomad/app/v1/models/models.py
@@ -20,7 +20,7 @@ import enum
 import fnmatch
 import json
 import re
-from typing import Any, Dict, List, Optional, Union
+from typing import Any
 from collections.abc import Mapping
 
 import pydantic
@@ -56,8 +56,8 @@ from typing import Annotated
 User: Any = datamodel.User.m_def.a_pydantic.model
 # It is important that datetime.datetime comes last. Otherwise, number valued strings
 # are interpreted as epoch dates by pydantic
-Value = Union[StrictInt, StrictFloat, StrictBool, str, datetime.datetime]
-ComparableValue = Union[StrictInt, StrictFloat, str, datetime.datetime]
+Value = StrictInt | StrictFloat | StrictBool | str | datetime.datetime
+ComparableValue = StrictInt | StrictFloat | str | datetime.datetime
 
 
 owner_documentation = strip(
@@ -188,7 +188,7 @@ ops = {
     'any': Any_,
 }
 
-CriteriaValue = Union[Value, list[Value], Range, Any_, All, None_, dict[str, Any]]
+CriteriaValue = Value | list[Value] | Range | Any_ | All | None_ | dict[str, Any]
 
 
 class LogicalOperator(NoneEmptyBaseModel):
@@ -267,7 +267,7 @@ class Empty(BaseModel):
     pass
 
 
-Query = Union[And, Or, Not, Nested, Criteria, Empty, Mapping[str, CriteriaValue]]
+Query = And | Or | Not | Nested | Criteria | Empty | Mapping[str, CriteriaValue]
 
 
 And.model_rebuild()
@@ -557,8 +557,7 @@ class QueryParameters:
                         detail=[
                             {
                                 'loc': ['query', key],
-                                'msg': 'operator %s does not support multiple values'
-                                % op,
+                                'msg': f'operator {op} does not support multiple values',
                             }
                         ],
                     )
@@ -567,7 +566,7 @@ class QueryParameters:
                 raise HTTPException(
                     422,
                     detail=[
-                        {'loc': ['query', key], 'msg': 'operator %s is unknown' % op}
+                        {'loc': ['query', key], 'msg': f'operator {op} is unknown'}
                     ],
                 )
 
@@ -1230,13 +1229,13 @@ for quantity in datamodel.EditableUserMetadata.m_def.definitions:
             quantity.type if quantity.type in (str, int, float, bool) else str
         )
     else:
-        pydantic_type = Union[str, list[str], MetadataEditListAction]
+        pydantic_type = str | list[str] | MetadataEditListAction
     if getattr(quantity, 'a_auth_level', None) == datamodel.AuthLevel.admin:
         description = '**NOTE:** Only editable by admin user'
     else:
         description = None
     _metadata_edit_actions_fields[quantity.name] = (
-        Optional[pydantic_type],
+        pydantic_type | None,
         Field(None, description=description),
     )
 
@@ -1346,7 +1345,7 @@ class Files(BaseModel):
             return re.compile(re_pattern)
         except re.error as e:
             raise PydanticCustomError(
-                'invalid_pattern', 'could not parse the re pattern: %s' % e
+                'invalid_pattern', f'could not parse the re pattern: {e}'
             )
 
     @model_validator(mode='after')
diff --git a/nomad/app/v1/routers/auth.py b/nomad/app/v1/routers/auth.py
index 318aa5682fdb23d6b0f7ed513f51b453eead8a01..8cf79a9df7e5f22536959550c6dca67ff4a011f5 100644
--- a/nomad/app/v1/routers/auth.py
+++ b/nomad/app/v1/routers/auth.py
@@ -463,7 +463,4 @@ def generate_upload_token(user):
         bytes(config.services.api_secret, 'utf-8'), msg=payload, digestmod=hashlib.sha1
     )
 
-    return '{}.{}'.format(
-        utils.base64_encode(payload),
-        utils.base64_encode(signature.digest()),
-    )
+    return f'{utils.base64_encode(payload)}.{utils.base64_encode(signature.digest())}'
diff --git a/nomad/app/v1/routers/datasets.py b/nomad/app/v1/routers/datasets.py
index 6cecffb685f91c813fdf57e6ca4b4223186f9ef8..c4850b0d29b59b4b23576a740281691fe63e7488 100644
--- a/nomad/app/v1/routers/datasets.py
+++ b/nomad/app/v1/routers/datasets.py
@@ -300,7 +300,7 @@ async def get_datasets(
         doi=doi,
     )
     if prefix is not None and prefix != '':
-        query_params.update(dataset_name=re.compile('^%s.*' % prefix, re.IGNORECASE))  # type: ignore
+        query_params.update(dataset_name=re.compile(f'^{prefix}.*', re.IGNORECASE))  # type: ignore
     query_params = {k: v for k, v in query_params.items() if v is not None}
 
     mongodb_query = pagination.order_result(mongodb_objects(**query_params))
@@ -554,7 +554,7 @@ async def assign_doi(
             detail=_dataset_has_unpublished_contents[1]['description'],
         )
 
-    doi = DOI.create(title='NOMAD dataset: %s' % dataset.dataset_name, user=user)
+    doi = DOI.create(title=f'NOMAD dataset: {dataset.dataset_name}', user=user)
 
     try:
         doi.create_draft()
diff --git a/nomad/app/v1/routers/entries.py b/nomad/app/v1/routers/entries.py
index 0f7abd0f340e1a1bf517708f159f6fc5d2fab679..5e1836894cffe5c628a433c56d36bb0f15c172df 100644
--- a/nomad/app/v1/routers/entries.py
+++ b/nomad/app/v1/routers/entries.py
@@ -18,7 +18,7 @@
 from datetime import datetime
 
 from enum import Enum
-from typing import Annotated, Optional, Set, Union, Dict, Any, List, Type
+from typing import Any
 from collections.abc import Iterator
 from fastapi import (
     APIRouter,
@@ -197,7 +197,7 @@ replace the references:
 )
 
 
-ArchiveRequired = Union[str, dict[str, Any]]
+ArchiveRequired = str | dict[str, Any]
 
 _archive_required_field = Body(
     '*',
@@ -291,9 +291,9 @@ EntryMetadataEditActions = create_model(
     'EntryMetadataEditActions',
     **{  # type: ignore
         quantity.name: (
-            Optional[EntryMetadataEditActionField]
+            EntryMetadataEditActionField | None
             if quantity.is_scalar
-            else Optional[list[EntryMetadataEditActionField]],
+            else list[EntryMetadataEditActionField] | None,
             None,
         )
         for quantity in EditableUserMetadata.m_def.definitions
@@ -705,8 +705,10 @@ def _answer_entries_raw_request(owner: Owner, query: Query, files: Files, user:
     if response.pagination.total > config.services.max_entry_download:
         raise HTTPException(
             status.HTTP_400_BAD_REQUEST,
-            detail='The limit of maximum number of entries in a single download (%d) has been exeeded (%d).'
-            % (config.services.max_entry_download, response.pagination.total),
+            detail=(
+                f'The limit on the maximum number of entries in a single download '
+                f'({config.services.max_entry_download}) has been exceeded ({response.pagination.total}).'
+            ),
         )
 
     files_params = Files() if files is None else files
@@ -1741,7 +1743,7 @@ async def post_entry_metadata_edit(
             if quantity is None:
                 raise HTTPException(
                     status.HTTP_400_BAD_REQUEST,
-                    detail='Unknown quantity %s' % action_quantity_name,
+                    detail=f'Unknown quantity {action_quantity_name}',
                 )
 
             # TODO this does not work. Because the quantities are not in EditableUserMetadata
@@ -1751,13 +1753,13 @@ async def post_entry_metadata_edit(
                 if not user.is_admin():
                     raise HTTPException(
                         status.HTTP_400_BAD_REQUEST,
-                        detail='Only the admin user can set %s' % quantity.name,
+                        detail=f'Only the admin user can set {quantity.name}',
                     )
 
             if isinstance(quantity_actions, list) == quantity.is_scalar:
                 raise HTTPException(
                     status.HTTP_400_BAD_REQUEST,
-                    detail='Wrong shape for quantity %s' % action_quantity_name,
+                    detail=f'Wrong shape for quantity {action_quantity_name}',
                 )
 
             if not isinstance(quantity_actions, list):
@@ -1866,8 +1868,7 @@ async def post_entry_metadata_edit(
                 if doi_ds is not None and not user.is_admin:
                     data.success = False
                     data.message = (data.message if data.message else '') + (
-                        'Edit would remove entries from a dataset with DOI (%s) '
-                        % doi_ds.dataset_name
+                        f'Edit would remove entries from a dataset with DOI ({doi_ds.dataset_name}) '
                     )
                     has_error = True
 
diff --git a/nomad/app/v1/routers/materials.py b/nomad/app/v1/routers/materials.py
index 05793772d1a5263098063063149cf1e28923d5ba..579342c5fb1e8e0ec7a6fd89f6642fc8c562300c 100644
--- a/nomad/app/v1/routers/materials.py
+++ b/nomad/app/v1/routers/materials.py
@@ -95,7 +95,7 @@ def perform_search(*args, **kwargs) -> MetadataResponse:
     except SearchError as e:
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
-            detail='Elasticsearch could not process your query: %s' % str(e),
+            detail=f'Elasticsearch could not process your query: {str(e)}',
         )
 
 
diff --git a/nomad/app/v1/routers/users.py b/nomad/app/v1/routers/users.py
index e5f92534f4312dd824ceb7b489a62e172f43f5a8..3b47845604a862381f4434284b4f7d06a485b952 100644
--- a/nomad/app/v1/routers/users.py
+++ b/nomad/app/v1/routers/users.py
@@ -198,7 +198,7 @@ async def invite_user(
     except Exception as e:
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
-            detail='Invalid user data: %s' % str(e),
+            detail=f'Invalid user data: {str(e)}',
         )
 
     if user.email is None:
@@ -212,13 +212,13 @@ async def invite_user(
     except KeyError as e:
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
-            detail='Invalid user data: %s' % str(e),
+            detail=f'Invalid user data: {str(e)}',
         )
 
     if error is not None:
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
-            detail='Could not invite user: %s' % str(error),
+            detail=f'Could not invite user: {str(error)}',
         )
 
     return datamodel.User.get(username=user.username)
diff --git a/nomad/app/v1/utils.py b/nomad/app/v1/utils.py
index 319958b76750f4ce4e5398b4e1986551a5495dad..e889fba0858ff8bd17cc88005a2758e13ef16ba5 100644
--- a/nomad/app/v1/utils.py
+++ b/nomad/app/v1/utils.py
@@ -62,17 +62,16 @@ def parameter_dependency_from_model(
 
     code = inspect.cleandoc(
         """
-    def %s(%s):
+    def {}({}):
         try:
-            return %s(%s)
+            return {}({})
         except ValidationError as e:
             errors = e.errors()
             for error in errors:
                 error['loc'] = ['query'] + list(error['loc'])
             raise HTTPException(422, detail=errors)
 
-    """
-        % (
+    """.format(
             name,
             ', '.join(names),
             model_cls.__name__,  # type: ignore
diff --git a/nomad/archive/converter.py b/nomad/archive/converter.py
index 2f581b032bde61db701ec18dd621fc9688ffae02..b72233f3ffdf3053471751b669d3cd11ca181780 100644
--- a/nomad/archive/converter.py
+++ b/nomad/archive/converter.py
@@ -274,7 +274,7 @@ def convert_upload(
         force_repack (bool, optional): Force repacking the existing archive (in new format). Defaults to False.
         size_limit (int, optional): Size limit in GB for the archive. Defaults to -1 (no limit).
     """
-    if isinstance(uploads, (str, Upload)):
+    if isinstance(uploads, str | Upload):
         uploads = [uploads]
 
     if not uploads:
diff --git a/nomad/archive/query.py b/nomad/archive/query.py
index 822392656dbb0ca330da8a34ad6b3f52ff774e61..05bbc2700eade759945cfa1312fc3c1917332784 100644
--- a/nomad/archive/query.py
+++ b/nomad/archive/query.py
@@ -64,7 +64,7 @@ def _extract_child(archive_item, prop, index) -> dict | list:
     archive_child = archive_item[prop]
     from .storage_v2 import ArchiveList as ArchiveListNew
 
-    is_list = isinstance(archive_child, (ArchiveListNew, ArchiveList, list))
+    is_list = isinstance(archive_child, ArchiveListNew | ArchiveList | list)
 
     if index is None and is_list:
         index = (0, None)
@@ -132,7 +132,7 @@ def query_archive(
 
     if isinstance(f_or_archive_reader, ArchiveReader):
         return _load_data(query_dict, f_or_archive_reader)
-    elif isinstance(f_or_archive_reader, (BytesIO, str)):
+    elif isinstance(f_or_archive_reader, BytesIO | str):
         with read_archive(f_or_archive_reader, **kwargs) as archive:
             return _load_data(query_dict, archive)
 
@@ -198,14 +198,14 @@ def filter_archive(
             # TODO
             raise ArchiveQueryError('key wildcards not yet implemented')
         else:
-            raise ArchiveQueryError('invalid key format: %s' % key)
+            raise ArchiveQueryError(f'invalid key format: {key}')
 
         try:
             archive_child = _extract_child(archive_item, key, index)
 
             from .storage_v2 import ArchiveList as ArchiveListNew
 
-            if isinstance(archive_child, (ArchiveListNew, ArchiveList, list)):
+            if isinstance(archive_child, ArchiveListNew | ArchiveList | list):
                 result[key] = [
                     filter_archive(val, item, transform=transform)
                     for item in archive_child
diff --git a/nomad/archive/required.py b/nomad/archive/required.py
index f3ef389c4549f54879ca4a05bcc098164b1a9efe..6a08652900df662c92b9eb958cf3c29d149e938b 100644
--- a/nomad/archive/required.py
+++ b/nomad/archive/required.py
@@ -329,7 +329,7 @@ class RequiredReader:
             try:
                 result[prop] = (
                     [handle_item(item) for item in value]
-                    if isinstance(value, (list, ArchiveList, ArchiveListNew))
+                    if isinstance(value, list | ArchiveList | ArchiveListNew)
                     else handle_item(value)
                 )
             except ArchiveError as e:
@@ -552,7 +552,7 @@ class RequiredReader:
         if isinstance(archive_item, ArchiveDict):
             archive_item = to_json(archive_item)
 
-        if isinstance(archive_item, (dict, NewArchiveDict)) and 'm_def' in archive_item:
+        if isinstance(archive_item, dict | NewArchiveDict) and 'm_def' in archive_item:
             dataset = dataset.replace(
                 definition=self._resolve_definition(
                     dataset.upload_id,
@@ -606,7 +606,7 @@ class RequiredReader:
 
                 from .storage_v2 import ArchiveList as ArchiveListNew
 
-                if isinstance(archive_child, (ArchiveListNew, ArchiveList, list)):
+                if isinstance(archive_child, ArchiveListNew | ArchiveList | list):
                     result[prop] = [
                         self._apply_required(
                             val, item, dataset.replace(definition=prop_def)
diff --git a/nomad/archive/storage.py b/nomad/archive/storage.py
index c0baeb75c87900e9f73fa249ada9802d52022aaf..6d33ecfdd378ec709decfa3d6d476bbe26f5b7ed 100644
--- a/nomad/archive/storage.py
+++ b/nomad/archive/storage.py
@@ -151,7 +151,7 @@ class ArchiveReader(ArchiveDict):
                     self._file_or_path, 'rb', buffering=config.archive.read_buffer_size
                 ),
             )
-        elif isinstance(self._file_or_path, (BytesIO, BufferedReader)):
+        elif isinstance(self._file_or_path, BytesIO | BufferedReader):
             f = cast(BytesIO, self._file_or_path)
         else:
             raise ValueError('not a file or path')
diff --git a/nomad/archive/storage_v2.py b/nomad/archive/storage_v2.py
index 6d525e2ef6eeb5dbab7a7a0e0276c39b71a96f5a..6a5d1e59af5aa7ad6dd82a3b87d647c44927df1c 100644
--- a/nomad/archive/storage_v2.py
+++ b/nomad/archive/storage_v2.py
@@ -123,7 +123,7 @@ class TOCPacker:
             """
             _pack_direct(Utility.packb(_obj))
 
-        if self._depth >= self._toc_depth or not isinstance(obj, (dict, list)):
+        if self._depth >= self._toc_depth or not isinstance(obj, dict | list):
             start_pos = self._pos
             _pack_raw(obj)
             return {'pos': [start_pos, self._pos]}
diff --git a/nomad/atomutils.py b/nomad/atomutils.py
index b6c089282cdb753994a1c5b71bc291da265937e9..0e142213d4d1c50728d9e681366be3102764b97d 100644
--- a/nomad/atomutils.py
+++ b/nomad/atomutils.py
@@ -684,7 +684,7 @@ def get_formula_string(symbols: Iterable[str], counts: Iterable[int]) -> str:
     formula = ''
     for symbol, count in zip(symbols, counts):
         if count > 1:
-            formula += '%s%d' % (symbol, count)
+            formula += f'{symbol}{count}'
         else:
             formula += symbol
     return formula
@@ -744,7 +744,7 @@ def get_normalized_wyckoff(
     sorted_species.sort(key=functools.cmp_to_key(compare_atomic_number))
     standard_atom_names = {}
     for i, at in enumerate(sorted_species):
-        standard_atom_names[at] = 'X_%d' % i
+        standard_atom_names[at] = f'X_{i}'
 
     # Rename with anonymized species labels
     standard_wyc: dict = {}
diff --git a/nomad/cli/admin/admin.py b/nomad/cli/admin/admin.py
index 5871edd593e1d16707e443e069a4353207586b4f..7c02a16cdd51457e9b241def679e6d26b8a492ba 100644
--- a/nomad/cli/admin/admin.py
+++ b/nomad/cli/admin/admin.py
@@ -77,8 +77,7 @@ def reset_processing(zero_complete_time):
             process_status__in=proc.ProcessStatus.STATUSES_PROCESSING
         )
         print(
-            '%d %s processes need to be reset due to incomplete process'
-            % (in_processing.count(), cls.__name__)
+            f'{in_processing.count()} {cls.__name__} processes need to be reset due to incomplete process'
         )
         in_processing.update(
             process_status=proc.ProcessStatus.READY,
@@ -126,10 +125,8 @@ def lift_embargo(dry, parallel):
 
         if upload.publish_time + relativedelta(months=embargo_length) < datetime.now():
             print(
-                'need to lift the embargo of %s (publish_time=%s, embargo=%d)'
-                % (upload.upload_id, upload.publish_time, embargo_length)
+                f'need to lift the embargo of {upload.upload_id} (publish_time={upload.publish_time}, embargo={embargo_length})'
             )
-
             if not dry:
                 upload.edit_upload_metadata(
                     edit_request_json=dict(metadata={'embargo_length': 0}),
@@ -154,9 +151,7 @@ def dump(restore: bool):
 
     date_str = datetime.utcnow().strftime('%Y_%m_%d')
     print(
-        'mongodump --host {} --port {} --db {} -o /backup/fairdi/mongo/{}'.format(
-            config.mongo.host, config.mongo.port, config.mongo.db_name, date_str
-        )
+        f'mongodump --host {config.mongo.host} --port {config.mongo.port} --db {config.mongo.db_name} -o /backup/fairdi/mongo/{date_str}'
     )
 
 
@@ -164,9 +159,7 @@ def dump(restore: bool):
 @click.argument('PATH_TO_DUMP', type=str, nargs=1)
 def restore(path_to_dump):
     print(
-        'mongorestore --host {} --port {} --db {} {}'.format(
-            config.mongo.host, config.mongo.port, config.mongo.db_name, path_to_dump
-        )
+        f'mongorestore --host {config.mongo.host} --port {config.mongo.port} --db {config.mongo.db_name} {path_to_dump}'
     )
 
 
@@ -199,7 +192,7 @@ def restore(path_to_dump):
 )
 def nginx_conf(prefix, host, port, server):
     prefix = prefix.rstrip('/')
-    prefix = '/%s' % prefix.lstrip('/')
+    prefix = '/{}'.format(prefix.lstrip('/'))
 
     if server:
         print(
@@ -211,24 +204,24 @@ def nginx_conf(prefix, host, port, server):
         )
 
     print(
-        """
+        f"""
     location / {{
-        proxy_pass http://{1}:{2};
+        proxy_pass http://{host}:{port};
     }}
 
-    location ~ {0}\\/?(gui)?$ {{
-        rewrite ^ {0}/gui/ permanent;
+    location ~ {prefix}\\/?(gui)?$ {{
+        rewrite ^ {prefix}/gui/ permanent;
     }}
 
-    location {0}/gui/ {{
+    location {prefix}/gui/ {{
         proxy_intercept_errors on;
         error_page 404 = @redirect_to_index;
-        proxy_pass http://{1}:{2};
+        proxy_pass http://{host}:{port};
     }}
 
     location @redirect_to_index {{
-        rewrite ^ {0}/gui/index.html break;
-        proxy_pass http://{1}:{2};
+        rewrite ^ {prefix}/gui/index.html break;
+        proxy_pass http://{host}:{port};
     }}
 
     location ~ \\/gui\\/(service-worker\\.js|meta\\.json)$ {{
@@ -237,20 +230,20 @@ def nginx_conf(prefix, host, port, server):
         if_modified_since off;
         expires off;
         etag off;
-        proxy_pass http://{1}:{2};
+        proxy_pass http://{host}:{port};
     }}
 
     location ~ /api/v1/uploads(/?$|.*/raw|.*/bundle?$) {{
         client_max_body_size 35g;
         proxy_request_buffering off;
-        proxy_pass http://{1}:{2};
+        proxy_pass http://{host}:{port};
     }}
 
     location ~ /api/v1/.*/download {{
         proxy_buffering off;
-        proxy_pass http://{1}:{2};
+        proxy_pass http://{host}:{port};
     }}
-""".format(prefix, host, port)
+"""
     )
     if server:
         print('}')
diff --git a/nomad/cli/admin/clean.py b/nomad/cli/admin/clean.py
index 74964478356a2be7c3002cb8649dbc0c26463943..6ca3d6c7ca5773b59bf79b7a716508f890d9be98 100644
--- a/nomad/cli/admin/clean.py
+++ b/nomad/cli/admin/clean.py
@@ -68,8 +68,7 @@ def clean(dry, skip_entries, skip_fs, skip_es, staging_too, force):
         if not dry and len(missing_uploads) > 0:
             if not force:
                 input(
-                    'Will delete entries (mongo + es) for %d missing uploads. Press any key to continue ...'
-                    % len(missing_uploads)
+                    f'Will delete entries (mongo + es) for {len(missing_uploads)} missing uploads. Press any key to continue ...'
                 )
 
             for upload in missing_uploads:
@@ -81,8 +80,7 @@ def clean(dry, skip_entries, skip_fs, skip_es, staging_too, force):
                 ).query('term', upload_id=upload).delete()
         else:
             print(
-                'Found %s uploads that have entries in mongo, but there is no upload entry.'
-                % len(missing_uploads)
+                f'Found {len(missing_uploads)} uploads that have entries in mongo, but there is no upload entry.'
             )
             print('List first 10:')
             for upload in missing_uploads[:10]:
@@ -109,16 +107,13 @@ def clean(dry, skip_entries, skip_fs, skip_es, staging_too, force):
         if not dry and len(to_delete) > 0:
             if not force:
                 input(
-                    'Will delete %d upload directories. Press any key to continue ...'
-                    % len(to_delete)
+                    f'Will delete {len(to_delete)} upload directories. Press any key to continue ...'
                 )
 
             for path in to_delete:
                 shutil.rmtree(path)
         else:
-            print(
-                'Found %d upload directories with no upload in mongo.' % len(to_delete)
-            )
+            print(f'Found {len(to_delete)} upload directories with no upload in mongo.')
             print('List first 10:')
             for path in to_delete[:10]:
                 print(path)
@@ -131,16 +126,14 @@ def clean(dry, skip_entries, skip_fs, skip_es, staging_too, force):
         if not dry and len(to_delete) > 0:
             if not force:
                 input(
-                    'Will delete %d staging upload directories. Press any key to continue ...'
-                    % len(to_delete)
+                    f'Will delete {len(to_delete)} staging upload directories. Press any key to continue ...'
                 )
 
             for path in to_delete:
                 shutil.rmtree(path)
         else:
             print(
-                'Found %d staging upload directories with upload directory in public.'
-                % len(to_delete)
+                f'Found {len(to_delete)} staging upload directories with upload directory in public.'
             )
             print('List first 10:')
             for path in to_delete[:10]:
@@ -164,15 +157,13 @@ def clean(dry, skip_entries, skip_fs, skip_es, staging_too, force):
         if not dry and len(to_delete) > 0:
             if not force:
                 input(
-                    'Will delete %d entries in %d uploads from ES. Press any key to continue ...'
-                    % (entries, len(to_delete))
+                    f'Will delete {entries} entries in {len(to_delete)} uploads from ES. Press any key to continue ...'
                 )
             for upload_id, _ in to_delete:
                 delete_by_query(owner='all', query=dict(upload_id=upload_id))
         else:
             print(
-                'Found %d entries in %d uploads from ES with no upload in mongo.'
-                % (entries, len(to_delete))
+                f'Found {entries} entries in {len(to_delete)} uploads from ES with no upload in mongo.'
             )
             print('List first 10:')
             tabulate.tabulate(to_delete, headers=['id', '#entries'])
diff --git a/nomad/cli/admin/entries.py b/nomad/cli/admin/entries.py
index 77b3f5bbcdcc64157b03f593d53c43d0576d09fb..54ac693eed7dcd6510287497f0733e912432729a 100644
--- a/nomad/cli/admin/entries.py
+++ b/nomad/cli/admin/entries.py
@@ -38,7 +38,7 @@ def rm(entries, skip_es, skip_mongo):
     infrastructure.setup_mongo()
     infrastructure.setup_elastic()
 
-    print('%d entries selected, deleting ...' % len(entries))
+    print(f'{len(entries)} entries selected, deleting ...')
 
     if not skip_es:
         for entry in entries:
diff --git a/nomad/cli/admin/springer.py b/nomad/cli/admin/springer.py
index 74dde0e7350ba1b56a4806f8736156e80f4d2540..7c468d179682ee84ea3c5c333073a538a40b2444 100644
--- a/nomad/cli/admin/springer.py
+++ b/nomad/cli/admin/springer.py
@@ -86,8 +86,7 @@ def normalize_formula(formula_str: str) -> str:
     symbol_normamount = {e: round(a / total * 100.0) for e, a in symbol_amount.items()}
 
     formula_sorted = [
-        '%s%d' % (s, symbol_normamount[s])
-        for s in sorted(list(symbol_normamount.keys()))
+        f'{s}{symbol_normamount[s]}' for s in sorted(list(symbol_normamount.keys()))
     ]
 
     return ''.join(formula_sorted)
@@ -198,10 +197,7 @@ def update_springer(max_n_query: int = 10, retry_time: int = 120):
     page = 1
     while True:
         # check springer database for new entries by comparing with local database
-        root = (
-            'http://materials.springer.com/search?searchTerm=&pageNumber=%d&datasourceFacet=sm_isp&substanceId='
-            % page
-        )
+        root = f'http://materials.springer.com/search?searchTerm=&pageNumber={page}&datasourceFacet=sm_isp&substanceId='
         req_text = _download(root, max_n_query, retry_time)
         if 'Sorry,' in req_text:
             break
@@ -216,7 +212,7 @@ def update_springer(max_n_query: int = 10, retry_time: int = 120):
             if sp_id in sp_ids:
                 continue
 
-            path = 'http://materials.springer.com%s' % path
+            path = f'http://materials.springer.com{path}'
             req_text = _download(path, max_n_query, retry_time)
             try:
                 data = parse(req_text)
diff --git a/nomad/cli/admin/uploads.py b/nomad/cli/admin/uploads.py
index 55d35f783e400a01d6d915893a9981e7a62b534d..258e8affdbe03cb23a239b250ac28f59d5de3921 100644
--- a/nomad/cli/admin/uploads.py
+++ b/nomad/cli/admin/uploads.py
@@ -37,7 +37,7 @@ def _run_parallel(
 
     from nomad import utils, processing as proc
 
-    if isinstance(uploads, (tuple, list)):
+    if isinstance(uploads, tuple | list):
         uploads_count = len(uploads)
 
     else:
@@ -53,10 +53,10 @@ def _run_parallel(
 
     logger = utils.get_logger(__name__)
 
-    print('%d uploads selected, %s ...' % (uploads_count, label))
+    print(f'{uploads_count} uploads selected, {label} ...')
 
     def process_upload(upload: proc.Upload):
-        logger.info('%s started' % label, upload_id=upload.upload_id)
+        logger.info(f'{label} started', upload_id=upload.upload_id)
 
         completed = False
         try:
@@ -64,7 +64,7 @@ def _run_parallel(
                 completed = True
         except Exception as e:
             completed = True
-            logger.error('%s failed' % label, upload_id=upload.upload_id, exc_info=e)
+            logger.error(f'{label} failed', upload_id=upload.upload_id, exc_info=e)
 
         with cv:
             state['completed_count'] += 1 if completed else 0
@@ -72,8 +72,7 @@ def _run_parallel(
             state['available_threads_count'] += 1
 
             print(
-                '   %s %s and skipped %s of %s uploads'
-                % (
+                '   {} {} and skipped {} of {} uploads'.format(
                     label,
                     state['completed_count'],
                     state['skipped_count'],
@@ -85,7 +84,7 @@ def _run_parallel(
 
     for upload in uploads:
         logger.info(
-            'cli schedules parallel %s processing for upload' % label,
+            f'cli schedules parallel {label} processing for upload',
             current_process=upload.current_process,
             last_status_message=upload.last_status_message,
             upload_id=upload.upload_id,
@@ -125,15 +124,14 @@ def _run_processing(
 
     def run_process(upload, logger):
         logger.info(
-            'cli calls %s processing' % label,
+            f'cli calls {label} processing',
             current_process=upload.current_process,
             last_status_message=upload.last_status_message,
             upload_id=upload.upload_id,
         )
         if upload.process_running and not process_running:
             logger.warn(
-                'cannot trigger %s, since the upload is already/still processing'
-                % label,
+                f'cannot trigger {label}, since the upload is already/still processing',
                 current_process=upload.current_process,
                 last_status_message=upload.last_status_message,
                 upload_id=upload.upload_id,
@@ -152,9 +150,9 @@ def _run_processing(
             upload.block_until_complete_or_waiting_for_result(interval=0.5)
 
         if upload.process_status == proc.ProcessStatus.FAILURE:
-            logger.info('%s with failure' % label, upload_id=upload.upload_id)
+            logger.info(f'{label} with failure', upload_id=upload.upload_id)
 
-        logger.info('%s complete' % label, upload_id=upload.upload_id)
+        logger.info(f'{label} complete', upload_id=upload.upload_id)
         return True
 
     _run_parallel(
@@ -514,7 +512,7 @@ def chown(ctx, username, uploads):
 
     _, uploads = _query_uploads(uploads, **ctx.obj.uploads_kwargs)
 
-    print('%d uploads selected, changing owner ...' % uploads.count())
+    print(f'{uploads.count()} uploads selected, changing owner ...')
 
     user = datamodel.User.get(username=username)
     for upload in uploads:
@@ -544,7 +542,7 @@ def reset(ctx, uploads, with_entries, success, failure):
     _, uploads = _query_uploads(uploads, **ctx.obj.uploads_kwargs)
     uploads_count = uploads.count()
 
-    print('%d uploads selected, resetting their processing ...' % uploads_count)
+    print(f'{uploads_count} uploads selected, resetting their processing ...')
 
     i = 0
     for upload in uploads:
@@ -566,7 +564,7 @@ def reset(ctx, uploads, with_entries, success, failure):
             upload.process_status = proc.ProcessStatus.FAILURE
         upload.save()
         i += 1
-        print('resetted %d of %d uploads' % (i, uploads_count))
+        print(f'reset {i} of {uploads_count} uploads')
 
 
 @uploads.command(help='(Re-)index all entries of the given uploads.')
@@ -671,7 +669,7 @@ def delete_upload(
 def rm(ctx, uploads, skip_es, skip_mongo, skip_files):
     _, uploads = _query_uploads(uploads, **ctx.obj.uploads_kwargs)
 
-    print('%d uploads selected, deleting ...' % uploads.count())
+    print(f'{uploads.count()} uploads selected, deleting ...')
 
     for upload in uploads:
         delete_upload(
diff --git a/nomad/cli/admin/users.py b/nomad/cli/admin/users.py
index 49a54132d73b9ebcebc7fe38783ca6a47e683af0..21253fb313395efbbc99604d35c864f22b40e3ed 100644
--- a/nomad/cli/admin/users.py
+++ b/nomad/cli/admin/users.py
@@ -49,6 +49,6 @@ def import_command(path_to_users_file):
             infrastructure.user_management.add_user(
                 user, bcrypt_password=password, invite=False
             )
-            print('Imported %s' % user.name)
+            print(f'Imported {user.name}')
         except Exception as e:
             logger.error('could not import user', exc_info=e)
diff --git a/nomad/cli/aflow.py b/nomad/cli/aflow.py
index 0548025e18bc522a880de218459857a4e7cf7c06..6912ec39dde56d95bd5106ead7305b83d0efadcf 100644
--- a/nomad/cli/aflow.py
+++ b/nomad/cli/aflow.py
@@ -76,7 +76,7 @@ class DbUpdater:
             if hasattr(self, key):
                 setattr(self, key, val)
             else:
-                raise KeyError('Invalid key %s' % key)
+                raise KeyError(f'Invalid key {key}')
 
         # create directory to save data
         subdir = ''
@@ -99,7 +99,7 @@ class DbUpdater:
                     else 'OUTCAR.static.xz'
                 )
         else:
-            raise NotImplementedError('%s not yet supported.' % self.db_name)
+            raise NotImplementedError(f'{self.db_name} not yet supported.')
 
         self._session = requests.Session()
 
@@ -126,7 +126,7 @@ class DbUpdater:
         return re.sub(r'[^\w\d-]', '_', path)
 
     def _read_from_file(self, filename):
-        print('Reading from file %s' % filename)
+        print(f'Reading from file {filename}')
         data = []
         with open(filename) as f:
             line = f.readline()
@@ -147,7 +147,7 @@ class DbUpdater:
         with open(filename, 'w') as f:
             for i in range(len(data)):
                 if isinstance(data[i], str):
-                    f.write('%s\n' % data[i])
+                    f.write(f'{data[i]}\n')
                 else:
                     f.write(f'{data[i][0]} {data[i][1]} \n')
 
@@ -155,7 +155,7 @@ class DbUpdater:
         if self.dbfile is not None and os.path.isfile(self.dbfile):
             self.db_files = self._read_from_file(self.dbfile)
         else:
-            print('Generating list from %s' % self.root_url)
+            print(f'Generating list from {self.root_url}')
             self.db_files = []
             todo = self._get_paths(self.root_url)
             while len(todo) > 0:
@@ -189,7 +189,7 @@ class DbUpdater:
         else:
             print('Generating NOMAD list')
             if self.db_name.lower() == 'aflowlib':
-                servers = ['LIB%d_LIB' % n for n in range(1, 10)] + ['ICSD_WEB']
+                servers = [f'LIB{n}_LIB' for n in range(1, 10)] + ['ICSD_WEB']
                 paths = [s for s in servers if s in self.root_url]
                 paths = paths if paths else servers
                 # main_author: Stefano Curtarolo
@@ -258,7 +258,7 @@ class DbUpdater:
         if len(in_nomad) > 0:
             fn = 'in_nomad.txt'
             print('Warning: Some NOMAD entries not found in db.')
-            print('See %s for list.' % fn)
+            print(f'See {fn} for list.')
             self._write_to_file(in_nomad, fn)
 
         # add the root back
@@ -267,7 +267,7 @@ class DbUpdater:
         root = f'{u.scheme}://{u.netloc}/{up}'
         self.update_list = [os.path.join(root, e) for e in self.update_list]
         self.is_updated_list = [False] * len(self.update_list)
-        print('Found %d entries to be added in NOMAD' % len(self.update_list))
+        print(f'Found {len(self.update_list)} entries to be added in NOMAD')
 
         if self.outfile is not None:
             data = [self.update_list[i] for i in range(len(self.update_list))]
@@ -345,7 +345,7 @@ class DbUpdater:
 
         tarname = f'{d1}-{d2}'
         uploadname = f'{self.db_name.upper()}_{tarname}'
-        tarname = os.path.join(self._local_path, '%s.tar' % tarname)
+        tarname = os.path.join(self._local_path, f'{tarname}.tar')
 
         return tarname, uploadname
 
@@ -477,7 +477,7 @@ class DbUpdater:
         """
         Download files from database.
         """
-        print('Downloading from %s' % self.root_url)
+        print(f'Downloading from {self.root_url}')
         s = time.time()
         plist = [[] for i in range(self.parallel)]
         cur = 0
@@ -669,9 +669,7 @@ def update_prototypes(ctx, filepath, matches_only):
                     atoms = ase.io.read(poscar_file, format='vasp')
                 except Exception:
                     print(
-                        'Error in getting prototype structure from POSCAR: {}'.format(
-                            poscarurl
-                        )
+                        f'Error in getting prototype structure from POSCAR: {poscarurl}'
                     )
                     print(
                         'Could not read prototype structure from CIF or POSCAR file for prototype: {}, {}, '.format(
@@ -705,9 +703,7 @@ def update_prototypes(ctx, filepath, matches_only):
         aflow_prototypes = {'prototypes_by_spacegroup': structure_types_by_spacegroup}
         print(
             'Extracted latest AFLOW prototypes online. Total number of '
-            'successfully fetched prototypes: {}, missing: {}'.format(
-                n_prototypes, n_missing
-            )
+            f'successfully fetched prototypes: {n_prototypes}, missing: {n_missing}'
         )
 
     # Update matches
@@ -748,9 +744,7 @@ def update_prototypes(ctx, filepath, matches_only):
                     n_unmatched += 1
     print(
         'Updated matches in AFLOW prototype library. Total number of '
-        'prototypes: {}, unmatched: {}, failed: {}'.format(
-            n_prototypes, n_unmatched, n_failed
-        )
+        f'prototypes: {n_prototypes}, unmatched: {n_unmatched}, failed: {n_failed}'
     )
 
     # Write data file to the specified path
diff --git a/nomad/cli/client/__init__.py b/nomad/cli/client/__init__.py
index 5a7cd1edce8c055c9903c679d2bae6e35fcfe4be..8b1ca40a86ff6fc48c9c6d8b2abac27b481ab5b9 100644
--- a/nomad/cli/client/__init__.py
+++ b/nomad/cli/client/__init__.py
@@ -28,7 +28,7 @@ from nomad.config import config
     '-n',
     '--url',
     default=config.client.url,
-    help='The URL where nomad is running, default is "%s".' % config.client.url,
+    help=f'The URL where nomad is running, default is "{config.client.url}".',
 )
 @click.option(
     '-u', '--user', default=None, help='the user name to login, default is no login.'
@@ -235,7 +235,7 @@ def local(
 
     from nomad.client import LocalEntryProcessing
 
-    print('Using %s' % config.client.url)
+    print(f'Using {config.client.url}')
     auth = _create_auth(ctx)
 
     with LocalEntryProcessing(entry_id, auth=auth, **kwargs) as local:
diff --git a/nomad/cli/dev.py b/nomad/cli/dev.py
index 074c88bd1093d68b0d23d459c51164efe2bdd926..e7ff602405fd1c7f177d26d301180a72ba10afdc 100644
--- a/nomad/cli/dev.py
+++ b/nomad/cli/dev.py
@@ -98,7 +98,7 @@ def api_model(model):
         if isinstance(data, dict):
             processed_dict = {}
             for key, value in data.items():
-                if isinstance(value, (dict, list)):
+                if isinstance(value, dict | list):
                     processed_value = remove_null_types(value)
 
                     # Only add the key if the processed value is not a "type": "null" dictionary
diff --git a/nomad/client/processing.py b/nomad/client/processing.py
index 97af552bb41fc83642d9a884ffad4c4bb1cc7cce..3445fc88711a48284f432cd15507ac0c93955adb 100644
--- a/nomad/client/processing.py
+++ b/nomad/client/processing.py
@@ -59,7 +59,7 @@ def parse(
     else:
         parser_name = parser.__class__.__name__
 
-    assert parser is not None, 'there is no parser matching %s' % mainfile
+    assert parser is not None, f'there is no parser matching {mainfile}'
     logger = logger.bind(parser=parser.name)  # type: ignore
     logger.info('identified parser')
     if hasattr(parser, 'backend_factory'):
@@ -86,7 +86,7 @@ def normalize(normalizer: str | typing.Callable, entry_archive, logger=None):
             if normalizer_instance.__class__.__name__ == normalizer
         )
 
-    assert normalizer is not None, 'there is no normalizer %s' % str(normalizer)
+    assert normalizer is not None, f'there is no normalizer {str(normalizer)}'
     normalizer_instance = typing.cast(typing.Callable, normalizer)(entry_archive)
     logger = logger.bind(normalizer=normalizer_instance.__class__.__name__)
     logger.info('identified normalizer')
diff --git a/nomad/client/upload.py b/nomad/client/upload.py
index 7594196df7c5ac2af07197ba805f013da6b07687..e800b8da04781ad004150d26b8cc47042d5917e6 100644
--- a/nomad/client/upload.py
+++ b/nomad/client/upload.py
@@ -51,7 +51,7 @@ def upload_file(
             headers={'Accept': 'application/json'},
             auth=auth,
         )
-        print('process offline: %s' % file_path)
+        print(f'process offline: {file_path}')
     else:
         with open(file_path, 'rb') as f:
             response = api.post(
@@ -62,7 +62,7 @@ def upload_file(
                 auth=auth,
             )
     if response.status_code != 200:
-        print('Could not create upload: %s' % response.text)
+        print(f'Could not create upload: {response.text}')
         return None
     upload = response.json()['data']
 
@@ -86,15 +86,8 @@ def upload_file(
         )
 
         print(
-            'status: %s; process: %s; parsing: %d/%d/%d                %s'
-            % (
-                upload['process_status'],
-                upload['current_process'],
-                successes,
-                failures,
-                total,
-                ret,
-            ),
+            f'status: {upload["process_status"]}; process: {upload["current_process"]}; '
+            f'parsing: {successes}/{failures}/{total}                {ret}',
             end='',
         )
 
@@ -103,11 +96,11 @@ def upload_file(
     if upload['process_status'] == ProcessStatus.FAILURE:
         print('There have been errors:')
         for error in upload['errors']:
-            print('    %s' % error)
+            print(f'    {error}')
     elif publish:
         response = api.post(f'uploads/{upload["upload_id"]}/action/publish', auth=auth)
         if response.status_code != 200:
-            print('Could not publish upload: %s' % response.text)
+            print(f'Could not publish upload: {response.text}')
             return None
 
     return upload['upload_id']
diff --git a/nomad/config/models/config.py b/nomad/config/models/config.py
index 12c424e4ec985913e6dd26e846ef79a2bfd23e49..2ec981ae5b53d69f9af968692f5499e2e23b53f3 100644
--- a/nomad/config/models/config.py
+++ b/nomad/config/models/config.py
@@ -598,7 +598,7 @@ class Normalize(ConfigBaseModel):
         """,
     )
     angle_rounding: float = Field(
-        float(10.0),
+        10.0,
         description="""
             Defines the "bin size" for rounding cell angles for the material hash in degree.
         """,
@@ -966,14 +966,10 @@ class Config(ConfigBaseModel):
         if page is not None:
             return f'{base}/gui/{page}'
 
-        return '%s/gui' % base
+        return f'{base}/gui'
 
     def rabbitmq_url(self):
-        return 'pyamqp://{}:{}@{}//'.format(
-            self.rabbitmq.user,
-            self.rabbitmq.password,
-            self.rabbitmq.host,
-        )
+        return f'pyamqp://{self.rabbitmq.user}:{self.rabbitmq.password}@{self.rabbitmq.host}//'
 
     def north_url(self, ssl: bool = True):
         return self.api_url(
diff --git a/nomad/config/models/plugins.py b/nomad/config/models/plugins.py
index 9657130e6cc68f1bb1e3ff5fa1ce5c69c033d211..eeade496bca2cea5bfd59018510aa28a2d85d805 100644
--- a/nomad/config/models/plugins.py
+++ b/nomad/config/models/plugins.py
@@ -681,7 +681,7 @@ class Parser(PythonPluginBase):
         return MatchingParserInterface(**data)
 
 
-EntryPointType = Union[
+EntryPointType = Union[  # noqa
     Schema,
     Normalizer,
     Parser,
diff --git a/nomad/config/models/ui.py b/nomad/config/models/ui.py
index a0ad71defd404aa4e8ed64e079e2802a4bbe0e84..88845aba8eaff855fdb7e5af268f9532a0637597 100644
--- a/nomad/config/models/ui.py
+++ b/nomad/config/models/ui.py
@@ -853,15 +853,13 @@ class MenuItemCustomQuantities(MenuItem):
 # The 'discriminated union' feature of Pydantic is used here:
 # https://docs.pydantic.dev/usage/types/#discriminated-unions-aka-tagged-unions
 MenuItemTypeNested = Annotated[
-    Union[
-        MenuItemTerms,
-        MenuItemHistogram,
-        MenuItemPeriodicTable,
-        MenuItemVisibility,
-        MenuItemDefinitions,
-        MenuItemOptimade,
-        MenuItemCustomQuantities,
-    ],
+    MenuItemTerms
+    | MenuItemHistogram
+    | MenuItemPeriodicTable
+    | MenuItemVisibility
+    | MenuItemDefinitions
+    | MenuItemOptimade
+    | MenuItemCustomQuantities,
     Field(discriminator='type'),
 ]
 
@@ -1199,14 +1197,12 @@ class WidgetScatterPlotDeprecated(WidgetScatterPlot):
 # The 'discriminated union' feature of Pydantic is used here:
 # https://docs.pydantic.dev/usage/types/#discriminated-unions-aka-tagged-unions
 WidgetAnnotated = Annotated[
-    Union[
-        WidgetTerms,
-        WidgetHistogram,
-        WidgetScatterPlot,
-        WidgetScatterPlotDeprecated,
-        WidgetPeriodicTable,
-        WidgetPeriodicTableDeprecated,
-    ],
+    WidgetTerms
+    | WidgetHistogram
+    | WidgetScatterPlot
+    | WidgetScatterPlotDeprecated
+    | WidgetPeriodicTable
+    | WidgetPeriodicTableDeprecated,
     Field(discriminator='type'),
 ]
 
diff --git a/nomad/datamodel/datamodel.py b/nomad/datamodel/datamodel.py
index a90eecef2abe75a849fcfa55749e220d917e3568..2bf32ebd83c6091725c8048090d19d636020f335 100644
--- a/nomad/datamodel/datamodel.py
+++ b/nomad/datamodel/datamodel.py
@@ -1094,7 +1094,7 @@ class EntryMetadata(MSection):
             if section_path.startswith('data') and isinstance(property_def, Quantity):
                 # From each string dtype, we get a truncated sample to put into
                 # the keywords field, unless we are already storing too many unique values.
-                if (isinstance(property_def.type, (MEnum, m_str))) and len(
+                if (isinstance(property_def.type, MEnum | m_str)) and len(
                     keywords_set
                 ) < 10000:
                     keyword = section.m_get(property_def)
diff --git a/nomad/datamodel/hdf5.py b/nomad/datamodel/hdf5.py
index bcb89e36f55ef278c31c51f5c4aa47200201354e..5bfa5c2e8452196ff65ec6b2f9d432a22e974f72 100644
--- a/nomad/datamodel/hdf5.py
+++ b/nomad/datamodel/hdf5.py
@@ -216,7 +216,7 @@ class HDF5Dataset(NonPrimitive):
             raise ValueError('Cannot normalize HDF5 value without context.')
 
         if not isinstance(
-            value, (str, np.ndarray, h5py.Dataset, pint.Quantity, HDF5Wrapper)
+            value, str | np.ndarray | h5py.Dataset | pint.Quantity | HDF5Wrapper
         ):
             raise ValueError(f'Invalid HDF5 dataset value: {value}.')
 
diff --git a/nomad/datamodel/optimade.py b/nomad/datamodel/optimade.py
index 101bb5a0497038b897ee18af852d0d2c25b38a77..f72fa69cbf3a836fc54d7b3ab0af320716d91868 100644
--- a/nomad/datamodel/optimade.py
+++ b/nomad/datamodel/optimade.py
@@ -33,8 +33,7 @@ from nomad.metainfo.elasticsearch_extension import Elasticsearch
 
 def optimade_links(section: str):
     return [
-        'https://github.com/Materials-Consortia/OPTiMaDe/blob/develop/optimade.md#%s'
-        % section
+        f'https://github.com/Materials-Consortia/OPTiMaDe/blob/develop/optimade.md#{section}'
     ]
 
 
diff --git a/nomad/datamodel/util.py b/nomad/datamodel/util.py
index ce8b7580cbe13ae45c052fb82297489796edca69..63ab9e0c481a61278f3dbee0373d5beb7626780e 100644
--- a/nomad/datamodel/util.py
+++ b/nomad/datamodel/util.py
@@ -151,7 +151,7 @@ def create_custom_mapping(
                     if isinstance(value, float) and math.isnan(value):
                         value = None
 
-                    if isinstance(value, (int, float, str)):
+                    if isinstance(value, int | float | str):
                         value = np.array([value])
 
                     if value is not None:
diff --git a/nomad/doi.py b/nomad/doi.py
index 994a840896793da98985af5dd003af1aea74963a..a755e19c31e6059503dbef60b6d59340b954bbda 100644
--- a/nomad/doi.py
+++ b/nomad/doi.py
@@ -87,8 +87,7 @@ def edit_doi_url(doi: str, url: str = None):
 
     if response.status_code >= 300:
         raise Exception(
-            'Unexpected datacite response (status code %d): %s'
-            % (response.status_code, response.text)
+            f'Unexpected datacite response (status code {response.status_code}): {response.text}'
         )
 
 
@@ -127,12 +126,7 @@ class DOI(Document):
         create_time = datetime.datetime.utcnow()
 
         while True:
-            doi_str = '%s/NOMAD/%s-%d' % (
-                config.datacite.prefix,
-                create_time.strftime('%Y.%m.%d'),
-                counter,
-            )
-
+            doi_str = f'{config.datacite.prefix}/NOMAD/{create_time.strftime("%Y.%m.%d")}-{counter}'
             try:
                 doi = DOI(doi=doi_str)
                 doi.save(force_insert=True)
@@ -188,7 +182,7 @@ class DOI(Document):
     def __handle_datacite_errors(self, response, msg: str):
         if response is None or response.status_code >= 300:
             utils.get_logger(__name__).error(
-                'could not %s' % msg,
+                f'could not {msg}',
                 status_code=response.status_code,
                 body=response.content,
                 doi=self.doi,
diff --git a/nomad/graph/graph_reader.py b/nomad/graph/graph_reader.py
index 9370c4bf2dd9df4416c508c57aadb59e73baf8a9..63473f36cda60f050161a2ea53e53649ca6e10b1 100644
--- a/nomad/graph/graph_reader.py
+++ b/nomad/graph/graph_reader.py
@@ -112,8 +112,8 @@ logger = utils.get_logger(__name__)
 
 # bug when used in isinstance() with mypy
 # see https://github.com/python/mypy/issues/11673
-GenericList = Union[list, ArchiveList, ArchiveListNew]
-GenericDict = Union[dict, ArchiveDict, ArchiveDictNew]
+GenericList = list | ArchiveList | ArchiveListNew
+GenericDict = dict | ArchiveDict | ArchiveDictNew
 
 
 @dataclasses.dataclass(frozen=True)
@@ -187,7 +187,7 @@ class ConfigError(Exception):
 
 async def goto_child(container, key: str | int | list):
     if not isinstance(key, list):
-        if isinstance(container, (list, dict)):
+        if isinstance(container, list | dict):
             return container[key]  # type: ignore
 
         return await asyncio.to_thread(container.__getitem__, key)
@@ -2510,7 +2510,7 @@ def _is_quantity_reference(definition) -> bool:
         isinstance(definition, Quantity)
         and isinstance(definition.type, Reference)
         and not isinstance(
-            definition.type, (UserReference, AuthorReference, DatasetReference)
+            definition.type, UserReference | AuthorReference | DatasetReference
         )
     )
 
@@ -2801,7 +2801,7 @@ class ArchiveReader(ArchiveLikeReader):
             )
 
         if isinstance(node.definition, Quantity) or isinstance(
-            getattr(node.definition, 'type', None), (JSON, AnyType)
+            getattr(node.definition, 'type', None), JSON | AnyType
         ):
             # the container size limit does not recursively apply to JSON
             result_to_write = (
@@ -3065,7 +3065,7 @@ class DefinitionReader(ArchiveLikeReader):
             # for derived quantities like 'all_properties', 'all_quantities', etc.
             # normalise them to maps
             is_plain_container: bool = (
-                False if is_list else isinstance(child_def, (list, set, dict))
+                False if is_list else isinstance(child_def, list | set | dict)
             )
 
             child_path: list = node.current_path + [name]
@@ -3090,7 +3090,7 @@ class DefinitionReader(ArchiveLikeReader):
                         await _populate_result(
                             node.result_root, child_path + [k], __convert(v)
                         )
-                elif isinstance(child_def, (set, list)):
+                elif isinstance(child_def, set | list):
                     await _populate_result(node.result_root, child_path, [])
                     for i, v in enumerate(child_def):
                         await _populate_result(
@@ -3109,7 +3109,7 @@ class DefinitionReader(ArchiveLikeReader):
                 if isinstance(child_def, dict):
                     for _k, _v in child_def.items():
                         await __func(child_path + [_k], _v)
-                elif isinstance(child_def, (set, list)):
+                elif isinstance(child_def, set | list):
                     for _i, _v in enumerate(child_def):
                         await __func(child_path + [str(_i)], _v)
                 else:
@@ -3264,7 +3264,7 @@ class DefinitionReader(ArchiveLikeReader):
                             _unwrap_subsection(v)
                             for v in (
                                 target
-                                if isinstance(target, (list, set))
+                                if isinstance(target, list | set)
                                 else target.values()
                             )
                         )
diff --git a/nomad/infrastructure.py b/nomad/infrastructure.py
index ae53636d9657050d72d3396e91cb6dd49c942f58..02b35346c1dbdbd49b84b29080babad8d17f5c51 100644
--- a/nomad/infrastructure.py
+++ b/nomad/infrastructure.py
@@ -106,7 +106,7 @@ def setup_elastic():
         http_auth = (config.elastic.username, config.elastic.password)
     global elastic_client
     elastic_client = connections.create_connection(
-        hosts=['%s:%d' % (config.elastic.host, config.elastic.port)],
+        hosts=[f'{config.elastic.host}:{config.elastic.port}'],
         timeout=config.elastic.timeout,
         max_retries=10,
         retry_on_timeout=True,
@@ -348,7 +348,7 @@ class KeycloakUserManagement(UserManagement):
         index = 1
         try:
             while self.get_user(username=user.username):
-                user.username += '%d' % index
+                user.username += f'{index}'
                 index += 1
         except KeyError:
             pass
@@ -415,12 +415,12 @@ class KeycloakUserManagement(UserManagement):
         if user.user_id != 'not_set':
             try:
                 self._admin_client.get_user(user.user_id)
-                return 'User %s with given id already exists' % user.email
+                return f'User {user.email} with given id already exists'
             except KeycloakGetError:
                 pass
 
         if self._admin_client.get_user_id(user.email) is not None:
-            return 'User with email %s already exists' % user.email
+            return f'User with email {user.email} already exists'
 
         try:
             self._admin_client.create_user(keycloak_user)
@@ -484,7 +484,7 @@ class KeycloakUserManagement(UserManagement):
                 user_id = self._admin_client.get_user_id(username)
 
             if user_id is None:
-                raise KeyError('User with username %s does not exist' % username)
+                raise KeyError(f'User with username {username} does not exist')
 
         if email is not None and user_id is None:
             with utils.lnr(logger, 'Could not use keycloak admin client'):
@@ -494,7 +494,7 @@ class KeycloakUserManagement(UserManagement):
                 user_id = users[0]['id']
 
             if user_id is None:
-                raise KeyError('User with email %s does not exist' % email)
+                raise KeyError(f'User with email {email} does not exist')
 
         assert user_id is not None, 'Could not determine user from given kwargs'
 
@@ -625,7 +625,7 @@ def send_mail(name: str, email: str, message: str, subject: str):
     to_addrs = [email]
 
     if config.mail.cc_address is not None:
-        msg['Cc'] = 'The nomad team <%s>' % config.mail.cc_address
+        msg['Cc'] = f'The nomad team <{config.mail.cc_address}>'
         to_addrs.append(config.mail.cc_address)
 
     try:
diff --git a/nomad/metainfo/data_type.py b/nomad/metainfo/data_type.py
index 992cced7ef98d275244ceed79f705b38cd97d9b3..04b4c724651ac2a2307d9d3c55b8aba26799605b 100644
--- a/nomad/metainfo/data_type.py
+++ b/nomad/metainfo/data_type.py
@@ -25,7 +25,7 @@ from base64 import b64decode, b64encode
 from datetime import datetime, date
 from functools import reduce
 from inspect import isclass
-from typing import Any as TypingAny, Optional
+from typing import Any as TypingAny
 from urllib.parse import urlparse, urlunparse
 
 import numpy as np
@@ -230,7 +230,7 @@ class Primitive(Datatype):
             return value
 
         if self.is_scalar:
-            if isinstance(value, (list, np.ndarray)):
+            if isinstance(value, list | np.ndarray):
                 raise ValueError(f'Shape mismatch for {value}.')
         else:
             if not self.support_array:
@@ -261,7 +261,7 @@ class Primitive(Datatype):
                 'type_data': self._dtype.__name__,
             } | self.flags
 
-        if issubclass(self._dtype, (np.number, np.str_, np.bool_)):
+        if issubclass(self._dtype, np.number | np.str_ | np.bool_):
             return {
                 'type_kind': 'numpy',
                 'type_data': self._dtype.__name__,
@@ -274,7 +274,7 @@ class Primitive(Datatype):
             return value
 
         def extract_magnitude(v):
-            if isinstance(v, (list, tuple)):
+            if isinstance(v, list | tuple):
                 return [extract_magnitude(x) for x in v]
 
             if not isinstance(v, pint.Quantity):
@@ -333,9 +333,9 @@ class Primitive(Datatype):
 
         if isinstance(value, np.ndarray):
             array = value
-        elif isinstance(value, (pd.DataFrame, pd.Series)):
+        elif isinstance(value, pd.DataFrame | pd.Series):
             array = value.to_numpy()
-        elif isinstance(value, (list, tuple)):
+        elif isinstance(value, list | tuple):
             array = np.array(value)
         else:
             raise ValueError(f'Cannot identify type for {value}.')
@@ -363,7 +363,7 @@ class Primitive(Datatype):
                     array = array.astype(self._dtype, casting='safe')
                 except TypeError:
                     new_array = array.astype(self._dtype).astype(original_dtype)
-                    if isinstance(self, (m_str, m_bool)):
+                    if isinstance(self, m_str | m_bool):
                         if not np.all(array == new_array):
                             raise ValueError(
                                 f'Cannot convert {array} to {self._dtype}.'
@@ -535,7 +535,7 @@ class InexactNumber(Number):
         """
 
         def _preprocess(v):
-            if isinstance(v, (list, tuple)):
+            if isinstance(v, list | tuple):
                 return [_preprocess(x) for x in v]
 
             return float('nan') if v is None else v
@@ -658,7 +658,7 @@ class m_complex128(m_complex):
             return {'re': value.real, 'im': value.imag}
 
         # 1D
-        if isinstance(value, (list, tuple)):
+        if isinstance(value, list | tuple):
             return {'re': [v.real for v in value], 'im': [v.imag for v in value]}
 
         # ND
@@ -993,7 +993,7 @@ class Datetime(NonPrimitive):
             datetime_obj = datetime(value.year, value.month, value.day)
         elif isinstance(value, str):
             datetime_obj = parse(value)
-        elif isinstance(value, (int, float)):
+        elif isinstance(value, int | float):
             datetime_obj = datetime.fromtimestamp(value)
         elif isinstance(value, pd.Timestamp):
             datetime_obj = value.to_pydatetime()
@@ -1278,20 +1278,19 @@ def to_pydantic_type(in_type: Datatype):
     standard_type = in_type.standard_type()
 
     if standard_type.startswith('int'):
-        return Optional[int]
+        return int | None
     if standard_type.startswith('float'):
-        return Optional[float]
+        return float | None
     if standard_type.startswith('complex'):
-        return Optional[complex]
+        return complex | None
     if standard_type == 'bool':
-        return Optional[bool]
+        return bool | None
     if standard_type in ('str', 'enum'):
-        return Optional[str]
+        return str | None
     if standard_type == 'datetime':
-        return Optional[datetime]
+        return datetime | None
     if standard_type == 'dict':
-        return Optional[dict]
-
+        return dict | None
     raise NotImplementedError(f'Unsupported pydantic data type {in_type}.')
 
 
@@ -1399,7 +1398,7 @@ def _normalize_complex(value, complex_type, to_unit: str | ureg.Unit | None):
         )
 
     # complex or real part only
-    if isinstance(value, (int, float, complex, np.number)):
+    if isinstance(value, int | float | complex | np.number):
         __check_precision(type(value))
         return complex_type(value)
 
diff --git a/nomad/metainfo/elasticsearch_extension.py b/nomad/metainfo/elasticsearch_extension.py
index 6c44725683a1c9a07188259b3845fa767f7101cc..605b0bb779cc0f2f453ac3f2d63557ea42912b49 100644
--- a/nomad/metainfo/elasticsearch_extension.py
+++ b/nomad/metainfo/elasticsearch_extension.py
@@ -245,7 +245,7 @@ class DocumentType:
         """
         Creates an indexable document from the given archive.
         """
-        suggestions: DefaultDict = defaultdict(list)
+        suggestions: defaultdict = defaultdict(list)
 
         def transform(quantity, section, value, path):
             """
@@ -498,7 +498,7 @@ class DocumentType:
         package_names = set()
         packages_from_plugins = {}
         for plugin in config.plugins.entry_points.filtered_values():
-            if isinstance(plugin, (Schema, Parser)):
+            if isinstance(plugin, Schema | Parser):
                 package_name = plugin.python_package
                 if package_name in package_names:
                     raise ValueError(
@@ -608,15 +608,13 @@ class DocumentType:
 
         assert (
             name not in self.quantities or self.quantities[name] == search_quantity
-        ), 'Search quantity names must be unique: %s' % name
+        ), f'Search quantity names must be unique: {name}'
 
         self.quantities[name] = search_quantity
 
         if annotation.metrics is not None:
             for name, metric in annotation.metrics.items():
-                assert name not in self.metrics, (
-                    'Metric names must be unique: %s' % name
-                )
+                assert name not in self.metrics, f'Metric names must be unique: {name}'
                 self.metrics[name] = (metric, search_quantity)
 
         if self == entry_type:
@@ -939,8 +937,7 @@ class Elasticsearch(DefinitionAnnotation):
 
             if self.dynamic:
                 raise NotImplementedError(
-                    'Quantity type %s for dynamic quantity %s is not supported.'
-                    % (quantity.type, quantity)
+                    f'Quantity type {quantity.type} for dynamic quantity {quantity} is not supported.'
                 )
             if isinstance(quantity.type, QuantityReference):
                 return compute_mapping(quantity.type.target_quantity_def)
@@ -950,8 +947,7 @@ class Elasticsearch(DefinitionAnnotation):
                 )
             else:
                 raise NotImplementedError(
-                    'Quantity type %s for quantity %s is not supported.'
-                    % (quantity.type, quantity)
+                    f'Quantity type {quantity.type} for quantity {quantity} is not supported.'
                 )
 
         if self.suggestion:
diff --git a/nomad/metainfo/example.py b/nomad/metainfo/example.py
index 3f18e1b52ca0446bf8408a4d96fe65a9f58bff5b..3e7f7443b41013722c1058de5bf70e0491629b7d 100644
--- a/nomad/metainfo/example.py
+++ b/nomad/metainfo/example.py
@@ -182,7 +182,7 @@ if __name__ == '__main__':
     print(system.n_atoms)
 
     # To validate dimensions and custom constraints
-    print('errors: %s' % run.m_all_validate())
+    print(f'errors: {run.m_all_validate()}')
 
     # To serialize the data:
     serializable = run.m_to_dict()
diff --git a/nomad/metainfo/metainfo.py b/nomad/metainfo/metainfo.py
index d047228ea513561cbbff80d4c795ed7e1b1318fd..c5e99bac8c8f4c16709beda576e15beb8a7b1c43 100644
--- a/nomad/metainfo/metainfo.py
+++ b/nomad/metainfo/metainfo.py
@@ -507,7 +507,7 @@ class Reference:
         }
 
     def _normalize_impl(self, section, value):
-        if isinstance(value, (str, int, dict)):
+        if isinstance(value, str | int | dict):
             if isinstance(value, str):
                 context = section.m_root().m_context if section else None
                 value = (
@@ -984,7 +984,7 @@ class MSection(metaclass=MObjectMeta):
         event_handlers: set[TypingCallable] = set(m_def.event_handlers)
         for name, attr in cls.__dict__.items():
             # transfer names and descriptions for properties, init properties
-            if isinstance(attr, (Attribute, Property)):
+            if isinstance(attr, Attribute | Property):
                 attr.name = name
                 if attr.description is not None:
                     attr.description = re.sub(
@@ -1110,7 +1110,7 @@ class MSection(metaclass=MObjectMeta):
             return default_name
 
         quantity_def = self.m_def.all_quantities.get(key_quantity)
-        if not isinstance(quantity_def.type, (m_str, Enum)):
+        if not isinstance(quantity_def.type, m_str | Enum):
             raise TypeError(f'Key quantity {key_quantity} must be of type str.')
 
         if self.m_is_set(quantity_def):
@@ -1332,7 +1332,7 @@ class MSection(metaclass=MObjectMeta):
 
             target = definition.__get__(self)
             if isinstance(target, list) and index is not None:
-                assert isinstance(index, (int, slice))
+                assert isinstance(index, int | slice)
                 try:
                     sliced = target[index]
                 except IndexError:
@@ -2505,7 +2505,7 @@ class MSection(metaclass=MObjectMeta):
         """
         if isinstance(key, str):
             value = self.m_annotations.get(key, default)
-            if as_list and not isinstance(value, (list, tuple)):
+            if as_list and not isinstance(value, list | tuple):
                 return [value]
             else:
                 return value
@@ -2513,7 +2513,7 @@ class MSection(metaclass=MObjectMeta):
         elif isinstance(key, type):
             result_list = []
             for values in self.m_annotations.values():
-                if isinstance(values, (tuple, list)):
+                if isinstance(values, tuple | list):
                     for value in values:
                         if isinstance(value, key):
                             result_list.append(value)
@@ -3207,7 +3207,7 @@ class Quantity(Property):
                 except Exception as e:
                     raise DeriveError(f'Could not derive value for {self}: {str(e)}')
 
-            if isinstance(self.default, (dict, list)):
+            if isinstance(self.default, dict | list):
                 value = self.default.copy()
             else:
                 value = self.default
@@ -3487,13 +3487,13 @@ class SubSection(Property):
         """
         existing: MSection | MSubSectionList | None = self.__get__(obj)
         if self.repeats:
-            if value is not None and not isinstance(value, (list, set)):
+            if value is not None and not isinstance(value, list | set):
                 raise TypeError(
                     'Cannot set a repeating subsection directly, modify the list, e.a. via append.'
                 )
             existing = cast(MSubSectionList, existing)
             existing.clear()
-            if isinstance(value, (list, set)):
+            if isinstance(value, list | set):
                 existing.extend(value)
         else:
             if existing is value:
diff --git a/nomad/metainfo/util.py b/nomad/metainfo/util.py
index f542325e24cc13fc0f1a51a9fda358c7936cf501..53e59e2e3fb8bebecde44edc416a05e12a5821d6 100644
--- a/nomad/metainfo/util.py
+++ b/nomad/metainfo/util.py
@@ -406,7 +406,7 @@ def validate_allowable_unit(
         except KeyError:
             return False
 
-    if isinstance(allowable_list, (pint.Unit, pint.Quantity)):
+    if isinstance(allowable_list, pint.Unit | pint.Quantity):
         if dimensionality in ('1', 'dimensionless'):
             return allowable_list.dimensionless
 
diff --git a/nomad/mkdocs.py b/nomad/mkdocs.py
index 25efa82810ed9dcc27304ed57fde48178ae40dbb..281046c19a2e41ecd2fd36d9dd9fe398ad4c72f4 100644
--- a/nomad/mkdocs.py
+++ b/nomad/mkdocs.py
@@ -158,7 +158,7 @@ def get_field_default(field) -> str | None:
     """
     default_value = field.default
     if default_value is not None:
-        if isinstance(default_value, (dict, BaseModel)):
+        if isinstance(default_value, dict | BaseModel):
             default_value = 'Complex object, default value not displayed.'
         elif default_value == '':
             default_value = '""'
diff --git a/nomad/normalizing/method.py b/nomad/normalizing/method.py
index c4423ec72d3f398c46ccea59451f3b8eab808f71..0fc961ec59be8499fe209547351a58d40abe2f8b 100644
--- a/nomad/normalizing/method.py
+++ b/nomad/normalizing/method.py
@@ -20,7 +20,6 @@ from ase.dft.kpoints import monkhorst_pack, get_monkhorst_pack_size_and_offset
 from collections import OrderedDict
 import re
 import numpy as np
-from typing import List, Tuple, Union, Optional
 
 from nomad.datamodel import EntryArchive, ArchiveSection
 from nomad.metainfo import MSection
@@ -718,7 +717,7 @@ class DFTMethod(ElectronicMethod):
                 method_dict['smearing_kind'] = smearing_kind
             smearing_width = self._repr_method.electronic.smearing.width
             if smearing_width is not None:
-                smearing_width = '%.4f' % (smearing_width)
+                smearing_width = f'{smearing_width:.4f}'
                 method_dict['smearing_width'] = smearing_width
         except Exception:
             pass
@@ -737,7 +736,7 @@ class DFTMethod(ElectronicMethod):
         try:
             conv_thr = self._repr_method.scf.threshold_energy_change
             if conv_thr is not None:
-                conv_thr = '%.13f' % (conv_thr.to(ureg.rydberg).magnitude)
+                conv_thr = f'{conv_thr.to(ureg.rydberg).magnitude:.13f}'
                 method_dict['scf_threshold_energy_change'] = conv_thr
         except Exception:
             pass
@@ -789,12 +788,12 @@ class DFTMethod(ElectronicMethod):
             atom_positions = atoms['positions']
             geom_dict['atom_positions'] = np.array2string(
                 atom_positions.to(ureg.angstrom).magnitude,  # convert to Angstrom
-                formatter={'float_kind': lambda x: '%.6f' % x},  # type: ignore
+                formatter={'float_kind': lambda x: f'{x:.6f}'},  # type: ignore
             ).replace('\n', '')
             cell = atoms['lattice_vectors']
             geom_dict['simulation_cell'] = np.array2string(
                 cell.to(ureg.angstrom).magnitude,  # convert to Angstrom
-                formatter={'float_kind': lambda x: '%.6f' % x},  # type: ignore
+                formatter={'float_kind': lambda x: f'{x:.6f}'},  # type: ignore
             ).replace('\n', '')
         except Exception:
             pass
@@ -813,7 +812,7 @@ class DFTMethod(ElectronicMethod):
             else None
         )
         if conv_thr is not None:
-            conv_thr = '%.13f' % (conv_thr.to(ureg.rydberg).magnitude)
+            conv_thr = f'{conv_thr.to(ureg.rydberg).magnitude:.13f}'
         param_dict['scf_threshold_energy_change'] = conv_thr
 
         # Pseudopotentials are kept constant, if applicable
@@ -1109,10 +1108,10 @@ class BasisSetFHIAims(MethodNormalizerBasisSet):
         result = None
         if data is None:
             return None
-        elif isinstance(data, (Section, dict)):
+        elif isinstance(data, Section | dict):
             result = OrderedDict()
             for k in sorted(cls._filtered_section_keys(data)):
-                v = data.get(k, None)
+                v = data.get(k, None)  # type: ignore
                 result[k] = cls._values_to_dict(v, level=level + 1)
         elif isinstance(data, (list)):
             result = []
@@ -1170,14 +1169,14 @@ class BasisSetExciting(MethodNormalizerBasisSet):
             for group in groups:
                 label = group.x_exciting_geometry_atom_labels
                 try:
-                    muffin_tin_settings[f'{label}_muffin_tin_radius'] = '%.6f' % (
-                        group.x_exciting_muffin_tin_radius.to(ureg.angstrom).magnitude
+                    muffin_tin_settings[f'{label}_muffin_tin_radius'] = (
+                        f'{group.x_exciting_muffin_tin_radius.to(ureg.angstrom).magnitude:.6f}'
                     )
                 except Exception:
                     muffin_tin_settings[f'{label}_muffin_tin_radius'] = None
                 try:
                     muffin_tin_settings[f'{label}_muffin_tin_points'] = (
-                        '%d' % group.x_exciting_muffin_tin_points
+                        f'{group.x_exciting_muffin_tin_points}'
                     )
                 except Exception:
                     muffin_tin_settings[f'{label}_muffin_tin_points'] = None
@@ -1188,7 +1187,7 @@ class BasisSetExciting(MethodNormalizerBasisSet):
         # Other important method settings
         system = self._repr_system
         try:
-            self.settings['rgkmax'] = '%.6f' % (system.x_exciting_rgkmax.magnitude)
+            self.settings['rgkmax'] = f'{system.x_exciting_rgkmax.magnitude:.6f}'
         except Exception:
             pass
         try:
@@ -1198,11 +1197,11 @@ class BasisSetExciting(MethodNormalizerBasisSet):
         except Exception:
             pass
         try:
-            self.settings['lo'] = '%d' % (system.x_exciting_lo)
+            self.settings['lo'] = f'{system.x_exciting_lo}'
         except Exception:
             pass
         try:
-            self.settings['lmaxapw'] = '%d' % (system.x_exciting_lmaxapw)
+            self.settings['lmaxapw'] = f'{system.x_exciting_lmaxapw}'
         except Exception:
             pass
 
diff --git a/nomad/normalizing/results.py b/nomad/normalizing/results.py
index 3e734e6f26049715ad910ec2ed7fba2041e3be61..6f5f4ebde1e15ef57b0668b85d49efb63c332d82 100644
--- a/nomad/normalizing/results.py
+++ b/nomad/normalizing/results.py
@@ -1097,7 +1097,7 @@ class ResultsNormalizer(Normalizer):
                             msd.diffusion_constant_errors = (
                                 diffusion_constant.errors
                                 if isinstance(
-                                    diffusion_constant.errors, (list, np.ndarray)
+                                    diffusion_constant.errors, list | np.ndarray
                                 )
                                 else [diffusion_constant.errors]
                             )
diff --git a/nomad/parsing/artificial.py b/nomad/parsing/artificial.py
index 484f97ae15268f3feae2cd449a3ac47b0b0b650e..6302b72dde31231c95de7924a889b9d38d20423e 100644
--- a/nomad/parsing/artificial.py
+++ b/nomad/parsing/artificial.py
@@ -140,7 +140,7 @@ class ChaosParser(Parser):
             while True:
                 data.append('a' * 10**6)
                 i += 1
-                logger.info('ate %d mb' % i)
+                logger.info(f'ate {i} mb')
         elif chaos == 'exception':
             raise Exception('Some chaos happened, muhuha...')
         elif chaos == 'segfault':
diff --git a/nomad/parsing/file_parser/file_parser.py b/nomad/parsing/file_parser/file_parser.py
index 488fb026565f2a62887969b29fd7994ce8a6f6e8..79b4f9086a8fc43858b11dbd5fe5ee17c26c72af 100644
--- a/nomad/parsing/file_parser/file_parser.py
+++ b/nomad/parsing/file_parser/file_parser.py
@@ -184,7 +184,7 @@ class FileParser(ABC):
             return
 
         if unit is not None:
-            if isinstance(unit, (pint.Quantity, pint.Unit)):
+            if isinstance(unit, pint.Quantity | pint.Unit):
                 val = val * unit
 
             elif isinstance(val, pint.Quantity):
diff --git a/nomad/parsing/file_parser/text_parser.py b/nomad/parsing/file_parser/text_parser.py
index 6b8804c507a0198e3bb4391b020af63e27232afb..10802db4aa74c8fee7928a14df1aaedac06f97eb 100644
--- a/nomad/parsing/file_parser/text_parser.py
+++ b/nomad/parsing/file_parser/text_parser.py
@@ -41,7 +41,7 @@ class ParsePattern:
                 token += r'\w'
             if 'array' in value:
                 token += r' '
-            value = r'[%s]+' % token
+            value = rf'[{token}]+'
         self._value = value
         self._tail = kwargs.get('tail', '\n')
         self._re_pattern = None
@@ -49,14 +49,9 @@ class ParsePattern:
     @property
     def re_pattern(self):
         if self._re_pattern is None:
-            head = r'%s[\s\S]*?' % self._head if self._head else ''
-            key = r'%s\s*\:*\=*\s*' % self._key if self._key else ''
-            self._re_pattern = r'{}{}\s*\:*\=*\s*({}){}'.format(
-                head,
-                key,
-                self._value,
-                self._tail,
-            )
+            head = rf'{self._head}[\s\S]*?' if self._head else ''
+            key = rf'{self._key}\s*\:*\=*\s*' if self._key else ''
+            self._re_pattern = rf'{head}{key}\s*\:*\=*\s*({self._value}){self._tail}'
         return self._re_pattern
 
     def __call__(self, text, repeats=True):
@@ -163,7 +158,7 @@ class Quantity:
         Returns a compiled re pattern.
         """
         if isinstance(self._re_pattern, str):
-            re_pattern = self._re_pattern.replace('__unit', '__unit_%s' % self.name)
+            re_pattern = self._re_pattern.replace('__unit', f'__unit_{self.name}')
             self._re_pattern = re.compile(re_pattern.encode())
         return self._re_pattern
 
@@ -194,7 +189,7 @@ class Quantity:
 
                 return val
 
-            elif isinstance(val, (list, np.ndarray)):
+            elif isinstance(val, list | np.ndarray):
                 try:
                     dtype = float if self.dtype is None else self.dtype
                     val_test = np.array(val, dtype=dtype)
@@ -429,7 +424,7 @@ class TextParser(FileParser):
             re_findall = '|'.join([q.re_pattern.pattern.decode() for q in quantities])
             if len(quantities) == 1:
                 # necessary to add a dummy variable to make multiple matches
-                re_findall = '%s|(__dummy__)' % re_findall
+                re_findall = f'{re_findall}|(__dummy__)'
             re_findall_b = re.compile(re_findall.encode())
             if self._re_findall is None:
                 self._re_findall = re_findall_b
@@ -451,7 +446,7 @@ class TextParser(FileParser):
                     continue
                 non_empty_matches.append(non_empty_match)
             index_unit = quantity.re_pattern.groupindex.get(
-                '__unit_%s' % quantity.name, None
+                f'__unit_{quantity.name}', None
             )
             for non_empty_match in non_empty_matches:
                 try:
@@ -498,7 +493,7 @@ class TextParser(FileParser):
 
             else:
                 try:
-                    unit = res.groupdict().get('__unit_%s' % quantity.name, None)
+                    unit = res.groupdict().get(f'__unit_{quantity.name}', None)
                     units.append(unit.decode() if unit is not None else None)
                     value.append(
                         ' '.join(
diff --git a/nomad/parsing/parser.py b/nomad/parsing/parser.py
index 73ec9a4e2ef346dd50dfd43d5b3c05c551e05351..cc74a0297f8c294050b16edce966fc45ad600fe4 100644
--- a/nomad/parsing/parser.py
+++ b/nomad/parsing/parser.py
@@ -577,4 +577,4 @@ class MissingParser(MatchingParser):
     def parse(
         self, mainfile: str, archive: EntryArchive, logger=None, child_archives=None
     ):
-        raise Exception('The code %s is not yet supported.' % self.code_name)
+        raise Exception(f'The code {self.code_name} is not yet supported.')
diff --git a/nomad/parsing/parsers.py b/nomad/parsing/parsers.py
index 3cd806d4b20645dd63308ac0af61709b167cfcc6..3375e235c7803ac90e53969a9a02a685f62d4c4d 100644
--- a/nomad/parsing/parsers.py
+++ b/nomad/parsing/parsers.py
@@ -114,7 +114,7 @@ def match_parser(
     else:
         parsers_to_check = parsers
     for parser in parsers_to_check:
-        if strict and isinstance(parser, (MissingParser, EmptyParser)):
+        if strict and isinstance(parser, MissingParser | EmptyParser):
             continue
 
         match_result = parser.is_mainfile(
diff --git a/nomad/parsing/tabular.py b/nomad/parsing/tabular.py
index 55c6364902853ff362b335701ae3db1bbba7ea01..dcd4eb25946187e48f13fa36f57ccc7955b5f56a 100644
--- a/nomad/parsing/tabular.py
+++ b/nomad/parsing/tabular.py
@@ -657,7 +657,7 @@ def _create_column_to_quantity_mapping(section_def: Section):
                     if isinstance(value, float) and math.isnan(value):
                         value = None
 
-                    if isinstance(value, (int, float, str, pd.Timestamp)):
+                    if isinstance(value, int | float | str | pd.Timestamp):
                         value = np.array([value])
 
                     if value is not None:
diff --git a/nomad/processing/base.py b/nomad/processing/base.py
index 8207337e762194ec80884338bdc4038729dabc5c..c9a3a52c7197f1a45e0c37fdb481b8acbf1d9f39 100644
--- a/nomad/processing/base.py
+++ b/nomad/processing/base.py
@@ -77,7 +77,7 @@ def setup(**kwargs):
 
     infrastructure.setup()
     utils.get_logger(__name__).info(
-        'celery configured with acks_late=%s' % str(config.celery.acks_late)
+        f'celery configured with acks_late={config.celery.acks_late}'
     )
 
 
@@ -398,7 +398,7 @@ class Proc(Document):
         try:
             obj = cls.objects(**{id_field: id}).first()
         except ValidationError:
-            raise InvalidId('%s is not a valid id' % id)
+            raise InvalidId(f'{id} is not a valid id')
         except ConnectionFailure as e:
             raise e
 
@@ -559,10 +559,7 @@ class Proc(Document):
         )
 
     def __str__(self):
-        return 'proc celery_task_id={} worker_hostname={}'.format(
-            self.celery_task_id,
-            self.worker_hostname,
-        )
+        return f'proc celery_task_id={self.celery_task_id} worker_hostname={self.worker_hostname}'
 
     def parent(self) -> 'Proc':
         """
@@ -884,7 +881,7 @@ class NomadCeleryRequest(Request):
                 'detected WorkerLostError', exc_info=exc_info.exception
             )
             self._fail(
-                'process failed due to worker lost: %s' % str(exc_info.exception),
+                f'process failed due to worker lost: {exc_info.exception}',
                 exc_info=exc_info,
             )
 
@@ -913,7 +910,7 @@ def unwarp_task(task, cls_name, self_id, *args, **kwargs):
 
     if cls is None:
         logger.critical('document not a subclass of Proc')
-        raise ProcNotRegistered('document %s not a subclass of Proc' % cls_name)
+        raise ProcNotRegistered(f'document {cls_name} not a subclass of Proc')
 
     # get the process instance
     try:
@@ -962,8 +959,7 @@ def proc_task(task, cls_name, self_id, func_name, args, kwargs):
     if func is None:  # "Should not happen"
         logger.error('called function not a function of proc class')
         proc.fail(
-            'called function %s is not a function of proc class %s'
-            % (func_name, cls_name)
+            f'called function {func_name} is not a function of proc class {cls_name}'
         )
         return
 
@@ -971,7 +967,7 @@ def proc_task(task, cls_name, self_id, func_name, args, kwargs):
     unwrapped_func = getattr(func, '__process_unwrapped', None)
     if unwrapped_func is None:  # "Should not happen"
         logger.error('called function was not decorated with @process')
-        proc.fail('called function %s was not decorated with @process' % func_name)
+        proc.fail(f'called function {func_name} was not decorated with @process')
         return
 
     # call the process function
diff --git a/nomad/processing/data.py b/nomad/processing/data.py
index b1298479f752560b900d314f2de1a3066bc38d88..bc91723d7cb4a88885298b247d430e2a05425135 100644
--- a/nomad/processing/data.py
+++ b/nomad/processing/data.py
@@ -309,7 +309,7 @@ class MetadataEditRequestHandler:
     ):
         # Initialization
         assert user, 'Must specify `user`'
-        assert isinstance(edit_request, (StagingUploadFiles, dict)), (
+        assert isinstance(edit_request, StagingUploadFiles | dict), (
             '`edit_request` must be either a json dictionary or a :class:`StagingUploadfiles` object'
         )
         self.logger = logger
@@ -344,7 +344,7 @@ class MetadataEditRequestHandler:
         self.verified_entries: dict[
             str, dict[str, Any]
         ] = {}  # Metadata specified for individual entries
-        self.affected_uploads: list['Upload'] = (
+        self.affected_uploads: list[Upload] = (
             None  # A MetadataEditRequest may involve multiple uploads
         )
 
@@ -969,7 +969,7 @@ class Entry(Proc):
         self._upload: Upload = None
         self._upload_files: StagingUploadFiles = None
         self._proc_logs: list[Any] = []
-        self._child_entries: list['Entry'] = []
+        self._child_entries: list[Entry] = []
 
         self._entry_metadata: EntryMetadata = None
         self._perform_index = True
@@ -1661,11 +1661,7 @@ class Entry(Proc):
         return self._proc_logs
 
     def __str__(self):
-        return 'entry {} entry_id={} upload_id{}'.format(
-            super().__str__(),
-            self.entry_id,
-            self.upload_id,
-        )
+        return f'entry {super().__str__()} entry_id={self.entry_id} upload_id={self.upload_id}'
 
 
 class Upload(Proc):
@@ -1781,10 +1777,7 @@ class Upload(Proc):
     def get_logger(self, **kwargs):
         logger = super().get_logger()
         main_author_user = self.main_author_user
-        main_author_name = '{} {}'.format(
-            main_author_user.first_name,
-            main_author_user.last_name,
-        )
+        main_author_name = f'{main_author_user.first_name} {main_author_user.last_name}'
         # We are not using 'main_author' because logstash (?) will filter these entries ?!
         logger = logger.bind(
             upload_id=self.upload_id,
@@ -2306,21 +2299,16 @@ class Upload(Proc):
                 self.staging_upload_files.raw_file_object(stripped_path).os_path, 'w'
             ) as stripped_f:
                 stripped_f.write(
-                    'Stripped POTCAR file. Checksum of original file (sha224): %s\n'
-                    % checksum
+                    f'Stripped POTCAR file. Checksum of original file (sha224): {checksum}\n'
                 )
             os.system(
+                f"""
+                    awk < '{self.staging_upload_files.raw_file_object(path).os_path}' >> '{self.staging_upload_files.raw_file_object(stripped_path).os_path}' '
+                    BEGIN {{ dump=1 }}
+                    /End of Dataset/ {{ dump=1 }}
+                    dump==1 {{ print }}
+                    /END of PSCTR/ {{ dump=0 }}'
                 """
-                    awk < '%s' >> '%s' '
-                    BEGIN { dump=1 }
-                    /End of Dataset/ { dump=1 }
-                    dump==1 { print }
-                    /END of PSCTR/ { dump=0 }'
-                """
-                % (
-                    self.staging_upload_files.raw_file_object(path).os_path,
-                    self.staging_upload_files.raw_file_object(stripped_path).os_path,
-                )
             )
 
     def match_mainfiles(
diff --git a/nomad/search.py b/nomad/search.py
index eb34d65764cb191151e238c877fd89c4539be33c..1dc31f8abd05cd74a95393253f7fc35b75d2d15d 100644
--- a/nomad/search.py
+++ b/nomad/search.py
@@ -362,7 +362,7 @@ _entry_metadata_defaults = {
 _all_author_quantities = [
     quantity.name
     for quantity in EntryMetadata.m_def.all_quantities.values()
-    if isinstance(quantity.type, (UserReference, AuthorReference))
+    if isinstance(quantity.type, UserReference | AuthorReference)
 ]
 
 
@@ -825,7 +825,7 @@ def normalize_api_query(
             query=normalize_api_query(query, doc_type=doc_type, prefix=query.prefix),
         )
 
-    if isinstance(query, (models.Empty, models.Criteria)):
+    if isinstance(query, models.Empty | models.Criteria):
         return query
 
     raise NotImplementedError(f'Query type {query.__class__} is not supported')
@@ -952,7 +952,7 @@ def _api_to_es_query(
             quantity = validate_quantity(name, doc_type=doc_type)
             return quantity.get_range_query(value)
 
-        elif isinstance(value, (models.And, models.Or, models.Not)):
+        elif isinstance(value, models.And | models.Or | models.Not):
             return validate_query(value)
 
         # list of values is treated as an "all" over the items
@@ -1029,7 +1029,7 @@ def validate_pagination(
         and pagination.order_by != doc_type.id_field
         and ':' not in page_after_value
     ):
-        pagination.page_after_value = '%s:' % page_after_value
+        pagination.page_after_value = f'{page_after_value}:'
 
     return order_quantity, page_after_value
 
@@ -1117,7 +1117,7 @@ def _api_to_es_aggregation(
                 )
             metric_aggregation, metric_quantity = metrics[metric_name]
             es_aggs.metric(
-                'statistics:%s' % metric_name,
+                f'statistics:{metric_name}',
                 A(metric_aggregation, field=metric_quantity.qualified_field),
             )
 
@@ -1134,7 +1134,7 @@ def _api_to_es_aggregation(
     is_nested = False
     for nested_key in doc_type.nested_object_keys:
         if agg.quantity.startswith(nested_key):
-            es_aggs = es_aggs.bucket('nested_agg:%s' % name, 'nested', path=nested_key)
+            es_aggs = es_aggs.bucket(f'nested_agg:{name}', 'nested', path=nested_key)
             longest_nested_key = nested_key
             is_nested = True
 
@@ -1143,7 +1143,7 @@ def _api_to_es_aggregation(
     # quantity.
     if quantity.dynamic:
         es_aggs = es_aggs.bucket(
-            'nested_agg:%s' % name, 'nested', path='search_quantities'
+            f'nested_agg:{name}', 'nested', path='search_quantities'
         )
         is_nested = True
         longest_nested_key = 'search_quantities'
@@ -1227,7 +1227,7 @@ def _api_to_es_aggregation(
 
             # additional cardinality to get total
             es_aggs.metric(
-                'agg:%s:total' % name, 'cardinality', field=quantity.search_field
+                f'agg:{name}:total', 'cardinality', field=quantity.search_field
             )
         else:
             if agg.size is None:
@@ -1360,7 +1360,7 @@ def _api_to_es_aggregation(
                 )
             metric_aggregation, metric_quantity = metrics[metric_name]
             es_agg.metric(
-                'metric:%s' % metric_name,
+                f'metric:{metric_name}',
                 A(metric_aggregation, field=metric_quantity.qualified_field),
             )
 
@@ -1498,7 +1498,7 @@ def _es_to_api_aggregation(
                         data.append(Bucket(value=value, count=0, metrics=metrics))
 
         else:
-            total = es_aggs['agg:%s:total' % name]['value']
+            total = es_aggs[f'agg:{name}:total']['value']
             pagination = PaginationResponse(
                 total=total, **aggregation_dict['pagination']
             )
@@ -1544,8 +1544,8 @@ def _es_to_api_aggregation(
             raise NotImplementedError()
 
     if isinstance(agg, MinMaxAggregation):
-        min_value = es_aggs['agg:%s:min' % name]['value']
-        max_value = es_aggs['agg:%s:max' % name]['value']
+        min_value = es_aggs[f'agg:{name}:min']['value']
+        max_value = es_aggs[f'agg:{name}:max']['value']
 
         return AggregationResponse(
             min_max=MinMaxAggregationResponse(
diff --git a/nomad/utils/__init__.py b/nomad/utils/__init__.py
index a580d088f98d59cb7e34c246cb29de1f56f13131..3400910cfb9218c8f0e03ee201b07177556b5543 100644
--- a/nomad/utils/__init__.py
+++ b/nomad/utils/__init__.py
@@ -147,10 +147,7 @@ def set_console_log_level(level):
         for handler in root.handlers:
             if not isinstance(
                 handler,
-                (
-                    LogstashHandler,
-                    LogtransferHandler,
-                ),
+                LogstashHandler | LogtransferHandler,
             ):
                 handler.setLevel(level)
 
@@ -317,7 +314,7 @@ def timer(
     if logger_method is not None:
         logger_method(event, exec_time=stop - start, **kwargs)
     else:
-        logger.error('Unknown logger method %s.' % method)
+        logger.error(f'Unknown logger method {method}.')
 
 
 class archive:
@@ -454,7 +451,7 @@ class RestrictedDict(OrderedDict):
         """
         super().__init__()
 
-        if isinstance(mandatory_keys, (list, tuple, set)):
+        if isinstance(mandatory_keys, list | tuple | set):
             self._mandatory_keys = set(mandatory_keys)
         elif mandatory_keys is None:
             self._mandatory_keys = set()
@@ -463,7 +460,7 @@ class RestrictedDict(OrderedDict):
                 'Please provide the mandatory_keys as a list, tuple or set.'
             )
 
-        if isinstance(optional_keys, (list, tuple, set)):
+        if isinstance(optional_keys, list | tuple | set):
             self._optional_keys = set(optional_keys)
         elif optional_keys is None:
             self._optional_keys = set()
@@ -472,7 +469,7 @@ class RestrictedDict(OrderedDict):
                 'Please provide the optional_keys as a list, tuple or set.'
             )
 
-        if isinstance(forbidden_values, (list, tuple, set)):
+        if isinstance(forbidden_values, list | tuple | set):
             self._forbidden_values = set(forbidden_values)
         elif forbidden_values is None:
             self._forbidden_values = set()
@@ -521,9 +518,7 @@ class RestrictedDict(OrderedDict):
             else:
                 if match:
                     raise ValueError(
-                        "The value '{}' is not allowed but was set for key '{}'.".format(
-                            value, key
-                        )
+                        f"The value '{value}' is not allowed but was set for key '{key}'."
                     )
 
         # Check recursively
@@ -1060,7 +1055,7 @@ def dict_to_dataframe(
         elif isinstance(nested_dict, dict):
             for key, value in nested_dict.items():
                 new_key = f'{parent_key}{sep}{key}' if parent_key else key
-                if isinstance(value, (dict, list)):
+                if isinstance(value, dict | list):
                     items.update(
                         flatten_dict(value, new_key, current_depth + 1, df, col_name)
                     )
diff --git a/nomad/utils/exampledata.py b/nomad/utils/exampledata.py
index 31ac1586733439f0093ebabc1505cf4b08898cb9..6fec3a33a862a355d12967ccdf3b98d339b43a09 100644
--- a/nomad/utils/exampledata.py
+++ b/nomad/utils/exampledata.py
@@ -166,7 +166,7 @@ class ExampleData:
         else:
             parser_name = parser.__class__.__name__
 
-        assert parser is not None, 'there is no parser matching %s' % mainfile
+        assert parser is not None, f'there is no parser matching {mainfile}'
         parser.parse(mainfile=mainfile, archive=entry_archive)
 
         entry_metadata = entry_archive.metadata
@@ -406,7 +406,7 @@ class ExampleData:
         self.create_entry(
             entry_archive=archive,
             upload_id=upload_id,
-            entry_id='test_entry_id_%d' % id,
+            entry_id=f'test_entry_id_{id}',
             domain='dft',
             **kwargs,
         )
diff --git a/nomad/utils/structlogging.py b/nomad/utils/structlogging.py
index 879f4a208a39f6b07a4abe59fc719085efb0ed8e..0a97f0a9546af4c65106a12d06abcc3c20a6e4a0 100644
--- a/nomad/utils/structlogging.py
+++ b/nomad/utils/structlogging.py
@@ -145,7 +145,7 @@ class LogstashFormatter(logstash.formatter.LogstashFormatterBase):
                     'user_id',
                     'mainfile',
                 ]:
-                    key = 'nomad.%s' % key
+                    key = f'nomad.{key}'
                 else:
                     key = f'{record.name}.{key}'
 
@@ -229,11 +229,12 @@ class ConsoleFormatter(LogstashFormatter):
 
         out = StringIO()
         out.write(
-            '%s %s %s %s'
-            % (level.ljust(8), logger.ljust(20)[:20], time.ljust(19)[:19], event)
+            f'{level.ljust(8)} {logger.ljust(20)[:20]} {time.ljust(19)[:19]} {event}'
         )
         if exception is not None:
-            out.write('\n  - exception: %s' % str(exception).replace('\n', '\n    '))
+            out.write(
+                '\n  - exception: {}'.format(str(exception).replace('\n', '\n    '))
+            )
 
         for key in keys:
             if cls.short_format and key.startswith('nomad.'):
@@ -340,10 +341,7 @@ def configure_logging(console_log_level=config.services.console_log_level):
     for handler in root.handlers:
         if not isinstance(
             handler,
-            (
-                LogstashHandler,
-                LogtransferHandler,
-            ),
+            LogstashHandler | LogtransferHandler,
         ):
             handler.setLevel(console_log_level)
             handler.setFormatter(ConsoleFormatter())
diff --git a/pyproject.toml b/pyproject.toml
index 2ec3f67eb796cfa5da9700100d4927529f543bc2..54045953a4e3080aaa004eda935e615c0947f1f6 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -11,7 +11,7 @@ authors = [
 ]
 dynamic = ["version"]
 license = { text = "Apache-2.0" }
-requires-python = ">=3.10" # remember to update scripts/pyupgrade.sh
+requires-python = ">=3.10"
 
 dependencies = [
     'aniso8601>=7.0.0',
@@ -153,6 +153,7 @@ select = [
     "E",  # pycodestyle
     "W",  # pycodestyle
     "PL", # pylint
+    "UP", # pyupgrade
 ]
 ignore = [
     "E501",    # Line too long ({width} > {limit} characters)
@@ -168,9 +169,15 @@ ignore = [
     "PLW2901", # redefined-loop-name
     "PLR1714", # consider-using-in
     "PLR5501", # else-if-used
+    "UP035",   # deprecated-import
 ]
 fixable = ["ALL"]
 
+[tool.ruff.lint.extend-per-file-ignores]
+"nomad/app/v1/models/graph/utils.py" = [
+    "UP007",
+] # causes pydantic model building errors
+
 [tool.ruff.format]
 # use single quotes for strings.
 quote-style = "single"
diff --git a/scripts/pyupgrade.sh b/scripts/pyupgrade.sh
deleted file mode 100644
index f60515a860e24595820277ebcf0386e392612453..0000000000000000000000000000000000000000
--- a/scripts/pyupgrade.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-# Upgrade syntax of all python files under nomad/ folder using pyupgrade
-# pyupgrade is not installed by default
-# install it using `pip install pyupgrade` or `uv pip install pyupgrade`
-# using modern syntax to maximise maintainability and readability
-# it is also possible to use pyupgrade as a commit hook
-
-if ! command -v pyupgrade &> /dev/null; then
-    echo "Error: pyupgrade is not installed. Please install it using 'pip install pyupgrade'."
-    exit 1
-fi
-
-# Navigate to the parent folder based on script location
-cd "$(dirname "$0")/.." || exit 1
-
-# Find all Python files in the "nomad" folder and apply pyupgrade
-find nomad -type f -name "*.py" | while read -r file; do
-    pyupgrade --py310-plus "$file"
-done
diff --git a/tests/app/test_dcat.py b/tests/app/test_dcat.py
index 8e5b4b68291cd6e30faf8a9ae4b8b49fea7b4172..4f08b698f2a66cf80bed6f094aaaa07989a98cb2 100644
--- a/tests/app/test_dcat.py
+++ b/tests/app/test_dcat.py
@@ -79,7 +79,7 @@ def data(user1, user2, elastic_infra, mongo_module):
 
     for i in range(1, 11):
         example_attrs.update(
-            entry_id='test-id-%d' % i, last_processing_time=datetime(2020, 1, i)
+            entry_id=f'test-id-{i}', last_processing_time=datetime(2020, 1, i)
         )
         data.create_entry(**example_attrs)
 
@@ -101,7 +101,7 @@ def test_mapping(example_entry):
 
 def test_get_dataset(api, example_entry):
     entry_id = 'test-id'
-    rv = api.get('/datasets/%s' % entry_id)
+    rv = api.get(f'/datasets/{entry_id}')
     assert rv.status_code == 200
 
 
diff --git a/tests/app/test_optimade.py b/tests/app/test_optimade.py
index 468ca15fc71851cf4d59eebac174e64913eacedc..a7841ff89a7a45e7ca0c8977f932047c5dab4623 100644
--- a/tests/app/test_optimade.py
+++ b/tests/app/test_optimade.py
@@ -184,9 +184,7 @@ def assert_eq_attrib(data, key, ref, item=None):
 
 @pytest.mark.parametrize('limit, offset, results', [(1, 1, 1), (3, 2, 2), (5, 0, 4)])
 def test_list_endpoint_pagination(client, example_structures, limit, offset, results):
-    rv = client.get(
-        '/optimade/structures?page_limit=%d&page_offset=%d' % (limit, offset)
-    )
+    rv = client.get(f'/optimade/structures?page_limit={limit}&page_offset={offset}')
     assert rv.status_code == 200
     data = rv.json()
     assert len(data['data']) == results
@@ -194,7 +192,7 @@ def test_list_endpoint_pagination(client, example_structures, limit, offset, res
 
 @pytest.mark.parametrize('sort, order', [('nelements', 1), ('-nelements', -1)])
 def test_list_endpoint_sort(client, example_structures, sort, order):
-    rv = client.get('/optimade/structures?sort=%s' % sort)
+    rv = client.get(f'/optimade/structures?sort={sort}')
     assert rv.status_code == 200
     data = rv.json()['data']
 
@@ -228,7 +226,9 @@ def test_list_endpoint_response_fields(client, example_structures):
 
 def test_single_endpoint_response_fields(client, example_structures):
     rv = client.get(
-        '/optimade/structures/%s?response_fields=nelements,elements' % 'test_entry_id_1'
+        '/optimade/structures/{}?response_fields=nelements,elements'.format(
+            'test_entry_id_1'
+        )
     )
     assert rv.status_code == 200, json.dumps(rv.json(), indent=2)
     data = rv.json()
@@ -240,7 +240,7 @@ def test_single_endpoint_response_fields(client, example_structures):
 
 
 def test_single_endpoint(client, example_structures):
-    rv = client.get('/optimade/structures/%s' % 'test_entry_id_1')
+    rv = client.get('/optimade/structures/test_entry_id_1')
     assert rv.status_code == 200
     data = rv.json()
     for key in ['type', 'id', 'attributes']:
@@ -323,7 +323,7 @@ def test_structures_endpoint(client, example_structures):
 
 
 def test_structure_endpoint(client, example_structures):
-    rv = client.get('/optimade/structures/%s' % 'test_entry_id_1')
+    rv = client.get('/optimade/structures/test_entry_id_1')
     assert rv.status_code == 200
     data = rv.json()
     assert data.get('data') is not None
@@ -347,8 +347,9 @@ def test_nmd_properties_info(client, example_structures):
 
 def test_nmd_properties(client, example_structures):
     rv = client.get(
-        '/optimade/structures/%s'
-        % 'test_entry_id_1?response_fields=_nmd_results_material_elements,_nmd_results_material_structural_type,_nmd_doesnotexist,_nmd_archive_url'
+        '/optimade/structures/{}'.format(
+            'test_entry_id_1?response_fields=_nmd_results_material_elements,_nmd_results_material_structural_type,_nmd_doesnotexist,_nmd_archive_url'
+        )
     )
     assert rv.status_code == 200
     data = rv.json()
diff --git a/tests/app/v1/routers/common.py b/tests/app/v1/routers/common.py
index 81192fd430f817f34335a96f8e78695e072a4a14..19acc77e630f4f28250149dcc648a772472b60e9 100644
--- a/tests/app/v1/routers/common.py
+++ b/tests/app/v1/routers/common.py
@@ -139,7 +139,7 @@ def post_query_test_parameters(
 
 def get_query_test_parameters(
     str: dict, int: dict, date: dict, subsection: dict, total: int
-) -> List[Any]:
+) -> list[Any]:
     """Convenience function for constructing GET query test parameters.
 
     Args:
@@ -439,14 +439,14 @@ def aggregation_test_parameters(
     bool: dict,
     int: dict,
     pagination: dict,
-    pagination_order_by: Optional[dict],
+    pagination_order_by: dict | None,
     histogram_int: dict,
     histogram_date: dict,
     include: dict,
     metrics: dict,
     empty: dict,
-    fixed: Optional[dict],
-) -> List[Any]:
+    fixed: dict | None,
+) -> list[Any]:
     """Convenience function for constructing aggregation tests.
 
     Args:
@@ -1222,7 +1222,7 @@ def assert_metadata_response(response, status_code=None):
 def assert_required(data, required, default_key: str):
     # We flat out all keys in data and then make sure that the full qualified keys in the
     # data are consistent with the keys given in the required include and exclude.
-    keys: Set[str] = set()
+    keys: set[str] = set()
 
     def collect_keys(data, prefix=None):
         if isinstance(data, list):
@@ -1294,13 +1294,13 @@ def assert_aggregations(
 
     if agg_type == 'min_max':
         assert len(data) == 2
-        assert isinstance(data[0], (float, int))
-        assert isinstance(data[1], (float, int))
+        assert isinstance(data[0], float | int)
+        assert isinstance(data[1], float | int)
     elif agg_type == 'statistics':
         assert 'metrics' in agg_response
         for metric in agg.get('metrics', []):
             assert metric in data
-            assert isinstance(data[metric], (float, int))
+            assert isinstance(data[metric], float | int)
     else:
         assert total == -1 or total >= n_data
         assert size == -1 or size == n_data
@@ -1317,11 +1317,11 @@ def assert_aggregations(
             if agg_type == 'date_histogram':
                 assert re.match(r'\d{4}\-\d{2}\-\d{2}', value)
             elif agg_type == 'histogram':
-                assert isinstance(value, (float, int))
+                assert isinstance(value, float | int)
 
             for metric in agg.get('metrics', []):
                 assert metric in bucket['metrics']
-                assert isinstance(bucket['metrics'][metric], (float, int))
+                assert isinstance(bucket['metrics'][metric], float | int)
 
     if 'entries' in agg:
         for bucket in data:
@@ -1346,7 +1346,7 @@ def assert_query_response(client, test_method, query, total, status_code):
     if 'pagination' not in response_json:
         return
 
-    response = client.get('entries?%s' % urlencode(query, doseq=True))
+    response = client.get(f'entries?{urlencode(query, doseq=True)}')
 
     response_json = assert_metadata_response(response, status_code=status_code)
 
@@ -1547,7 +1547,7 @@ def perform_quantity_search_test(
     assert api_result == result
 
 
-def build_headers(accept: Optional[str] = None, user_auth: Optional[dict] = None):
+def build_headers(accept: str | None = None, user_auth: dict | None = None):
     headers = {}
     if accept:
         headers['Accept'] = accept
diff --git a/tests/app/v1/routers/test_auth.py b/tests/app/v1/routers/test_auth.py
index 904c80e7028dc10e67a1828863396b4efd84a2f5..c814260a3c4b502c8c841b09a2cb1fd2eb1302fe 100644
--- a/tests/app/v1/routers/test_auth.py
+++ b/tests/app/v1/routers/test_auth.py
@@ -27,7 +27,7 @@ def perform_get_token_test(client, http_method, status_code, username, password)
         )
     else:
         response = client.get(
-            'auth/token?%s' % urlencode(dict(username=username, password=password))
+            f'auth/token?{urlencode(dict(username=username, password=password))}'
         )
 
     assert response.status_code == status_code
diff --git a/tests/app/v1/routers/test_datasets.py b/tests/app/v1/routers/test_datasets.py
index a83c4d4f2806e048a5b4acbfe91d64ea3802fd0a..eecbdf50251be6853bbadb898d844a11726bf255 100644
--- a/tests/app/v1/routers/test_datasets.py
+++ b/tests/app/v1/routers/test_datasets.py
@@ -100,8 +100,8 @@ def data(elastic_function, raw_files_function, mongo_function, user1, user2):
     for i in range(1, 4):
         data.create_entry(
             upload_id='other_data',
-            entry_id='id_%02d' % i,
-            mainfile='test_content/%02d/mainfile.json' % i,
+            entry_id=f'id_{i:02d}',
+            mainfile=f'test_content/{i:02d}/mainfile.json',
         )
 
     data.save(with_files=False)
@@ -127,7 +127,7 @@ def assert_pagination(pagination):
 def assert_dataset(
     dataset,
     query: Query = None,
-    entries: List[str] = None,
+    entries: list[str] = None,
     n_entries: int = -1,
     **kwargs,
 ):
@@ -223,7 +223,7 @@ def test_datasets(client, data, query, size, status_code):
     ],
 )
 def test_dataset(client, data, dataset_id, result, status_code):
-    response = client.get('datasets/%s' % dataset_id)
+    response = client.get(f'datasets/{dataset_id}')
 
     assert_response(response, status_code=status_code)
     if status_code != 200:
@@ -354,7 +354,7 @@ def test_post_datasets(
     ],
 )
 def test_delete_dataset(auth_headers, client, data, dataset_id, user, status_code):
-    response = client.delete('datasets/%s' % dataset_id, headers=auth_headers[user])
+    response = client.delete(f'datasets/{dataset_id}', headers=auth_headers[user])
 
     assert_response(response, status_code=status_code)
     if status_code != 200:
@@ -410,7 +410,7 @@ def test_assign_doi_dataset(
     more_data.save(with_files=False)
 
     headers = auth_headers[user]
-    response = client.post('datasets/%s/action/doi' % dataset_id, headers=headers)
+    response = client.post(f'datasets/{dataset_id}/action/doi', headers=headers)
 
     assert_response(response, status_code=status_code)
     if status_code != 200:
diff --git a/tests/app/v1/routers/test_entries.py b/tests/app/v1/routers/test_entries.py
index 87cd767e34c84d65af7be017bc216d322ffb7b2e..8ec76fef0bfa476fb44668b678ee03be7fa34bc2 100644
--- a/tests/app/v1/routers/test_entries.py
+++ b/tests/app/v1/routers/test_entries.py
@@ -92,7 +92,7 @@ def perform_entries_raw_test(
         if owner is not None:
             params['owner'] = owner
         response = client.get(
-            'entries/raw?%s' % urlencode(params, doseq=True), headers=headers
+            f'entries/raw?{urlencode(params, doseq=True)}', headers=headers
         )
 
     else:
@@ -129,7 +129,7 @@ def perform_entries_rawdir_test(
         for value in kwargs.values():
             params.update(**value)
         response = client.get(
-            'entries/rawdir?%s' % urlencode(params, doseq=True), headers=headers
+            f'entries/rawdir?{urlencode(params, doseq=True)}', headers=headers
         )
 
     elif http_method == 'post':
@@ -188,7 +188,7 @@ def perform_entries_archive_download_test(
         if owner is not None:
             params['owner'] = owner
         response = client.get(
-            'entries/archive/download?%s' % urlencode(params, doseq=True),
+            f'entries/archive/download?{urlencode(params, doseq=True)}',
             headers=headers,
         )
 
@@ -219,7 +219,7 @@ def perform_entries_archive_test(
         if 'pagination' in kwargs:
             params.update(**kwargs['pagination'])
         response = client.get(
-            'entries/archive?%s' % urlencode(params, doseq=True), headers=headers
+            f'entries/archive?{urlencode(params, doseq=True)}', headers=headers
         )
 
     else:
@@ -590,7 +590,7 @@ def test_entry_metadata(
     auth_headers, client, example_data, user, entry_id, required, status_code
 ):
     response = client.get(
-        'entries/%s?%s' % (entry_id, urlencode(required, doseq=True)),
+        f'entries/{entry_id}?{urlencode(required, doseq=True)}',
         headers=auth_headers[user],
     )
     response_json = assert_metadata_response(response, status_code=status_code)
@@ -773,7 +773,7 @@ def test_entries_download_max(
 def test_entry_rawdir(
     auth_headers, client, example_data, user, entry_id, files_per_entry, status_code
 ):
-    response = client.get('entries/%s/rawdir' % entry_id, headers=auth_headers[user])
+    response = client.get(f'entries/{entry_id}/rawdir', headers=auth_headers[user])
     assert_response(response, status_code)
     if status_code == 200:
         assert_entry_rawdir_response(response.json(), files_per_entry=files_per_entry)
@@ -806,7 +806,7 @@ def test_entry_raw(
     status_code,
 ):
     response = client.get(
-        'entries/%s/raw?%s' % (entry_id, urlencode(files, doseq=True)),
+        f'entries/{entry_id}/raw?{urlencode(files, doseq=True)}',
         headers=auth_headers[user],
     )
     assert_response(response, status_code)
@@ -1074,7 +1074,7 @@ def test_entries_archive(client, example_data, required, status_code):
     ],
 )
 def test_entry_archive(auth_headers, client, example_data, user, entry_id, status_code):
-    response = client.get('entries/%s/archive' % entry_id, headers=auth_headers[user])
+    response = client.get(f'entries/{entry_id}/archive', headers=auth_headers[user])
     assert_response(response, status_code)
     if status_code == 200:
         assert_archive_response(response.json())
diff --git a/tests/app/v1/routers/test_entries_edit.py b/tests/app/v1/routers/test_entries_edit.py
index 07cd132275b4032bc51114459eedd21abfed156f..cd793d3fb4c43a5696bf9dc7800296de9b254b88 100644
--- a/tests/app/v1/routers/test_entries_edit.py
+++ b/tests/app/v1/routers/test_entries_edit.py
@@ -128,7 +128,7 @@ class TestEditRepo:
 
     def mongo(self, *args, edited: bool = True, **kwargs):
         for entry_id in args:
-            entry = proc.Entry.objects(entry_id='test_entry_id_%d' % entry_id).first()
+            entry = proc.Entry.objects(entry_id=f'test_entry_id_{entry_id}').first()
             assert entry is not None
             if edited:
                 assert entry.last_edit_time is not None
@@ -141,7 +141,7 @@ class TestEditRepo:
     def assert_elastic(self, *args, invert: bool = False, **kwargs):
         def assert_entry(get_entries):
             for arg in args:
-                entry_id = 'test_entry_id_%d' % arg
+                entry_id = f'test_entry_id_{arg}'
                 entries = list(get_entries(entry_id))
                 assert len(entries) > 0, entry_id
                 for entry in entries:
@@ -285,7 +285,7 @@ class TestEditRepo:
 
         assert rv.status_code == 200
         rv = self.api.post(
-            'datasets/%s/action/doi' % self.example_dataset.dataset_name,
+            f'datasets/{self.example_dataset.dataset_name}/action/doi',
             headers=self.user_auth,
         )
         assert rv.status_code == 200
diff --git a/tests/app/v1/routers/test_materials.py b/tests/app/v1/routers/test_materials.py
index eacdc7e376d1443d4ad800e96400a2ee05a90f09..3adbb143e28db88adf816bda0b6fb246d073b7c3 100644
--- a/tests/app/v1/routers/test_materials.py
+++ b/tests/app/v1/routers/test_materials.py
@@ -161,9 +161,7 @@ def test_materials_required(client, example_data, required, status_code, http_me
     ],
 )
 def test_material_metadata(client, example_data, material_id, required, status_code):
-    response = client.get(
-        'materials/%s?%s' % (material_id, urlencode(required, doseq=True))
-    )
+    response = client.get(f'materials/{material_id}?{urlencode(required, doseq=True)}')
     response_json = assert_metadata_response(response, status_code=status_code)
 
     if response_json is None:
@@ -302,7 +300,7 @@ def test_materials_get_query(client, example_data, query, status_code, total):
     if 'pagination' not in response_json:
         return
 
-    response = client.get('materials?%s' % urlencode(query, doseq=True))
+    response = client.get(f'materials?{urlencode(query, doseq=True)}')
 
     response_json = assert_metadata_response(response, status_code=status_code)
 
diff --git a/tests/app/v1/routers/uploads/test_basic_uploads.py b/tests/app/v1/routers/uploads/test_basic_uploads.py
index f539acf485acd26ebd73cf53aae0f9e1499a4965..f1e6d9bbed79041c3260d0a6d0be12c43bf38f88 100644
--- a/tests/app/v1/routers/uploads/test_basic_uploads.py
+++ b/tests/app/v1/routers/uploads/test_basic_uploads.py
@@ -22,7 +22,8 @@ import time
 import zipfile
 from datetime import datetime
 import tempfile
-from typing import Any, Dict, Iterable, List
+from typing import Any, Dict, List
+from collections.abc import Iterable
 
 import pytest
 import requests
@@ -401,7 +402,7 @@ def block_until_completed(client, upload_id: str, user_auth):
     start_time = time.time()
     while time.time() - start_time < config.tests.default_timeout:
         time.sleep(0.1)
-        response = client.get('uploads/%s' % upload_id, headers=user_auth)
+        response = client.get(f'uploads/{upload_id}', headers=user_auth)
         if response.status_code == 200:
             response_json = response.json()
             assert_upload(response_json)
@@ -412,14 +413,13 @@ def block_until_completed(client, upload_id: str, user_auth):
             return None
         else:
             raise Exception(
-                'unexpected status code while blocking for upload processing: %s'
-                % str(response.status_code)
+                f'unexpected status code while blocking for upload processing: {response.status_code}'
             )
     raise Exception('Timed out while waiting for upload processing to finish')
 
 
 def get_upload_entries_metadata(
-    entries: List[Dict[str, Any]],
+    entries: list[dict[str, Any]],
 ) -> Iterable[EntryMetadata]:
     """
     Create a iterable of :class:`EntryMetadata` from a API upload json record, plus a
diff --git a/tests/app/v1/test_models.py b/tests/app/v1/test_models.py
index 130483bbc0e8b89711b07c7b5cbd4bf564f10c29..821fea8dc341e26b531b3b046b979c4492159faa 100644
--- a/tests/app/v1/test_models.py
+++ b/tests/app/v1/test_models.py
@@ -167,7 +167,7 @@ def test_module():
         ),
     ],
 )
-def test_validation(request_yaml: str, paths: List[str], error_path: str):
+def test_validation(request_yaml: str, paths: list[str], error_path: str):
     try:
         request = GraphRequest.model_validate(yaml.safe_load(strip(request_yaml)))
     except ValidationError as error:
diff --git a/tests/archive/test_archive.py b/tests/archive/test_archive.py
index 36e604641c2a9197f4581f6e61872a43a01671a4..0d972d4ff9a887c25045db690f50a68cb3a92a47 100644
--- a/tests/archive/test_archive.py
+++ b/tests/archive/test_archive.py
@@ -56,7 +56,7 @@ from nomad.utils.exampledata import ExampleData
 
 
 def create_example_uuid(index: int = 0):
-    return ('{:%dd}' % utils.default_hash_len).format(index)
+    return f'{index:{utils.default_hash_len}d}'
 
 
 @pytest.fixture(scope='session')
@@ -221,7 +221,7 @@ def test_read_archive_multi(monkeypatch, example_uuid, example_entry, use_blocke
             assert float(i) == entry['large_list'][i]
 
 
-test_query_example: Dict[Any, Any] = {
+test_query_example: dict[Any, Any] = {
     'c1': {
         's1': {'ss1': [{'p1': 1.0, 'p2': 'x'}, {'p1': 1.5, 'p2': 'y'}]},
         's2': [{'p1': ['a', 'b'], 'p2': True}],
@@ -1117,8 +1117,8 @@ def assert_required_results(
     results: dict,
     required: dict,
     archive: MSection,
-    current_results: Union[dict, str] = None,
-    current_archive_serialized: Union[str, dict] = None,
+    current_results: dict | str = None,
+    current_archive_serialized: str | dict = None,
 ):
     """
     Asserts if the resulting dict from a :class:`RequiredReader` contains everything that
diff --git a/tests/archive/test_storage.py b/tests/archive/test_storage.py
index 72d9f241664c2647b581f3591f5394e79ac2c562..4a2de3dbf6bce3c2e3b9ab8bfd8553060b55f3f7 100644
--- a/tests/archive/test_storage.py
+++ b/tests/archive/test_storage.py
@@ -49,11 +49,11 @@ def find_all_paths(json_obj, path=None, paths_list=None):
     if paths_list is None:
         paths_list = []
 
-    if isinstance(json_obj, (dict, ArchiveDict)):
+    if isinstance(json_obj, dict | ArchiveDict):
         for key, value in json_obj.items():
             new_path = [key] if not path else path + [key]
             find_all_paths(value, new_path, paths_list)
-    elif isinstance(json_obj, (list, ArchiveList)):
+    elif isinstance(json_obj, list | ArchiveList):
         for index, value in enumerate(json_obj):
             new_path = [index] if not path else path + [index]
             find_all_paths(value, new_path, paths_list)
diff --git a/tests/conftest.py b/tests/conftest.py
index 9cc503438128a6feb9f50f2ca239b94894c71ef5..bea0af18be93252b6f5715d635e09546e75e1e83 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -291,7 +291,7 @@ def central_logstash_mock(monkeypatch):
                 try:
                     line = self.rfile.readline()
                     # print(f'received {line=}')
-                except socket.timeout:
+                except TimeoutError:
                     # print(f'server timed out')
                     line = b''  # if time out, close connection
 
@@ -365,7 +365,7 @@ class MockFileManager:
     def read(self, name):
         name = os.path.abspath(name)
         if name not in self.files:
-            raise IOError(2, "No such file or directory: '%s'" % name)
+            raise OSError(2, f"No such file or directory: '{name}'")
 
         return self.files[name].getvalue()
 
diff --git a/tests/data/schemas/nomadschemaexample/schema.py b/tests/data/schemas/nomadschemaexample/schema.py
index 6d93b8d258549b38265c622ede60334f594baa69..2bf501f0067643332f9dd9d0f98312a11d090b1e 100644
--- a/tests/data/schemas/nomadschemaexample/schema.py
+++ b/tests/data/schemas/nomadschemaexample/schema.py
@@ -96,7 +96,7 @@ class MySchema(EntryData):
     child_recursive = SubSection(section_def=MySectionRecursiveA)
 
     def normalize(self, archive, logger):
-        super(MySchema, self).normalize(archive, logger)
+        super().normalize(archive, logger)
 
 
 m_package.__init_metainfo__()
diff --git a/tests/datamodel/metainfo/test_substance.py b/tests/datamodel/metainfo/test_substance.py
index c30135d817e1b39d5a4865f490058084d5233e1c..78c63e39a54c7ba8257f95167652b4698bb1b8a3 100644
--- a/tests/datamodel/metainfo/test_substance.py
+++ b/tests/datamodel/metainfo/test_substance.py
@@ -16,7 +16,7 @@
 # limitations under the License.
 #
 
-from typing import Iterable
+from collections.abc import Iterable
 import pytest
 import json
 
@@ -39,9 +39,7 @@ class MockResponse:
 
 @pytest.fixture(scope='function')
 def external_api_fixture(monkeypatch):
-    with open(
-        'tests/data/datamodel/metainfo/external_api_mock_response.json', 'r'
-    ) as fp:
+    with open('tests/data/datamodel/metainfo/external_api_mock_response.json') as fp:
         mock_responses = json.load(fp)
 
     def pub_chem_api_get_properties(cid: int, properties: Iterable[str]):
diff --git a/tests/datamodel/test_context.py b/tests/datamodel/test_context.py
index 267e71aed435818a9c2d5628660b6cffffb6e417..410c385b5d0fea5d2679321c74e5dafaaf2556d4 100644
--- a/tests/datamodel/test_context.py
+++ b/tests/datamodel/test_context.py
@@ -439,7 +439,7 @@ def test_server_external_schema(upload1_contents, upload2_contents, raw_files_fu
     for index, (file_name, content) in enumerate(upload1_contents.items()):
         if not re.match(r'.*.archive.json', file_name):
             continue
-        entry_id = 'upload1_entry{}'.format(index)
+        entry_id = f'upload1_entry{index}'
         archive = EntryArchive(
             m_context=context1,
             metadata=EntryMetadata(
@@ -462,7 +462,7 @@ def test_server_external_schema(upload1_contents, upload2_contents, raw_files_fu
     parser = ArchiveParser()
 
     for index, (file_name, content) in enumerate(upload2_contents.items()):
-        entry_id = 'upload2_entry{}'.format(index)
+        entry_id = f'upload2_entry{index}'
         archive = EntryArchive(
             m_context=context2,
             metadata=EntryMetadata(
@@ -491,7 +491,7 @@ def test_client_custom_schema(api_v1, published_wo_user_metadata):
         published_wo_user_metadata.upload_id, f'examples_template/template.json'
     )
 
-    with open(full_path, 'r') as f:
+    with open(full_path) as f:
         text = f.read().replace(
             '/run/0',
             f'{url}/uploads/{published_wo_user_metadata.upload_id}/archive/{entry_id}#/run/0',
@@ -572,7 +572,7 @@ def test_client_external_schema(
 
     parser = ArchiveParser()
     for index, (file_name, content) in enumerate(referencing_upload_contents.items()):
-        entry_id = 'upload2_entry{}'.format(index)
+        entry_id = f'upload2_entry{index}'
         archive = EntryArchive(
             m_context=context2,
             metadata=EntryMetadata(
diff --git a/tests/examples/test_docs.py b/tests/examples/test_docs.py
index 46c43ed8ed9eed3c273294472191dfc6030bef1b..e8b737a9fbc0a21a85cd8e5d7893d0b0a5080697 100644
--- a/tests/examples/test_docs.py
+++ b/tests/examples/test_docs.py
@@ -14,7 +14,7 @@ def _file(path):
 
 
 def _load_yaml(path):
-    with open(_file(path), 'rt') as f:
+    with open(_file(path)) as f:
         return yaml.safe_load(f)
 
 
diff --git a/tests/fixtures/data.py b/tests/fixtures/data.py
index 190bdde29598d2cef300e154d7dba0377d61ef96..d974026e243d981642b9701349b7d5257dda528a 100644
--- a/tests/fixtures/data.py
+++ b/tests/fixtures/data.py
@@ -31,7 +31,7 @@ from tests.utils import (
 
 
 @pytest.fixture(scope='session')
-def example_mainfile() -> Tuple[str, str]:
+def example_mainfile() -> tuple[str, str]:
     return ('parsers/template', 'tests/data/templates/template.json')
 
 
@@ -82,7 +82,7 @@ def internal_example_user_metadata(example_user_metadata) -> dict:
 
 
 @pytest.fixture(scope='session')
-def parsed(example_mainfile: Tuple[str, str]) -> EntryArchive:
+def parsed(example_mainfile: tuple[str, str]) -> EntryArchive:
     """Provides a parsed entry in the form of an EntryArchive."""
     parser, mainfile = example_mainfile
     return test_parsing.run_singular_parser(parser, mainfile)
@@ -103,7 +103,7 @@ def normalized(parsed: EntryArchive) -> EntryArchive:
 
 
 @pytest.fixture(scope='function')
-def uploaded(example_upload: str, raw_files_function) -> Tuple[str, str]:
+def uploaded(example_upload: str, raw_files_function) -> tuple[str, str]:
     """
     Provides a uploaded with uploaded example file and gives the upload_id.
     Clears files after test.
@@ -115,7 +115,7 @@ def uploaded(example_upload: str, raw_files_function) -> Tuple[str, str]:
 @pytest.fixture(scope='function')
 def non_empty_uploaded(
     non_empty_example_upload: str, raw_files_function
-) -> Tuple[str, str]:
+) -> tuple[str, str]:
     example_upload_id = os.path.basename(non_empty_example_upload).replace('.zip', '')
     return example_upload_id, non_empty_example_upload
 
@@ -212,7 +212,7 @@ def oasis_publishable_upload(
 @pytest.mark.timeout(config.tests.default_timeout)
 @pytest.fixture(scope='function')
 def processed(
-    uploaded: Tuple[str, str], user1: User, proc_infra, mails
+    uploaded: tuple[str, str], user1: User, proc_infra, mails
 ) -> processing.Upload:
     """
     Provides a processed upload. Upload was uploaded with user1.
@@ -224,12 +224,11 @@ def processed(
 @pytest.fixture(scope='function')
 def processeds(
     non_empty_example_upload: str, user1: User, proc_infra
-) -> List[processing.Upload]:
-    result: List[processing.Upload] = []
+) -> list[processing.Upload]:
+    result: list[processing.Upload] = []
     for i in range(2):
-        upload_id = '%s_%d' % (
-            os.path.basename(non_empty_example_upload).replace('.zip', ''),
-            i,
+        upload_id = (
+            f'{os.path.basename(non_empty_example_upload).replace(".zip", "")}_{i}'
         )
         result.append(
             test_processing.run_processing((upload_id, non_empty_example_upload), user1)
@@ -241,7 +240,7 @@ def processeds(
 @pytest.mark.timeout(config.tests.default_timeout)
 @pytest.fixture(scope='function')
 def non_empty_processed(
-    non_empty_uploaded: Tuple[str, str], user1: User, proc_infra
+    non_empty_uploaded: tuple[str, str], user1: User, proc_infra
 ) -> processing.Upload:
     """
     Provides a processed upload. Upload was uploaded with user1.
@@ -358,9 +357,9 @@ def example_data(
         upload_id='id_published', upload_name='name_published', published=True
     )
     for i in range(1, 24):
-        entry_id = 'id_%02d' % i
-        material_id = 'id_%02d' % (int(math.floor(i / 4)) + 1)
-        mainfile = 'test_content/subdir/test_entry_%02d/mainfile.json' % i
+        entry_id = f'id_{i:02d}'
+        material_id = f'id_{int(math.floor(i / 4)) + 1:02d}'
+        mainfile = f'test_content/subdir/test_entry_{i:02d}/mainfile.json'
         kwargs = dict(
             optimade=OptimadeEntry(nelements=2, elements=['H', 'O']),
         )
diff --git a/tests/fixtures/group_uploads.py b/tests/fixtures/group_uploads.py
index 9b03ae9e69db4eab8df5d639c96cb4cd848a252c..b0d8109c89dfc6a64e179ae0e36233d505ff6331 100644
--- a/tests/fixtures/group_uploads.py
+++ b/tests/fixtures/group_uploads.py
@@ -10,7 +10,7 @@ Values:
 - XgKLgMN: field X with groupKL and groupMN
 """
 
-from typing import Sequence
+from collections.abc import Sequence
 import pytest
 
 from nomad.utils.exampledata import ExampleData
diff --git a/tests/fixtures/infrastructure.py b/tests/fixtures/infrastructure.py
index 29d53ca8a7067d3eba653de3a2d0ddd3356c2566..d2840f2fd39afe12321cdcf26424d62f94438057 100644
--- a/tests/fixtures/infrastructure.py
+++ b/tests/fixtures/infrastructure.py
@@ -160,7 +160,7 @@ def clear_elastic_infra():
     from elasticsearch_dsl import connections
 
     connection = connections.create_connection(
-        hosts=['%s:%d' % (config.elastic.host, config.elastic.port)]
+        hosts=[f'{config.elastic.host}:{config.elastic.port}']
     )
 
     for index in indices:
diff --git a/tests/metainfo/test_elasticsearch_extension.py b/tests/metainfo/test_elasticsearch_extension.py
index 0dd64730b87d688b761ca2beb02ab5ac2d8a03cb..d1e4b04da5ada696fbd9a5a10531dfa417c21b65 100644
--- a/tests/metainfo/test_elasticsearch_extension.py
+++ b/tests/metainfo/test_elasticsearch_extension.py
@@ -270,7 +270,7 @@ def assert_entry_indexed(entry: Entry):
     )
 
 
-def assert_entries_indexed(entries: List[Entry]):
+def assert_entries_indexed(entries: list[Entry]):
     """
     Assert that the given entries and only the given entries and their materials are
     indexed.
diff --git a/tests/metainfo/test_quantities.py b/tests/metainfo/test_quantities.py
index feed120d44403ba91164ef537ccdcc832f3bd000..ad39b0e06313fce8e2dfffb4b6845d420cd79adb 100644
--- a/tests/metainfo/test_quantities.py
+++ b/tests/metainfo/test_quantities.py
@@ -233,7 +233,7 @@ def test_complex_number(unit, quantity_type, value, shape):
 
     def assert_complex_equal():
         result = section.quantity.m if unit else section.quantity
-        if isinstance(value, (list, np.ndarray)):
+        if isinstance(value, list | np.ndarray):
             for a, b in zip(result, value):
                 assert a == quantity_type(b)
         elif not isinstance(value, pint.Quantity):
diff --git a/tests/metainfo/test_to_dict.py b/tests/metainfo/test_to_dict.py
index dc698f47ec90ecfa93bc09cdfd158b87f78f3034..1069001b29f14a2d5395dcd80cc2a4b2a559f687 100644
--- a/tests/metainfo/test_to_dict.py
+++ b/tests/metainfo/test_to_dict.py
@@ -347,7 +347,7 @@ def test_schema_definition_id(schema_yaml):
     pkg.init_metainfo()
 
     def check_dict(value):
-        if not isinstance(value, (dict, list)):
+        if not isinstance(value, dict | list):
             return value
         if isinstance(value, list):
             return [check_dict(v) for v in value]
diff --git a/tests/normalizing/conftest.py b/tests/normalizing/conftest.py
index 7a5596fa7bd662b1a0c84f0068b8277059325982..2ac967401754e234cab41f6c1d59001ad845bbb7 100644
--- a/tests/normalizing/conftest.py
+++ b/tests/normalizing/conftest.py
@@ -17,7 +17,8 @@
 #
 from collections import defaultdict
 import numpy as np
-from typing import Any, Iterable, List, Union
+from typing import Any, List, Union
+from collections.abc import Iterable
 import pytest
 from ase import Atoms
 import ase.build
@@ -277,8 +278,8 @@ def robust_compare(a: Any, b: Any) -> bool:
     elif isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
         return np.array_equal(a, b)
     # Check if both are non-iterables (e.g., int, float)
-    elif not isinstance(a, (np.ndarray, list, tuple)) and not isinstance(
-        b, (np.ndarray, list, tuple)
+    elif not isinstance(a, np.ndarray | list | tuple) and not isinstance(
+        b, np.ndarray | list | tuple
     ):
         return a == b
     # Fallback case if one is an iterable and the other is not
@@ -308,11 +309,11 @@ def check_template_active_orbitals(template: EntryArchive, **kwargs) -> dict[str
                     continue  # skip quantities that were unasked for
                 # Assuming quantity_value is either a list or a single data object
                 if not isinstance(
-                    quantity_value, (list, np.ndarray)
+                    quantity_value, list | np.ndarray
                 ):  # for multiple core holes
                     quantity_value = [quantity_value]
                 extracted_values = getattr(top.active_orbitals, quantity_name, [])
-                if not isinstance(extracted_values, (list, np.ndarray)):
+                if not isinstance(extracted_values, list | np.ndarray):
                     extracted_values = [extracted_values]
                 # run over all reference values
                 if len(extracted_values) != len(quantity_value):
@@ -445,10 +446,10 @@ def get_section_system(atoms: Atoms):
 
 def add_template_dos(
     template: EntryArchive,
-    fill: List = [[[0, 1], [2, 3]]],
-    energy_reference_fermi: Union[float, None] = None,
-    energy_reference_highest_occupied: Union[float, None] = None,
-    energy_reference_lowest_unoccupied: Union[float, None] = None,
+    fill: list = [[[0, 1], [2, 3]]],
+    energy_reference_fermi: float | None = None,
+    energy_reference_highest_occupied: float | None = None,
+    energy_reference_lowest_unoccupied: float | None = None,
     n_values: int = 101,
     type: str = 'electronic',
 ) -> EntryArchive:
@@ -498,10 +499,10 @@ def add_template_dos(
 
 
 def get_template_dos(
-    fill: List = [[[0, 1], [2, 3]]],
-    energy_reference_fermi: Union[float, None] = None,
-    energy_reference_highest_occupied: Union[float, None] = None,
-    energy_reference_lowest_unoccupied: Union[float, None] = None,
+    fill: list = [[[0, 1], [2, 3]]],
+    energy_reference_fermi: float | None = None,
+    energy_reference_highest_occupied: float | None = None,
+    energy_reference_lowest_unoccupied: float | None = None,
     n_values: int = 101,
     type: str = 'electronic',
     normalize: bool = True,
@@ -523,7 +524,7 @@ def get_template_dos(
 
 def add_template_band_structure(
     template: EntryArchive,
-    band_gaps: List = None,
+    band_gaps: list = None,
     type: str = 'electronic',
     has_references: bool = True,
     has_reciprocal_cell: bool = True,
@@ -548,9 +549,9 @@ def add_template_band_structure(
         bs = runschema.calculation.BandStructure()
         scc.band_structure_electronic.append(bs)
         n_spin_channels = len(band_gaps)
-        fermi: List[float] = []
-        highest: List[float] = []
-        lowest: List[float] = []
+        fermi: list[float] = []
+        highest: list[float] = []
+        lowest: list[float] = []
         for gap in band_gaps:
             if gap is None:
                 highest.append(0)
@@ -573,10 +574,8 @@ def add_template_band_structure(
     full_space = np.linspace(0, 2 * np.pi, 200)
     k, m = divmod(len(full_space), n_segments)
     space = list(
-        (
-            full_space[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)]
-            for i in range(n_segments)
-        )
+        full_space[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)]
+        for i in range(n_segments)
     )
     for i_seg in range(n_segments):
         krange = space[i_seg]
@@ -610,7 +609,7 @@ def add_template_band_structure(
 
 
 def get_template_band_structure(
-    band_gaps: List = None,
+    band_gaps: list = None,
     type: str = 'electronic',
     has_references: bool = True,
     has_reciprocal_cell: bool = True,
@@ -1675,12 +1674,12 @@ def create_system(
     structural_type: str,
     dimensionality: str,
     building_block: str,
-    elements: List[str],
+    elements: list[str],
     formula_hill: str,
     formula_reduced: str,
     formula_anonymous: str,
     system_relation: Relation,
-    indices: List[List[int]] = None,
+    indices: list[list[int]] = None,
     material_id: str = None,
     atoms: ArchiveSection = None,
     cell: Cell = None,
@@ -1766,7 +1765,7 @@ def rattle(atoms):
     return atoms
 
 
-def single_cu_surface_topology() -> List[ResultSystem]:
+def single_cu_surface_topology() -> list[ResultSystem]:
     """Copper surface topology"""
     conv_cell = conv_fcc('Cu')
     surface = surf(conv_cell, (1, 0, 0))
@@ -1815,7 +1814,7 @@ def single_cu_surface_topology() -> List[ResultSystem]:
     return [subsystem, convsystem]
 
 
-def single_cr_surface_topology() -> List[ResultSystem]:
+def single_cr_surface_topology() -> list[ResultSystem]:
     """Cr surface topology"""
     conv_cell = conv_bcc('Cr')
     surface = surf(conv_cell, (1, 0, 0))
@@ -1863,7 +1862,7 @@ def single_cr_surface_topology() -> List[ResultSystem]:
     return [subsystem, convsystem]
 
 
-def single_ni_surface_topology() -> List[ResultSystem]:
+def single_ni_surface_topology() -> list[ResultSystem]:
     """Ni surface topology"""
     conv_cell = conv_fcc('Ni')
     surface = surf(conv_cell, (1, 0, 0))
@@ -1911,7 +1910,7 @@ def single_ni_surface_topology() -> List[ResultSystem]:
     return [subsystem, convsystem]
 
 
-def stacked_cu_ni_surface_topology() -> List[ResultSystem]:
+def stacked_cu_ni_surface_topology() -> list[ResultSystem]:
     topologies_cu = single_cu_surface_topology()
     topologies_ni = single_ni_surface_topology()
 
@@ -1942,7 +1941,7 @@ def graphene() -> Atoms:
     return system_c
 
 
-def graphene_topology() -> List[ResultSystem]:
+def graphene_topology() -> list[ResultSystem]:
     """Graphene topology"""
     subsystem = create_system(
         label='subsystem',
@@ -2020,7 +2019,7 @@ def boron_nitride() -> Atoms:
     return bn_16
 
 
-def boron_nitride_topology() -> List[ResultSystem]:
+def boron_nitride_topology() -> list[ResultSystem]:
     """Boron nitride topology"""
     subsystem = create_system(
         label='subsystem',
@@ -2099,7 +2098,7 @@ def mos2() -> Atoms:
     return stacked_2d_mos2_2
 
 
-def mos2_topology() -> List[ResultSystem]:
+def mos2_topology() -> list[ResultSystem]:
     subsystem = create_system(
         label='subsystem',
         structural_type='2D',
diff --git a/tests/normalizing/test_topology.py b/tests/normalizing/test_topology.py
index 3d3983e7399cd91a33ae6e3248dbdf0de7a67df2..7dc058c0834204f2ffc892647a13aacf3c91596f 100644
--- a/tests/normalizing/test_topology.py
+++ b/tests/normalizing/test_topology.py
@@ -315,7 +315,7 @@ def test_topology_2d(surface, ref_topologies):
             assert real_value.magnitude == pytest.approx(
                 ref_value.magnitude, rel=0.01, abs=0
             )
-        elif isinstance(ref_value, (np.ndarray, list)):
+        elif isinstance(ref_value, np.ndarray | list):
             real_array = np.array(real_value)
             ref_array = np.array(ref_value)
             if ref_array.dtype == bool:
diff --git a/tests/parsing/test_archive_parser.py b/tests/parsing/test_archive_parser.py
index 1ac3b157431bbf02ef0bc4ea01ce2f36b7b982a3..b9666aa564265de69aeb7d5bb64f9dcada4f8245 100644
--- a/tests/parsing/test_archive_parser.py
+++ b/tests/parsing/test_archive_parser.py
@@ -44,7 +44,7 @@ def test_archive_parser(raw_files_function):
     }
 
     mainfile = os.path.join(config.fs.tmp, 'test_mainfile.archive.json')
-    with open(mainfile, 'wt') as f:
+    with open(mainfile, 'w') as f:
         json.dump(archive_data, f)
 
     archive = EntryArchive()
diff --git a/tests/parsing/test_parsing.py b/tests/parsing/test_parsing.py
index 9a2047ac8f79ebf933763f82181cad5acb3af29c..adfbece7378ccc90039d204d6b9a42ca1e502ba5 100644
--- a/tests/parsing/test_parsing.py
+++ b/tests/parsing/test_parsing.py
@@ -160,7 +160,7 @@ def parse_file(parser_name_and_mainfile) -> EntryArchive:
     return run_singular_parser(parser_name, mainfile)
 
 
-@pytest.fixture(params=parser_examples, ids=lambda spec: '%s-%s' % spec)
+@pytest.fixture(params=parser_examples, ids=lambda spec: '{}-{}'.format(*spec))
 def parsed_example(request) -> EntryArchive:
     parser_name, mainfile = request.param
     result = run_singular_parser(parser_name, mainfile)
@@ -290,10 +290,7 @@ def test_match(
             matched_mainfiles[mainfile] = parser
 
     assert len(matched_mainfiles) >= num_output_files, ', '.join(
-        [
-            '%s: %s' % (parser.name, mainfile)
-            for mainfile, parser in matched_mainfiles.items()
-        ]
+        [f'{parser.name}: {mainfile}' for mainfile, parser in matched_mainfiles.items()]
     )
 
 
diff --git a/tests/parsing/test_tabular.py b/tests/parsing/test_tabular.py
index 04af3aa4922ac146af0753c95f5620a80d240a48..c1554851b48f49e518c3955794a164ecaca2fe12 100644
--- a/tests/parsing/test_tabular.py
+++ b/tests/parsing/test_tabular.py
@@ -907,13 +907,13 @@ def get_files(schema=None, content=None):
     """
     if schema:
         schema_file = os.path.join(config.fs.tmp, 'my_schema.archive.yaml')
-        with open(schema_file, 'wt') as f:
+        with open(schema_file, 'w') as f:
             f.write(schema)
     else:
         schema_file = None
     if content:
         mainfile = os.path.join(config.fs.tmp, 'test.my_schema.archive.csv')
-        with open(mainfile, 'wt') as f:
+        with open(mainfile, 'w') as f:
             f.write(content)
     else:
         mainfile = None
diff --git a/tests/processing/test_base.py b/tests/processing/test_base.py
index d2704e91fbb8b33f1eff240e9b99faed575788ea..3a3384ddc1f2c450ac1f5664c89f1ff566febc34 100644
--- a/tests/processing/test_base.py
+++ b/tests/processing/test_base.py
@@ -36,7 +36,7 @@ random.seed(0)
 
 
 fail = 'FAIL'
-events: List[str] = []
+events: list[str] = []
 
 
 @pytest.fixture(scope='function')
@@ -54,7 +54,7 @@ def assert_proc(
     assert not proc.process_running
 
 
-def assert_events(expected_events: List[Union[str, List[str]]]):
+def assert_events(expected_events: list[str | list[str]]):
     ind = 0
     for expected in expected_events:
         if isinstance(expected, str):
@@ -205,8 +205,8 @@ class ParentProc(Proc):
         fail_spawn: bool = False,
         suffix: str = '',
         delay=0.1,
-        child_args: List[Any] = [],
-        join_args: List[Any] = [],
+        child_args: list[Any] = [],
+        join_args: list[Any] = [],
     ):
         """
         Arguments:
diff --git a/tests/processing/test_data.py b/tests/processing/test_data.py
index 105f56b92e6f25f29de2e807428583b017232efa..121dc06fbfd1613b5cc187bd56a258617973d009 100644
--- a/tests/processing/test_data.py
+++ b/tests/processing/test_data.py
@@ -16,7 +16,8 @@
 # limitations under the License.
 #
 
-from typing import Generator, Tuple, Dict
+from typing import Tuple, Dict
+from collections.abc import Generator
 import pytest
 import os.path
 import re
@@ -79,7 +80,7 @@ class TestBatch(EntryData):
     )
 
     def normalize(self, archive, logger):
-        super(TestBatch, self).normalize(archive, logger)
+        super().normalize(archive, logger)
         if not self.n_samples:
             return
         sample_refs = []
@@ -139,14 +140,14 @@ def mongo_forall(mongo_function):
 @pytest.fixture
 def uploaded_id_with_warning(
     raw_files_function,
-) -> Generator[Tuple[str, str], None, None]:
+) -> Generator[tuple[str, str], None, None]:
     example_file = 'tests/data/proc/examples_with_warning_template.zip'
     example_upload_id = os.path.basename(example_file).replace('.zip', '')
 
     yield example_upload_id, example_file
 
 
-def run_processing(uploaded: Tuple[str, str], main_author, **kwargs) -> Upload:
+def run_processing(uploaded: tuple[str, str], main_author, **kwargs) -> Upload:
     uploaded_id, uploaded_path = uploaded
     upload = Upload.create(upload_id=uploaded_id, main_author=main_author, **kwargs)
     assert upload.process_status == ProcessStatus.READY
@@ -349,7 +350,7 @@ def test_republish(
 
 @pytest.mark.timeout(config.tests.default_timeout)
 def test_publish_failed(
-    non_empty_uploaded: Tuple[str, str],
+    non_empty_uploaded: tuple[str, str],
     internal_example_user_metadata,
     user1,
     monkeypatch,
@@ -869,9 +870,7 @@ def mock_failure(cls, function_name, monkeypatch):
 
     mock.__name__ = function_name
 
-    monkeypatch.setattr(
-        'nomad.processing.data.%s.%s' % (cls.__name__, function_name), mock
-    )
+    monkeypatch.setattr(f'nomad.processing.data.{cls.__name__}.{function_name}', mock)
 
 
 @pytest.mark.parametrize(
@@ -974,7 +973,7 @@ def test_parent_child_parser(proc_infra, user1, tmp):
             mainfile: str,
             archive: EntryArchive,
             logger=None,
-            child_archives: Dict[str, EntryArchive] = None,
+            child_archives: dict[str, EntryArchive] = None,
         ):
             archive.metadata.comment = 'parent'
             for mainfile_key, child_archive in child_archives.items():
diff --git a/tests/test_client.py b/tests/test_client.py
index 18e07809c0ed77a2d157fbf727f96704ba96f609..75956e8a604f67efc7dbdb7fdb0e54e2725a3037 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -42,7 +42,7 @@ from tests.processing import test_data as test_processing
 
 
 def assert_results(
-    results: List[MSection], sub_section_defs: List[SubSection] = None, total=1
+    results: list[MSection], sub_section_defs: list[SubSection] = None, total=1
 ):
     assert len(results) == total
     for result in results:
@@ -62,11 +62,11 @@ def assert_results(
 
 
 @pytest.fixture(scope='function')
-def many_uploads(non_empty_uploaded: Tuple[str, str], user1: User, proc_infra):
+def many_uploads(non_empty_uploaded: tuple[str, str], user1: User, proc_infra):
     _, upload_file = non_empty_uploaded
     for index in range(0, 4):
         upload = test_processing.run_processing(
-            ('test_upload_%d' % index, upload_file), user1
+            (f'test_upload_{index}', upload_file), user1
         )
         upload.publish_upload()  # pylint: disable=no-member
         try:
@@ -166,9 +166,9 @@ def load_example(path: str):
     expected_path = os.path.join(
         current_dir, 'examples', 'data', 'json_transformer', 'expected.json'
     )
-    with open(rules_path, 'r') as file:
+    with open(rules_path) as file:
         rules_data = json.load(file)
-    with open(expected_path, 'r') as f:
+    with open(expected_path) as f:
         expected = json.load(f)
 
     transformation_dict = {}
diff --git a/tests/test_config.py b/tests/test_config.py
index b76767d798928bf0a31fd61f8abdc8cf528c8d20..7cc0f5070576d2c59b4fed4bd1acf8cebadb24e2 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -332,7 +332,7 @@ def test_parser_plugins():
     parsers = [
         entry_point
         for entry_point in config.plugins.entry_points.options.values()
-        if isinstance(entry_point, (Parser, ParserEntryPoint))
+        if isinstance(entry_point, Parser | ParserEntryPoint)
     ]
     assert len(parsers) == 72
 
diff --git a/tests/test_files.py b/tests/test_files.py
index 07c9ecdc3322e5156a8d63dbc19b86cf3d6cf70b..77fc88fcf47e91ed4585cc9cc6df94a769059e7f 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -16,7 +16,8 @@
 # limitations under the License.
 #
 
-from typing import Generator, Any, Dict, Tuple, Iterable, List, Union
+from typing import Any, Dict, Tuple, List, Union
+from collections.abc import Generator, Iterable
 from datetime import datetime
 import os
 import os.path
@@ -40,10 +41,10 @@ from nomad.files import StagingUploadFiles, PublicUploadFiles, UploadFiles
 from nomad.processing import Upload
 
 
-EntryWithFiles = Tuple[datamodel.EntryMetadata, str]
-UploadWithFiles = Tuple[str, List[datamodel.EntryMetadata], UploadFiles]
-StagingUploadWithFiles = Tuple[str, List[datamodel.EntryMetadata], StagingUploadFiles]
-PublicUploadWithFiles = Tuple[str, List[datamodel.EntryMetadata], PublicUploadFiles]
+EntryWithFiles = tuple[datamodel.EntryMetadata, str]
+UploadWithFiles = tuple[str, list[datamodel.EntryMetadata], UploadFiles]
+StagingUploadWithFiles = tuple[str, list[datamodel.EntryMetadata], StagingUploadFiles]
+PublicUploadWithFiles = tuple[str, list[datamodel.EntryMetadata], PublicUploadFiles]
 
 # example_file uses an artificial parser for faster test execution, can also be
 # changed to examples_vasp.zip for using vasp parser
@@ -129,7 +130,7 @@ class TestObjects:
         assert os.path.exists(os.path.dirname(file.os_path)) == create
 
 
-example_entry: Dict[str, Any] = {
+example_entry: dict[str, Any] = {
     'entry_id': '0',
     'mainfile': 'examples_template/template.json',
     'data': 'value',
@@ -145,7 +146,7 @@ def generate_example_entry(
     example_entry = datamodel.EntryMetadata(domain='dft', entry_id=str(entry_id))
 
     if with_mainfile_prefix:
-        mainfile = '%d.template.json' % entry_id
+        mainfile = f'{entry_id}.template.json'
     else:
         mainfile = 'template.json'
 
@@ -162,7 +163,7 @@ def generate_example_entry(
             filename = os.path.basename(filepath)
             arcname = filename
             if arcname == 'template.json' and with_mainfile_prefix:
-                arcname = '%d.template.json' % entry_id
+                arcname = f'{entry_id}.template.json'
 
             if subdirectory is not None:
                 arcname = os.path.join(subdirectory, arcname)
@@ -685,8 +686,8 @@ def assert_upload_files(
 
 
 def create_test_upload_files(
-    upload_id: Union[str, None],
-    archives: Union[List[datamodel.EntryArchive], None] = None,
+    upload_id: str | None,
+    archives: list[datamodel.EntryArchive] | None = None,
     published: bool = True,
     embargo_length: int = 0,
     raw_files: str = None,
@@ -790,8 +791,8 @@ def test_test_upload_files(raw_files_infra):
     for index in range(0, 3):
         archive = datamodel.EntryArchive()
         metadata = archive.m_create(datamodel.EntryMetadata)
-        metadata.entry_id = 'example_entry_id_%d' % index
-        metadata.mainfile = 'test/test/entry_%d/mainfile_%d.json' % (index, index)
+        metadata.entry_id = f'example_entry_id_{index}'
+        metadata.mainfile = f'test/test/entry_{index}/mainfile_{index}.json'
         archives.append(archive)
 
     upload_files = create_test_upload_files(upload_id, archives, embargo_length=0)
diff --git a/tests/test_logtransfer.py b/tests/test_logtransfer.py
index 1cb171b058722fd1c12a64e5f9582fd393222461..b67262fbeedf50b7db7a447f8e0e58da2374c5fb 100644
--- a/tests/test_logtransfer.py
+++ b/tests/test_logtransfer.py
@@ -31,7 +31,7 @@ def test_logtransfer_handler(log_handler):
     test_logger.info('test event', data='test data')
 
     assert os.path.exists(log_handler.baseFilename)
-    with open(log_handler.baseFilename, 'r') as f:
+    with open(log_handler.baseFilename) as f:
         logs = f.readlines()
 
     assert len(logs) == 1
@@ -68,7 +68,7 @@ def test_transfer_logs(log_handler, monkeypatch, api_v1, central_logstash_mock,
         test_logger.info('test event', data='test data')
 
     assert os.path.exists(log_handler.baseFilename)
-    with open(log_handler.baseFilename, 'r') as f:
+    with open(log_handler.baseFilename) as f:
         logs = f.readlines()
     assert len(logs) == size
 
diff --git a/tests/test_mkdocs.py b/tests/test_mkdocs.py
index 6b6ca47a1c3538a4d65c47ecb5bdf0e1dc98eba6..713c087b6e73458628e1a5105f6a2ef5778e716d 100644
--- a/tests/test_mkdocs.py
+++ b/tests/test_mkdocs.py
@@ -19,7 +19,7 @@
 from enum import Enum
 import pytest
 from typing import Union, List, Dict, Optional
-from typing_extensions import Literal, Annotated  # type: ignore
+from typing import Literal, Annotated  # type: ignore
 from pydantic import BaseModel, Field
 from nomad.config.models.ui import WidgetHistogram, WidgetTerms
 from nomad.mkdocs import (
@@ -54,26 +54,26 @@ class MyIntEnum(int, Enum):
         ),
         pytest.param(Enum, 'Enum', {Enum}, id='class'),
         pytest.param(
-            Optional[WidgetHistogram],
+            Optional[WidgetHistogram],  # noqa
             'Optional[WidgetHistogram, NoneType]',
             {WidgetHistogram, Union, type(None)},
             id='optional-ignored',
         ),
         pytest.param(
-            Union[str, WidgetHistogram],
+            Union[str, WidgetHistogram],  # noqa
             'Union[str, WidgetHistogram]',
             {Union, str, WidgetHistogram},
             id='union',
         ),
         pytest.param(
-            List[Union[str, WidgetHistogram]],
-            'List[Union[str, WidgetHistogram]]',
+            list[Union[str, WidgetHistogram]],  # noqa
+            'list[Union[str, WidgetHistogram]]',
             {list, Union, str, WidgetHistogram},
             id='list-with-union',
         ),
         pytest.param(
-            Dict[str, WidgetHistogram],
-            'Dict[str, WidgetHistogram]',
+            dict[str, WidgetHistogram],
+            'dict[str, WidgetHistogram]',
             {dict, str, WidgetHistogram},
             id='dict',
         ),
@@ -82,12 +82,12 @@ class MyIntEnum(int, Enum):
         pytest.param(MyStrEnum, 'str', {MyStrEnum}, id='enum-string'),
         pytest.param(MyIntEnum, 'int', {MyIntEnum}, id='enum-int'),
         pytest.param(
-            List[
+            list[
                 Annotated[
-                    Union[WidgetTerms, WidgetHistogram], Field(discriminator='type')
+                    Union[WidgetTerms, WidgetHistogram], Field(discriminator='type')  # noqa
                 ]
             ],  # type: ignore
-            'List[Union[WidgetTerms, WidgetHistogram]]',
+            'list[Union[WidgetTerms, WidgetHistogram]]',
             {list, Union, WidgetTerms, WidgetHistogram},
             id='annotated-ignored',
         ),
diff --git a/tests/test_search.py b/tests/test_search.py
index 6f033aff28b7548b2a5045431197e30bb2d1640e..06dd47db9e4621ff0bb0354be80d0c38ad692607 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -18,7 +18,8 @@
 
 import json
 from datetime import datetime
-from typing import Any, Dict, Iterable, List, Sequence, Union
+from typing import Any, Dict, List, Union
+from collections.abc import Iterable, Sequence
 
 import pytest
 
@@ -59,8 +60,8 @@ def split(path):
 
 
 def assert_search_upload(
-    entries: Union[int, Iterable] = -1,
-    additional_keys: List[str] = [],
+    entries: int | Iterable = -1,
+    additional_keys: list[str] = [],
     upload_id: str = None,
     **kwargs,
 ):
@@ -73,7 +74,7 @@ def assert_search_upload(
 
     keys = ['entry_id', 'upload_id', 'mainfile']
     refresh()
-    body: Dict[str, Any] = {}
+    body: dict[str, Any] = {}
     body.update(size=10)
     if upload_id is not None:
         body['query'] = dict(match=dict(upload_id=upload_id))
diff --git a/tests/utils.py b/tests/utils.py
index 4ae5c223df4abd17cb377c0825046a1012656c49..cfd95f936b141015af038242dcb117dbab630fd3 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -66,18 +66,14 @@ def assert_at_least(source, target):
     the same information in target. Additional information in target is ignored.
     """
     for key, value in source.items():
-        assert key in target, '%s with value %s in %s is not in %s' % (
-            key,
-            source[key],
-            source,
-            target,
+        assert key in target, (
+            f'{key} with value {source[key]} in {source} is not in {target}'
         )
         if isinstance(value, dict):
             assert_at_least(value, target[key])
         else:
             assert value == target[key], (
-                '%s with value %s in %s is not equal the target value %s in %s'
-                % (key, source[key], source, target[key], target)
+                f'{key} with value {source[key]} in {source} is not equal the target value {target[key]} in {target}'
             )
 
 
@@ -94,7 +90,7 @@ def assert_url_query_args(url: str, **kwargs):
             assert query_dict[k][0] == str(v)
 
 
-def build_url(base_url: str, query_args: Dict[str, Any]) -> str:
+def build_url(base_url: str, query_args: dict[str, Any]) -> str:
     """
     Takes a base_url and a dictionary, and combines to a url with query arguments.
     Arguments with value None are ignored.
@@ -106,7 +102,7 @@ def build_url(base_url: str, query_args: Dict[str, Any]) -> str:
     return base_url + '?' + urllib.parse.urlencode(query_args_clean, doseq=True)
 
 
-def set_upload_entry_metadata(upload, metadata: Dict[str, Any]):
+def set_upload_entry_metadata(upload, metadata: dict[str, Any]):
     """
     Sets the provided metadata values on all entries of the given upload.
     """
@@ -119,11 +115,11 @@ def set_upload_entry_metadata(upload, metadata: Dict[str, Any]):
 
 def create_template_upload_file(
     tmp,
-    mainfiles: Union[str, List[str]] = None,
+    mainfiles: str | list[str] = None,
     auxfiles: int = 4,
     directory: str = 'examples_template',
     name: str = 'examples_template.zip',
-    more_files: Union[str, List[str]] = None,
+    more_files: str | list[str] = None,
 ):
     """
     Creates a temporary upload.zip file based on template.json (for the artificial test