From 31ddc69a8d08bc55bbe825364d1432e847ca3d4b Mon Sep 17 00:00:00 2001
From: David Sikter <david.sikter@physik.hu-berlin.de>
Date: Tue, 4 Jan 2022 12:22:25 +0100
Subject: [PATCH] refactor terminology from 'calc' to 'entry' across the code base

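Rename 'calc'-based terminology to 'entry'-based terminology across the code
base: processed_calcs/failed_calcs/total_calcs become
processed_entries_count/failed_entries_count/total_entries_count,
Upload.all_calcs becomes Upload.entries_sublist, Upload.get_calc becomes
Upload.get_entry, StagingUploadFiles.calc_files becomes
StagingUploadFiles.entry_files, and Calc.process_calc becomes
Calc.process_entry. CLI options (e.g. --skip-calcs, --with-calcs,
--processing-failure-calcs) as well as docstrings, help texts, and log
messages are updated accordingly. The Calc document class and the mongo
fields calc_id and calc_hash keep their names.
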
---
 .../external_project_upload/upload.py         |   2 +-
 nomad/app/flask/dcat/datasets.py              |   2 +-
 nomad/app/optimade/elasticsearch.py           |   2 +-
 nomad/app/v1/models.py                        |   4 +-
 nomad/app/v1/routers/entries.py               |  14 +-
 nomad/app/v1/routers/uploads.py               |  18 +-
 nomad/archive/partial.py                      |   2 +-
 nomad/cli/admin/admin.py                      |   6 +-
 nomad/cli/admin/clean.py                      |  32 +--
 nomad/cli/admin/entries.py                    |   2 +-
 nomad/cli/admin/uploads.py                    |  66 +++---
 nomad/cli/client/__init__.py                  |   2 +-
 nomad/cli/dev.py                              |   2 +-
 nomad/client/processing.py                    |  16 +-
 nomad/datamodel/__init__.py                   |   6 +-
 nomad/datamodel/datamodel.py                  |  10 +-
 nomad/files.py                                |  84 ++++----
 nomad/infrastructure.py                       |   2 +-
 nomad/normalizing/__init__.py                 |   2 +-
 nomad/parsing/__init__.py                     |   4 +-
 nomad/processing/__init__.py                  |   2 +-
 nomad/processing/data.py                      | 180 ++++++++--------
 nomad/search.py                               |   6 +-
 nomad/utils/__init__.py                       |   6 +-
 nomad/utils/structlogging.py                  |   4 +-
 tests/app/test_optimade.py                    |  12 +-
 tests/app/v1/routers/test_uploads.py          |   6 +-
 tests/conftest.py                             |   6 +-
 tests/processing/test_data.py                 | 195 +++++++++---------
 tests/processing/test_edit_metadata.py        |   6 +-
 tests/test_cli.py                             |  62 +++---
 tests/test_files.py                           | 146 ++++++-------
 tests/utils.py                                |   6 +-
 33 files changed, 449 insertions(+), 466 deletions(-)
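
Note for downstream users (not part of the commit message): scripts that read
processing progress need to switch to the renamed counters. A minimal sketch in
Python, assuming an upload proc object that exposes the renamed attributes from
this patch; the report_progress helper itself is hypothetical and only
illustrates the new names (formerly processed_calcs, failed_calcs, total_calcs):

    def report_progress(upload) -> str:
        # The attribute names below are the ones introduced by this patch.
        return 'processed: %d, failures: %d, total: %d' % (
            upload.processed_entries_count,
            upload.failed_entries_count,
            upload.total_entries_count)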

diff --git a/examples/uploading/external_project_upload/upload.py b/examples/uploading/external_project_upload/upload.py
index 0d261abed4..42e55a2b6b 100644
--- a/examples/uploading/external_project_upload/upload.py
+++ b/examples/uploading/external_project_upload/upload.py
@@ -32,7 +32,7 @@ while upload['process_running']:
     assert response.status_code == 200
     upload = response.json()['data']
     time.sleep(5)
-    print('processed: %d, failures: %d' % (upload['processed_calcs'], upload['failed_calcs']))
+    print('processed: %d, failures: %d' % (upload['processed_entries_count'], upload['failed_entries_count']))
 
 # check if processing was a success
 if upload['process_status'] != 'SUCCESS':
diff --git a/nomad/app/flask/dcat/datasets.py b/nomad/app/flask/dcat/datasets.py
index c2fafcfa32..d459a160e5 100644
--- a/nomad/app/flask/dcat/datasets.py
+++ b/nomad/app/flask/dcat/datasets.py
@@ -38,7 +38,7 @@ class Dataset(Resource):
         ''' Returns a DCAT dataset for a given NOMAD entry id. '''
         results = search(query=dict(entry_id=entry_id))
         if results.pagination.total == 0:
-            abort(404, message='There is no calculation with id %s' % entry_id)
+            abort(404, message='There is no entry with id %s' % entry_id)
 
         entry = results.data[0]
 
diff --git a/nomad/app/optimade/elasticsearch.py b/nomad/app/optimade/elasticsearch.py
index 0c8f48cbc4..34830d5d43 100644
--- a/nomad/app/optimade/elasticsearch.py
+++ b/nomad/app/optimade/elasticsearch.py
@@ -42,7 +42,7 @@ class StructureResourceAttributesByAlias(StructureResourceAttributes):
     nmd_raw_file_download_url: Optional[str] = OptimadeField(
         None,
         alias='_nmd_raw_file_download_url',
-        description='The url to download all calculation raw files as .zip file.',
+        description='The url to download all entry raw files as .zip file.',
         support=SupportLevel.OPTIONAL)
 
     nmd_archive_url: Optional[str] = OptimadeField(
diff --git a/nomad/app/v1/models.py b/nomad/app/v1/models.py
index 2dd3a19297..27dab7261f 100644
--- a/nomad/app/v1/models.py
+++ b/nomad/app/v1/models.py
@@ -1116,8 +1116,8 @@ class MetadataEditRequest(WithQuery):
             `entries_metadata_key` defines which type of key is used in the dictionary to identify
             the entries. Note, only quantities defined on the entry level can be set using this method.'''))
     entries_key: Optional[str] = Field(
-        default='calc_id', description=strip('''
-            Defines which type of key is used in `entries_metadata`. Default is `calc_id`.'''))
+        default='entry_id', description=strip('''
+            Defines which type of key is used in `entries_metadata`. Default is `entry_id`.'''))
     verify_only: Optional[bool] = Field(
         default=False, description=strip('''
             Do not execute the request, just verifies it and provides detailed feedback on
diff --git a/nomad/app/v1/routers/entries.py b/nomad/app/v1/routers/entries.py
index 1d7de71a6f..ba77f001d9 100644
--- a/nomad/app/v1/routers/entries.py
+++ b/nomad/app/v1/routers/entries.py
@@ -82,7 +82,7 @@ use:
 }
 ```
 
-Or to only get the `energy_total` from each individual calculations, use:
+Or to only get the `energy_total` from each individual entry, use:
 ```json
 {
     "run": {
@@ -1120,16 +1120,16 @@ async def post_entry_archive_query(
 
 
 def edit(query: Query, user: User, mongo_update: Dict[str, Any] = None, re_index=True) -> List[str]:
-    # get all calculations that have to change
+    # get all entries that have to change
     entry_ids: List[str] = []
     upload_ids: Set[str] = set()
     with utils.timer(logger, 'edit query executed'):
         all_entries = _do_exaustive_search(
             owner=Owner.user, query=query, include=['entry_id', 'upload_id'], user=user)
 
-        for entry in all_entries:
-            entry_ids.append(entry['entry_id'])
-            upload_ids.add(entry['upload_id'])
+        for entry_dict in all_entries:
+            entry_ids.append(entry_dict['entry_id'])
+            upload_ids.add(entry_dict['upload_id'])
 
     # perform the update on the mongo db
     with utils.timer(logger, 'edit mongo update executed', size=len(entry_ids)):
@@ -1142,8 +1142,8 @@ def edit(query: Query, user: User, mongo_update: Dict[str, Any] = None, re_index
     with utils.timer(logger, 'edit elastic update executed', size=len(entry_ids)):
         if re_index:
             updated_metadata: List[datamodel.EntryMetadata] = []
-            for calc in proc.Calc.objects(calc_id__in=entry_ids):
-                entry_metadata = calc.mongo_metadata(calc.upload)
+            for entry in proc.Calc.objects(calc_id__in=entry_ids):
+                entry_metadata = entry.mongo_metadata(entry.upload)
                 # Ensure that updated fields are marked as "set", even if they are cleared
                 entry_metadata.m_update_from_dict(mongo_update)
                 # Add to list
diff --git a/nomad/app/v1/routers/uploads.py b/nomad/app/v1/routers/uploads.py
index 48626efad7..83b67c875b 100644
--- a/nomad/app/v1/routers/uploads.py
+++ b/nomad/app/v1/routers/uploads.py
@@ -472,9 +472,9 @@ async def get_upload_entries(
     start = pagination.get_simple_index()
     end = start + pagination.page_size
 
-    # load upload's calcs. Use calc_id as tie breaker for ordering.
-    entries = list(upload.all_calcs(start, end, order_by=(order_by_with_sign, 'calc_id')))
-    failed_calcs = upload.failed_calcs
+    # load upload's entries. Use calc_id as tie breaker for ordering.
+    entries = list(upload.entries_sublist(start, end, order_by=(order_by_with_sign, 'calc_id')))
+    failed_entries_count = upload.failed_entries_count
 
     # load entries's metadata from search
     metadata_entries_query = WithQuery(
@@ -497,13 +497,13 @@ async def get_upload_entries(
         pydantic_entry.entry_metadata = metadata_entries_map.get(entry.entry_id)
         data.append(pydantic_entry)
 
-    pagination_response = PaginationResponse(total=upload.total_calcs, **pagination.dict())
+    pagination_response = PaginationResponse(total=upload.total_entries_count, **pagination.dict())
     pagination_response.populate_simple_index_and_urls(request)
 
     return EntryProcDataQueryResponse(
         pagination=pagination_response,
-        processing_successful=upload.processed_calcs - failed_calcs,
-        processing_failed=failed_calcs,
+        processing_successful=upload.processed_entries_count - failed_entries_count,
+        processing_failed=failed_entries_count,
         upload=_upload_to_pydantic(upload),
         data=data)
 
@@ -527,7 +527,7 @@ async def get_upload_entry(
     Fetches a specific entry for a specific upload.
     '''
     upload = _get_upload_with_read_access(upload_id, user)
-    entry = upload.get_calc(entry_id)
+    entry = upload.get_entry(entry_id)
     if not entry:
         raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strip('''
             An entry by that id could not be found in the specified upload.'''))
@@ -1100,7 +1100,7 @@ async def post_upload_action_publish(
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
             detail='Cannot publish an upload that failed processing.')
-    if upload.processed_calcs == 0:
+    if upload.processed_entries_count == 0:
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
             detail='Cannot publish an upload without any resulting entries.')
@@ -1549,7 +1549,7 @@ def _get_upload_with_write_access(
 def _upload_to_pydantic(upload: Upload) -> UploadProcData:
     ''' Converts the mongo db object to an UploadProcData object. '''
     pydantic_upload = UploadProcData.from_orm(upload)
-    pydantic_upload.entries = upload.total_calcs
+    pydantic_upload.entries = upload.total_entries_count
     return pydantic_upload
 
 
diff --git a/nomad/archive/partial.py b/nomad/archive/partial.py
index dca7d67322..c588f62b9d 100644
--- a/nomad/archive/partial.py
+++ b/nomad/archive/partial.py
@@ -121,7 +121,7 @@ def write_partial_archive_to_mongo(archive: EntryArchive):
     ''' Partially writes the given archive to mongodb. '''
     mongo_db = infrastructure.mongo_client[config.mongo.db_name]
     mongo_collection = mongo_db['archive']
-    mongo_id = archive.metadata.calc_id
+    mongo_id = archive.metadata.entry_id
 
     partial_archive_dict = create_partial_archive(archive)
     partial_archive_dict['_id'] = mongo_id
diff --git a/nomad/cli/admin/admin.py b/nomad/cli/admin/admin.py
index 98eaf6717f..c7468f7f72 100644
--- a/nomad/cli/admin/admin.py
+++ b/nomad/cli/admin/admin.py
@@ -47,7 +47,7 @@ def reset(remove, i_am_really_sure):
     infrastructure.reset(remove)
 
 
-@admin.command(help='Reset all "stuck" in processing uploads and calc in low level mongodb operations.')
+@admin.command(help='Reset all uploads and entries "stuck" in processing using low-level mongodb operations.')
 @click.option('--zero-complete-time', is_flag=True, help='Sets the complete time to epoch zero.')
 def reset_processing(zero_complete_time):
     from datetime import datetime
@@ -111,7 +111,7 @@ def ops():
 #     pass
 
 
-@ops.command(help=('Dump the mongo (calculation metadata) db.'))
+@ops.command(help=('Dump the mongo db.'))
 @click.option('--restore', is_flag=True, help='Do not dump, but restore.')
 def dump(restore: bool):
     from datetime import datetime
@@ -121,7 +121,7 @@ def dump(restore: bool):
         config.mongo.host, config.mongo.port, config.mongo.db_name, date_str))
 
 
-@ops.command(help=('Restore the mongo (calculation metadata) db.'))
+@ops.command(help=('Restore the mongo db.'))
 @click.argument('PATH_TO_DUMP', type=str, nargs=1)
 def restore(path_to_dump):
     print('mongorestore --host {} --port {} --db {} {}'.format(
diff --git a/nomad/cli/admin/clean.py b/nomad/cli/admin/clean.py
index b1bbeac465..06f4f01f61 100644
--- a/nomad/cli/admin/clean.py
+++ b/nomad/cli/admin/clean.py
@@ -23,12 +23,12 @@ from .admin import admin
 
 @admin.command(help='Checks consistency of files and es vs mongo and deletes orphan entries.')
 @click.option('--dry', is_flag=True, help='Do not delete anything, just check.')
-@click.option('--skip-calcs', is_flag=True, help='Skip cleaning calcs with missing uploads.')
+@click.option('--skip-entries', is_flag=True, help='Skip cleaning entries with missing uploads.')
 @click.option('--skip-fs', is_flag=True, help='Skip cleaning the filesystem.')
 @click.option('--skip-es', is_flag=True, help='Skip cleaning the es index.')
 @click.option('--staging-too', is_flag=True, help='Also clean published entries in staging, make sure these files are not due to reprocessing')
 @click.option('--force', is_flag=True, help='Do not ask for confirmation.')
-def clean(dry, skip_calcs, skip_fs, skip_es, staging_too, force):
+def clean(dry, skip_entries, skip_fs, skip_es, staging_too, force):
     import os
     import shutil
     import tabulate
@@ -41,26 +41,26 @@ def clean(dry, skip_calcs, skip_fs, skip_es, staging_too, force):
     mongo_client = infrastructure.setup_mongo()
     infrastructure.setup_elastic()
 
-    if not skip_calcs:
-        uploads_for_calcs = mongo_client[nomad_config.mongo.db_name]['calc'].distinct('upload_id')
+    if not skip_entries:
+        uploads_for_entries = mongo_client[nomad_config.mongo.db_name]['calc'].distinct('upload_id')
         uploads = {}
         for upload in mongo_client[nomad_config.mongo.db_name]['upload'].distinct('_id'):
             uploads[upload] = True
 
         missing_uploads = []
-        for upload_for_calc in uploads_for_calcs:
-            if upload_for_calc not in uploads:
-                missing_uploads.append(upload_for_calc)
+        for upload_for_entry in uploads_for_entries:
+            if upload_for_entry not in uploads:
+                missing_uploads.append(upload_for_entry)
 
         if not dry and len(missing_uploads) > 0:
             if not force:
-                input('Will delete calcs (mongo + es) for %d missing uploads. Press any key to continue ...' % len(missing_uploads))
+                input('Will delete entries (mongo + es) for %d missing uploads. Press any key to continue ...' % len(missing_uploads))
 
             for upload in missing_uploads:
                 mongo_client[nomad_config.mongo.db_name]['calc'].remove(dict(upload_id=upload))
                 elasticsearch_dsl.Search(index=nomad_config.elastic.entries_index).query('term', upload_id=upload).delete()
         else:
-            print('Found %s uploads that have calcs in mongo, but there is no upload entry.' % len(missing_uploads))
+            print('Found %s uploads that have entries in mongo, but no corresponding upload document.' % len(missing_uploads))
             print('List first 10:')
             for upload in missing_uploads[:10]:
                 print(upload)
@@ -115,18 +115,18 @@ def clean(dry, skip_calcs, skip_fs, skip_es, staging_too, force):
             for bucket in es_upload_buckets
             if processing.Upload.objects(upload_id=bucket.value).first() is None)
 
-        calcs = 0
-        for _, upload_calcs in to_delete:
-            calcs += upload_calcs
+        entries = 0
+        for _, upload_entries in to_delete:
+            entries += upload_entries
 
         if not dry and len(to_delete) > 0:
             if not force:
                 input(
-                    'Will delete %d calcs in %d uploads from ES. Press any key to continue ...' %
-                    (calcs, len(to_delete)))
+                    'Will delete %d entries in %d uploads from ES. Press any key to continue ...' %
+                    (entries, len(to_delete)))
             for upload_id, _ in to_delete:
                 delete_by_query(owner='all', query=dict(upload_id=upload_id))
         else:
-            print('Found %d calcs in %d uploads from ES with no upload in mongo.' % (calcs, len(to_delete)))
+            print('Found %d entries in %d uploads from ES with no upload in mongo.' % (entries, len(to_delete)))
             print('List first 10:')
-            tabulate.tabulate(to_delete, headers=['id', '#calcs'])
+            print(tabulate.tabulate(to_delete[:10], headers=['id', '#entries']))
diff --git a/nomad/cli/admin/entries.py b/nomad/cli/admin/entries.py
index a06f649ca3..ddd1eab320 100644
--- a/nomad/cli/admin/entries.py
+++ b/nomad/cli/admin/entries.py
@@ -29,7 +29,7 @@ def entries():
 @entries.command(help='Delete selected entries from mongo and elastic')
 @click.argument('ENTRIES', nargs=-1)
 @click.option('--skip-es', help='Keep the elastic index version of the data.', is_flag=True)
-@click.option('--skip-mongo', help='Keep uploads and calcs in mongo.', is_flag=True)
+@click.option('--skip-mongo', help='Keep uploads and entries in mongo.', is_flag=True)
 def rm(entries, skip_es, skip_mongo):
     from nomad import processing as proc, infrastructure, search
 
diff --git a/nomad/cli/admin/uploads.py b/nomad/cli/admin/uploads.py
index 40653fa429..f0b21d0aa6 100644
--- a/nomad/cli/admin/uploads.py
+++ b/nomad/cli/admin/uploads.py
@@ -144,13 +144,13 @@ def _run_processing(
 @click.option('--outdated', help='Select published uploads with older nomad version', is_flag=True)
 @click.option('--processing', help='Select only processing uploads', is_flag=True)
 @click.option('--processing-failure-uploads', is_flag=True, help='Select uploads with failed processing')
-@click.option('--processing-failure-calcs', is_flag=True, help='Select uploads with calcs with failed processing')
-@click.option('--processing-failure', is_flag=True, help='Select uploads where the upload or any calc has failed processing')
+@click.option('--processing-failure-entries', is_flag=True, help='Select uploads with entries that failed processing')
+@click.option('--processing-failure', is_flag=True, help='Select uploads where the upload or any entry has failed processing')
 @click.option('--processing-incomplete-uploads', is_flag=True, help='Select uploads that have not yet been processed')
-@click.option('--processing-incomplete-calcs', is_flag=True, help='Select uploads where any calc has net yot been processed')
-@click.option('--processing-incomplete', is_flag=True, help='Select uploads where the upload or any calc has not yet been processed')
-@click.option('--processing-necessary', is_flag=True, help='Select uploads where the upload or any calc has either not been processed or processing has failed in the past')
-@click.option('--unindexed', is_flag=True, help='Select uploads that have no calcs in the elastic search index.')
+@click.option('--processing-incomplete-entries', is_flag=True, help='Select uploads where any entry has not yet been processed')
+@click.option('--processing-incomplete', is_flag=True, help='Select uploads where the upload or any entry has not yet been processed')
+@click.option('--processing-necessary', is_flag=True, help='Select uploads where the upload or any entry has either not been processed or processing has failed in the past')
+@click.option('--unindexed', is_flag=True, help='Select uploads that have no entries in the elastic search index.')
 @click.pass_context
 def uploads(ctx, **kwargs):
     ctx.obj.uploads_kwargs = kwargs
@@ -160,9 +160,9 @@ def _query_uploads(
         uploads,
         unpublished: bool, published: bool, processing: bool, outdated: bool,
         uploads_mongo_query: str, entries_mongo_query: str, entries_es_query: str,
-        processing_failure_uploads: bool, processing_failure_calcs: bool,
+        processing_failure_uploads: bool, processing_failure_entries: bool,
         processing_failure: bool, processing_incomplete_uploads: bool,
-        processing_incomplete_calcs: bool, processing_incomplete: bool,
+        processing_incomplete_entries: bool, processing_incomplete: bool,
         processing_necessary: bool, unindexed: bool):
 
     '''
@@ -216,10 +216,10 @@ def _query_uploads(
     if outdated:
         entries_mongo_query_q &= Q(nomad_version={'$ne': config.meta.version})
 
-    if processing_failure_calcs or processing_failure or processing_necessary:
+    if processing_failure_entries or processing_failure or processing_necessary:
         entries_mongo_query_q &= Q(process_status=proc.ProcessStatus.FAILURE)
 
-    if processing_incomplete_calcs or processing_incomplete or processing_necessary:
+    if processing_incomplete_entries or processing_incomplete or processing_necessary:
         entries_mongo_query_q &= Q(process_status__in=proc.ProcessStatus.STATUSES_PROCESSING)
 
     mongo_entry_based_uploads = set(proc.Calc.objects(entries_mongo_query_q).distinct(field="upload_id"))
@@ -260,11 +260,11 @@ def _query_uploads(
 
 @uploads.command(help='List selected uploads')
 @click.argument('UPLOADS', nargs=-1)
-@click.option('-c', '--calculations', is_flag=True, help='Show details about calculations.')
+@click.option('-e', '--entries', is_flag=True, help='Show details about entries.')
 @click.option('--ids', is_flag=True, help='Only show a list of ids.')
 @click.option('--json', is_flag=True, help='Output a JSON array of ids.')
 @click.pass_context
-def ls(ctx, uploads, calculations, ids, json):
+def ls(ctx, uploads, entries, ids, json):
     import tabulate
 
     _, uploads = _query_uploads(uploads, **ctx.obj.uploads_kwargs)
@@ -277,17 +277,17 @@ def ls(ctx, uploads, calculations, ids, json):
             upload.process_status,
             upload.published]
 
-        if calculations:
+        if entries:
             row += [
-                upload.total_calcs,
-                upload.failed_calcs,
-                upload.total_calcs - upload.processed_calcs]
+                upload.total_entries_count,
+                upload.failed_entries_count,
+                upload.total_entries_count - upload.processed_entries_count]
 
         return row
 
     headers = ['id', 'upload_name', 'user', 'process', 'published']
-    if calculations:
-        headers += ['calcs', 'failed', 'processing']
+    if entries:
+        headers += ['entries', 'failed', 'processing']
 
     if ids:
         for upload in uploads:
@@ -304,7 +304,7 @@ def ls(ctx, uploads, calculations, ids, json):
         headers=headers))
 
 
-@uploads.command(help='Change the owner of the upload and all its calcs.')
+@uploads.command(help='Change the owner of the upload and all its entries.')
 @click.argument('USERNAME', nargs=1)
 @click.argument('UPLOADS', nargs=-1)
 @click.pass_context
@@ -324,11 +324,11 @@ def chown(ctx, username, uploads):
 
 @uploads.command(help='Reset the processing state.')
 @click.argument('UPLOADS', nargs=-1)
-@click.option('--with-calcs', is_flag=True, help='Also reset all calculations.')
+@click.option('--with-entries', is_flag=True, help='Also reset all entries.')
 @click.option('--success', is_flag=True, help='Set the process status to success instead of pending')
 @click.option('--failure', is_flag=True, help='Set the process status to failure instead of pending.')
 @click.pass_context
-def reset(ctx, uploads, with_calcs, success, failure):
+def reset(ctx, uploads, with_entries, success, failure):
     from nomad import processing as proc
 
     _, uploads = _query_uploads(uploads, **ctx.obj.uploads_kwargs)
@@ -338,15 +338,15 @@ def reset(ctx, uploads, with_calcs, success, failure):
 
     i = 0
     for upload in uploads:
-        if with_calcs:
-            calc_update = proc.Calc.reset_pymongo_update()
+        if with_entries:
+            entry_update = proc.Calc.reset_pymongo_update()
             if success:
-                calc_update['process_status'] = proc.ProcessStatus.SUCCESS
+                entry_update['process_status'] = proc.ProcessStatus.SUCCESS
             if failure:
-                calc_update['process_status'] = proc.ProcessStatus.FAILURE
+                entry_update['process_status'] = proc.ProcessStatus.FAILURE
 
             proc.Calc._get_collection().update_many(
-                dict(upload_id=upload.upload_id), {'$set': calc_update})
+                dict(upload_id=upload.upload_id), {'$set': entry_update})
 
         upload.reset(force=True)
         if success:
@@ -358,7 +358,7 @@ def reset(ctx, uploads, with_calcs, success, failure):
         print('resetted %d of %d uploads' % (i, uploads_count))
 
 
-@uploads.command(help='(Re-)index all calcs of the given uploads.')
+@uploads.command(help='(Re-)index all entries of the given uploads.')
 @click.argument('UPLOADS', nargs=-1)
 @click.option('--parallel', default=1, type=int, help='Use the given amount of parallel processes. Default is 1.')
 @click.option('--transformer', help='Qualified name to a Python function that should be applied to each EntryMetadata.')
@@ -384,7 +384,7 @@ def index(ctx, uploads, parallel, transformer, skip_materials, print_progress):
             except Exception as e:
                 import traceback
                 traceback.print_exc()
-                print(f'   ERROR failed to transform calc (stop transforming for upload): {str(e)}')
+                print(f'   ERROR failed to transform entry (stop transforming for upload): {str(e)}')
                 break
 
     def index_upload(upload, logger):
@@ -429,7 +429,7 @@ def delete_upload(upload, skip_es: bool = False, skip_files: bool = False, skip_
 @uploads.command(help='Delete selected upload')
 @click.argument('UPLOADS', nargs=-1)
 @click.option('--skip-es', help='Keep the elastic index version of the data.', is_flag=True)
-@click.option('--skip-mongo', help='Keep uploads and calcs in mongo.', is_flag=True)
+@click.option('--skip-mongo', help='Keep uploads and entries in mongo.', is_flag=True)
 @click.option('--skip-files', help='Keep all related files.', is_flag=True)
 @click.pass_context
 def rm(ctx, uploads, skip_es, skip_mongo, skip_files):
@@ -476,11 +476,11 @@ def re_pack(ctx, uploads):
 
 @uploads.command(help='Attempt to abort the processing of uploads.')
 @click.argument('UPLOADS', nargs=-1)
-@click.option('--calcs', is_flag=True, help='Only stop calculation processing.')
+@click.option('--entries', is_flag=True, help='Only stop entry processing.')
 @click.option('--kill', is_flag=True, help='Use the kill signal and force task failure.')
 @click.option('--no-celery', is_flag=True, help='Do not attempt to stop the actual celery tasks')
 @click.pass_context
-def stop(ctx, uploads, calcs: bool, kill: bool, no_celery: bool):
+def stop(ctx, uploads, entries: bool, kill: bool, no_celery: bool):
     import mongoengine
 
     from nomad import utils, processing as proc
@@ -519,7 +519,7 @@ def stop(ctx, uploads, calcs: bool, kill: bool, no_celery: bool):
 
     running_query = query & mongoengine.Q(process_status__in=proc.ProcessStatus.STATUSES_PROCESSING)
     stop_all(proc.Calc.objects(running_query))
-    if not calcs:
+    if not entries:
         stop_all(proc.Upload.objects(running_query))
 
 
@@ -547,5 +547,5 @@ def entry_index(ctx, uploads):
             pagination=Pagination(page_size=0),
             user_id=config.services.admin_user_id)
 
-        if search_results.pagination.total != upload.total_calcs:
+        if search_results.pagination.total != upload.total_entries_count:
             print(upload.upload_id)
diff --git a/nomad/cli/client/__init__.py b/nomad/cli/client/__init__.py
index 0a19429d8f..7fce4c6e98 100644
--- a/nomad/cli/client/__init__.py
+++ b/nomad/cli/client/__init__.py
@@ -122,7 +122,7 @@ def upload(ctx, path, upload_name: str, local_path: bool, publish: bool):
 
 @client.command(help='Run processing locally.')
 @click.argument('ENTRY_ID', nargs=1, required=True, type=str)
-@click.option('--override', is_flag=True, help='Override existing local calculation data.')
+@click.option('--override', is_flag=True, help='Override existing local entry data.')
 @click.option('--show-archive', is_flag=True, help='Print the archive data.')
 @click.option('--show-metadata', is_flag=True, help='Print the extracted repo metadata.')
 @click.option('--skip-normalizers', is_flag=True, help='Do not normalize.')
diff --git a/nomad/cli/dev.py b/nomad/cli/dev.py
index 4dd7f789fb..d0be81037c 100644
--- a/nomad/cli/dev.py
+++ b/nomad/cli/dev.py
@@ -293,7 +293,7 @@ def example_data(username: str):
 
     data = ExampleData(main_author=main_author)
 
-    # one upload with two calc published with embargo, one shared
+    # one upload with two entries published with embargo, one shared
     upload_id = utils.create_uuid()
     data.create_upload(upload_id=upload_id, published=True, embargo_length=0)
     data.create_entry(
diff --git a/nomad/client/processing.py b/nomad/client/processing.py
index 7ea7948a44..522ba939c0 100644
--- a/nomad/client/processing.py
+++ b/nomad/client/processing.py
@@ -32,7 +32,7 @@ def parse(
         backend_factory: typing.Callable = None,
         strict: bool = True, logger=None):
     '''
-    Run the given parser on the downloaded calculation. If no parser is given,
+    Run the given parser on the downloaded entry. If no parser is given,
     do parser matching and use the respective parser.
     '''
     from nomad import parsing
@@ -101,7 +101,7 @@ def normalize(
 
 def normalize_all(entry_archive, logger=None):
     '''
-    Parse the downloaded calculation and run the whole normalizer chain.
+    Parse the downloaded entry and run the whole normalizer chain.
     '''
     from nomad import normalizing
 
@@ -122,7 +122,7 @@ class LocalEntryProcessing:
 
     Arguments:
         entry_id: The entry_id of the entry to locally process.
-        override: Set to true to override any existing local calculation data.
+        override: Set to true to override any existing local entry data.
         auth: Optional Auth object to download private data.
     '''
     def __init__(self, entry_id: str, override: bool = False, auth: Auth = None) -> None:
@@ -149,7 +149,7 @@ class LocalEntryProcessing:
                 for chunk in response.iter_content(chunk_size=io.DEFAULT_BUFFER_SIZE):
                     f.write(chunk)
         else:
-            print('Calc already downloaded.')
+            print('Entry already downloaded.')
 
         self.upload_files = files.StagingUploadFiles(upload_id=f'tmp_{self.entry_id}', create=True)
 
@@ -166,7 +166,7 @@ class LocalEntryProcessing:
 
     def __enter__(self):
         # open/extract upload file
-        print('Extracting calc data.')
+        print('Extracting entry data.')
         if self.upload_files.is_empty():  # Only add the files once
             self.upload_files.add_rawfiles(self.local_path)
 
@@ -191,7 +191,7 @@ class LocalEntryProcessing:
 
     def parse(self, parser_name: str = None, **kwargs):
         '''
-        Run the given parser on the downloaded calculation. If no parser is given,
+        Run the given parser on the downloaded entry. If no parser is given,
         do parser matching and use the respective parser.
         '''
         return parse(
@@ -200,7 +200,7 @@ class LocalEntryProcessing:
 
     def normalize(self, normalizer: typing.Union[str, typing.Callable], entry_archive=None):
         '''
-        Parse the downloaded calculation and run the given normalizer.
+        Parse the downloaded entry and run the given normalizer.
         '''
         if entry_archive is None:
             entry_archive = self.parse()
@@ -210,7 +210,7 @@ class LocalEntryProcessing:
 
     def normalize_all(self, entry_archive=None):
         '''
-        Parse the downloaded calculation and run the whole normalizer chain.
+        Parse the downloaded entry and run the whole normalizer chain.
         '''
         return normalize_all(
             entry_archive=entry_archive, logger=utils.get_logger(__name__))
diff --git a/nomad/datamodel/__init__.py b/nomad/datamodel/__init__.py
index df8384a9ce..f0e3ffd9d0 100644
--- a/nomad/datamodel/__init__.py
+++ b/nomad/datamodel/__init__.py
@@ -32,7 +32,7 @@ be easily rendered on the GUI. The information is readily available through the
 See also the `datamodel section in the introduction <introduction.html#data-model>`_.
 
 This module contains classes that allow to represent the core
-nomad data entities (entries/calculations, users, datasets) on a high level of abstraction
+nomad data entities (entries, users, datasets) on a high level of abstraction
 independent from their representation in the different modules
 :py:mod:`nomad.processing`, :py:mod:`nomad.parsing`, :py:mod:`nomad.search`, :py:mod:`nomad.app`.
 
@@ -40,7 +40,7 @@ Datamodel entities
 ------------------
 
 The entities in the datamodel are defined as NOMAD Metainfo sections. They are treated
-similarily to all Archive data. The entry/calculation datamodel data is created during
+similarly to all Archive data. The entry datamodel data is created during
 processing. It is not about representing every detail, but those parts that are directly involved in
 api, processing, mirroring, or other 'infrastructure' operations.
 
@@ -63,7 +63,7 @@ Domains
 ------------------
 
 The datamodel supports different *domains*. This means that most domain metadata of an
-entry/calculation is stored in domain-specific sub sections of the :class:`EntryMetadata`
+entry is stored in domain-specific sub sections of the :class:`EntryMetadata`
 section. We currently have the following domain specific metadata classes/sections:
 
 .. autoclass:: nomad.datamodel.OptimadeEntry
diff --git a/nomad/datamodel/datamodel.py b/nomad/datamodel/datamodel.py
index 507ebd2e03..9e58ec3120 100644
--- a/nomad/datamodel/datamodel.py
+++ b/nomad/datamodel/datamodel.py
@@ -339,11 +339,11 @@ def derive_authors(entry: 'EntryMetadata') -> List[User]:
 class EntryMetadata(metainfo.MSection):
     '''
     Attributes:
-        upload_id: The ``upload_id`` of the calculations upload (random UUID).
+        upload_id: The id of the upload (random UUID).
         upload_name: The user provided upload name.
         upload_create_time: The time that the upload was created
-        calc_id: The unique mainfile based calculation id.
-        calc_hash: The raw file content based checksum/hash of this calculation.
+        calc_id: The unique mainfile based entry id.
+        calc_hash: The raw file content based checksum/hash of this entry.
         entry_create_time: The time that the entry was created
         last_edit_time: The date and time the user metadata was last edited.
         parser_name: The NOMAD parser used for the last processing.
@@ -361,8 +361,8 @@ class EntryMetadata(metainfo.MSection):
             only visible to the main author, the upload coauthors, and the upload reviewers.
         license: A short license description (e.g. CC BY 4.0), that refers to the
             license of this entry.
-        processed: Boolean indicating if this calc was successfully processed and archive
-            data and calc metadata is available.
+        processed: Boolean indicating if this entry was successfully processed and archive
+            data and entry metadata are available.
         last_processing_time: The date and time of the last processing.
         processing_errors: Errors that occured during processing.
         nomad_version: A string that describes the version of the nomad software that was
diff --git a/nomad/files.py b/nomad/files.py
index 41fd0eddf7..beab90fe2e 100644
--- a/nomad/files.py
+++ b/nomad/files.py
@@ -28,19 +28,19 @@ almost readonly (beside metadata) storage.
 .. code-block:: sh
 
     fs/staging/<upload>/raw/**
-                       /archive/<calc>.msg
+                       /archive/<entry_id>.msg
     fs/public/<upload>/raw-{access}.plain.zip
                       /archive-{access}.msg.msg
 
 Where `access` is either "public" (non-embargoed) or "restricted" (embargoed).
 
 There is an implicit relationship between files, based on them being in the same
-directory. Each directory with at least one *mainfile* is a *calculation directory*
-and all the files are *aux* files to that *mainfile*. This is independent of the
-respective files actually contributing data or not. A *calculation directory* might
-contain multiple *mainfile*. E.g., user simulated multiple states of the same system, have
-one calculation based on the other, etc. In this case the other *mainfile* is an *aux*
-file to the original *mainfile* and vice versa.
+directory. Each directory with at least one *mainfile* is an *entry directory*
+and all the files are *aux* files to that mainfile. This is independent of whether the
+respective files actually contribute data or not. An entry directory might
+contain multiple mainfiles, e.g. if a user simulated multiple states of the same system,
+has one entry based on another, etc. In this case the other mainfile is an *aux file* to the
+original mainfile, and vice versa.
 '''
 
 from abc import ABCMeta
@@ -618,10 +618,10 @@ class UploadFiles(DirectoryObject, metaclass=ABCMeta):
             mime_type = 'application/octet-stream'
         return mime_type
 
-    def read_archive(self, calc_id: str) -> ArchiveReader:
+    def read_archive(self, entry_id: str) -> ArchiveReader:
         '''
         Returns an :class:`nomad.archive.ArchiveReader` that contains the
-        given calc_id.
+        given entry_id.
         '''
         raise NotImplementedError()
 
@@ -749,11 +749,11 @@ class StagingUploadFiles(UploadFiles):
         assert is_safe_relative_path(file_path)
         return self._raw_dir.join_file(file_path)
 
-    def write_archive(self, calc_id: str, data: Any) -> int:
+    def write_archive(self, entry_id: str, data: Any) -> int:
         ''' Writes the data as archive file and returns the archive file size. '''
-        archive_file_object = self.archive_file_object(calc_id)
+        archive_file_object = self.archive_file_object(entry_id)
         try:
-            write_archive(archive_file_object.os_path, 1, data=[(calc_id, data)])
+            write_archive(archive_file_object.os_path, 1, data=[(entry_id, data)])
         except Exception as e:
             # in case of failure, remove the possible corrupted archive file
             if archive_file_object.exists():
@@ -761,17 +761,17 @@ class StagingUploadFiles(UploadFiles):
 
             raise e
 
-        return self.archive_file_object(calc_id).size
+        return self.archive_file_object(entry_id).size
 
-    def read_archive(self, calc_id: str) -> ArchiveReader:
+    def read_archive(self, entry_id: str) -> ArchiveReader:
         try:
-            return read_archive(self.archive_file_object(calc_id).os_path)
+            return read_archive(self.archive_file_object(entry_id).os_path)
 
         except FileNotFoundError:
-            raise KeyError(calc_id)
+            raise KeyError(entry_id)
 
-    def archive_file_object(self, calc_id: str) -> PathObject:
-        return self._archive_dir.join_file(f'{calc_id}.msg')
+    def archive_file_object(self, entry_id: str) -> PathObject:
+        return self._archive_dir.join_file(f'{entry_id}.msg')
 
     def add_rawfiles(
             self, path: str, target_dir: str = '', cleanup_source_file_and_dir: bool = False) -> None:
@@ -986,13 +986,13 @@ class StagingUploadFiles(UploadFiles):
         number_of_entries = len(entries)
 
         def create_iterator():
-            for calc in entries:
-                archive_file = self.archive_file_object(calc.calc_id)
+            for entry in entries:
+                archive_file = self.archive_file_object(entry.entry_id)
                 if archive_file.exists():
-                    data = read_archive(archive_file.os_path)[calc.calc_id].to_dict()
-                    yield (calc.calc_id, data)
+                    data = read_archive(archive_file.os_path)[entry.entry_id].to_dict()
+                    yield (entry.entry_id, data)
                 else:
-                    yield (calc.calc_id, {})
+                    yield (entry.entry_id, {})
 
         try:
             file_object = PublicUploadFiles._create_msg_file_object(target_dir, access)
@@ -1024,11 +1024,11 @@ class StagingUploadFiles(UploadFiles):
             self.logger.error('exception during packing raw files', exc_info=e)
             raise
 
-    def calc_files(self, mainfile: str, with_mainfile: bool = True, with_cutoff: bool = True) -> Iterable[str]:
+    def entry_files(self, mainfile: str, with_mainfile: bool = True, with_cutoff: bool = True) -> Iterable[str]:
         '''
         Returns all the auxfiles and mainfile for a given mainfile. This implements
-        nomad's logic about what is part of a calculation and what not. The mainfile
-        is first entry, the rest is sorted.
+        nomad's logic about what is part of an entry and what not. The mainfile
+        is the first element, the rest is sorted.
         Arguments:
             mainfile: The mainfile relative to upload
             with_mainfile: Do include the mainfile, default is True
@@ -1038,19 +1038,19 @@ class StagingUploadFiles(UploadFiles):
             raise KeyError(mainfile)
 
         mainfile_basename = os.path.basename(mainfile)
-        calc_dir = os.path.dirname(mainfile_object.os_path)
-        calc_relative_dir = calc_dir[len(self._raw_dir.os_path) + 1:]
+        entry_dir = os.path.dirname(mainfile_object.os_path)
+        entry_relative_dir = entry_dir[len(self._raw_dir.os_path) + 1:]
 
         file_count = 0
         aux_files: List[str] = []
-        for filename in os.listdir(calc_dir):
-            if filename != mainfile_basename and os.path.isfile(os.path.join(calc_dir, filename)):
-                aux_files.append(os.path.join(calc_relative_dir, filename))
+        for filename in os.listdir(entry_dir):
+            if filename != mainfile_basename and os.path.isfile(os.path.join(entry_dir, filename)):
+                aux_files.append(os.path.join(entry_relative_dir, filename))
                 file_count += 1
 
             if with_cutoff and file_count > config.auxfile_cutoff:
-                # If there are two many of them, its probably just a directory with lots of
-                # calculations. In this case it does not make any sense to provide thousands of
+                # If there are too many of them, it's probably just a directory with lots of
+                # mainfiles/entries. In this case it does not make any sense to provide thousands of
                 # aux files.
                 break
 
@@ -1063,16 +1063,16 @@ class StagingUploadFiles(UploadFiles):
 
     def calc_hash(self, mainfile: str) -> str:
         '''
-        Calculates a hash for the given calc based on file contents and aux file contents.
+        Calculates a hash for the given entry based on file contents and aux file contents.
         Arguments:
-            mainfile: The mainfile path relative to the upload that identifies the calc in the folder structure.
+            mainfile: The mainfile path relative to the upload that identifies the entry in the folder structure.
         Returns:
             The calculated hash
         Raises:
             KeyError: If the mainfile does not exist.
         '''
         hash = hashlib.sha512()
-        for filepath in self.calc_files(mainfile):
+        for filepath in self.entry_files(mainfile):
             with open(self._raw_dir.join_file(filepath).os_path, 'rb') as f:
                 for data in iter(lambda: f.read(65536), b''):
                     hash.update(data)
@@ -1234,9 +1234,9 @@ class PublicUploadFiles(UploadFiles):
 
         if include_archive:
             with self._open_msg_file() as archive:
-                for calc_id, data in archive.items():
-                    calc_id = calc_id.strip()
-                    staging_upload_files.write_archive(calc_id, data.to_dict())
+                for entry_id, data in archive.items():
+                    entry_id = entry_id.strip()
+                    staging_upload_files.write_archive(entry_id, data.to_dict())
 
         return staging_upload_files
 
@@ -1373,15 +1373,15 @@ class PublicUploadFiles(UploadFiles):
 
         raise KeyError(file_path)
 
-    def read_archive(self, calc_id: str) -> Any:
+    def read_archive(self, entry_id: str) -> Any:
         try:
             archive = self._open_msg_file()
-            if calc_id in archive:
+            if entry_id in archive:
                 return archive
         except FileNotFoundError:
             pass
 
-        raise KeyError(calc_id)
+        raise KeyError(entry_id)
 
     def re_pack(self, with_embargo: bool) -> None:
         '''
diff --git a/nomad/infrastructure.py b/nomad/infrastructure.py
index a9803566bd..b63a1393bd 100644
--- a/nomad/infrastructure.py
+++ b/nomad/infrastructure.py
@@ -376,7 +376,7 @@ keycloak = Keycloak()
 
 def reset(remove: bool):
     '''
-    Resets the databases mongo, elastic/calcs, and all files. Be careful.
+    Resets the databases mongo, elastic/entries, and all files. Be careful.
     In contrast to :func:`remove`, it will only remove the contents of dbs and indicies.
     This function just attempts to remove everything, there is no exception handling
     or any warranty it will succeed.
diff --git a/nomad/normalizing/__init__.py b/nomad/normalizing/__init__.py
index a7706f92fa..bc5db161c9 100644
--- a/nomad/normalizing/__init__.py
+++ b/nomad/normalizing/__init__.py
@@ -17,7 +17,7 @@
 #
 
 '''
-After parsing calculations have to be normalized with a set of *normalizers*.
+After parsing, entries have to be normalized with a set of *normalizers*.
 In NOMAD-coe those were programmed in python (we'll reuse) and scala (we'll rewrite).
 
 Currently the normalizers are:
diff --git a/nomad/parsing/__init__.py b/nomad/parsing/__init__.py
index 7051718186..c9a5a1a87d 100644
--- a/nomad/parsing/__init__.py
+++ b/nomad/parsing/__init__.py
@@ -57,9 +57,9 @@ The parser definitions are available via the following two variables.
 .. autodata:: nomad.parsing.parsers.parsers
 .. autodata:: nomad.parsing.parsers.parser_dict
 
-Parsers are reused for multiple calculations.
+Parsers are reused for multiple entries.
 
-Parsers and calculation files are matched via regular expressions.
+Parsers and mainfiles are matched via regular expressions.
 
 .. autofunction:: nomad.parsing.parsers.match_parser
 
diff --git a/nomad/processing/__init__.py b/nomad/processing/__init__.py
index bd3a4d1c44..52a53ffde5 100644
--- a/nomad/processing/__init__.py
+++ b/nomad/processing/__init__.py
@@ -26,7 +26,7 @@ Processing is build on top of *celery* (http://www.celeryproject.org/) and
 Celery provides a task-based programming model for distributed computing. It uses
 a broker, e.g. a distributed task queue like *RabbitMQ* to distribute tasks. We
 use mongodb to store the current state of processing in :class:`Upload` and
-:class:`Calculation` documents. This combination allows us to easily distribute
+:class:`Calc` documents. This combination allows us to easily distribute
 processing work while having the processing state, i.e. (intermediate) results,
 always available.
 
diff --git a/nomad/processing/data.py b/nomad/processing/data.py
index 9fd31e1224..f5d0952c25 100644
--- a/nomad/processing/data.py
+++ b/nomad/processing/data.py
@@ -19,7 +19,7 @@
 '''
 This module comprises a set of persistent document classes that hold all user related
 data. These are information about users, their uploads and datasets, the associated
-calculations, and files
+entries, and files.
 
 
 .. autoclass:: Calc
@@ -544,7 +544,7 @@ class MetadataEditRequestHandler:
 
     def _get_entry_key(self, entry: 'Calc', entries_key: str) -> str:
         if entries_key == 'calc_id' or entries_key == 'entry_id':
-            return entry.calc_id
+            return entry.entry_id
         elif entries_key == 'mainfile':
             return entry.mainfile
         assert False, f'Invalid entries_key: {entries_key}'
@@ -651,23 +651,23 @@ class MetadataEditRequestHandler:
 
 class Calc(Proc):
     '''
-    Instances of this class represent calculations. This class manages the elastic
-    search index entry, files, and archive for the respective calculation.
+    Instances of this class represent entries. This class manages the elastic
+    search index entry, files, and archive for the respective entry.
 
-    It also contains the calculations processing and its state.
+    It also contains information about the entry's processing state.
 
     The attribute list, does not include the various metadata properties generated
     while parsing, including ``code_name``, ``code_version``, etc.
 
     Attributes:
         upload_id: the id of the upload to which this entry belongs
-        calc_id: the calc_id of this calc
+        calc_id: the id of this entry
         calc_hash: the hash of the entry files
         entry_create_time: the date and time of the creation of the entry
         last_processing_time: the date and time of the last processing
         last_edit_time: the date and time the user metadata was last edited
-        mainfile: the mainfile (including path in upload) that was used to create this calc
-        parser_name: the name of the parser used to process this calc
+        mainfile: the mainfile (including path in upload) that was used to create this entry
+        parser_name: the name of the parser used to process this entry
         pid: the legacy NOMAD pid of the entry
         external_id: a user provided external id. Usually the id for an entry in an
             external database where the data was imported from
@@ -719,7 +719,7 @@ class Calc(Proc):
         self._is_initial_processing: bool = False
         self._upload: Upload = None
         self._upload_files: StagingUploadFiles = None
-        self._calc_proc_logs: List[Any] = None
+        self._proc_logs: List[Any] = None
 
         self._entry_metadata: EntryMetadata = None
         self._perform_index = True
@@ -768,7 +768,7 @@ class Calc(Proc):
         entry_metadata.nomad_version = config.meta.version
         entry_metadata.nomad_commit = config.meta.commit
         entry_metadata.calc_hash = self.upload_files.calc_hash(self.mainfile)
-        entry_metadata.files = self.upload_files.calc_files(self.mainfile)
+        entry_metadata.files = self.upload_files.entry_files(self.mainfile)
         entry_metadata.last_processing_time = datetime.utcnow()
         entry_metadata.processing_errors = []
 
@@ -827,20 +827,20 @@ class Calc(Proc):
                 and the archive files will be read from this object.
         '''
         assert upload.upload_id == self.upload_id, 'Mismatching upload_id encountered'
-        archive = upload.upload_files.read_archive(self.calc_id)
+        archive = upload.upload_files.read_archive(self.entry_id)
         try:
             # instead of loading the whole archive, it should be enough to load the
             # parts that are referenced by section_metadata/EntryMetadata
             # TODO somehow it should determine which root setions too load from the metainfo
             # or configuration
-            calc_archive = archive[self.calc_id]
-            entry_archive_dict = {section_metadata: calc_archive[section_metadata].to_dict()}
-            if section_workflow in calc_archive:
-                for workflow in calc_archive[section_workflow]:
+            entry_archive = archive[self.entry_id]
+            entry_archive_dict = {section_metadata: entry_archive[section_metadata].to_dict()}
+            if section_workflow in entry_archive:
+                for workflow in entry_archive[section_workflow]:
                     entry_archive_dict.setdefault(section_workflow, [])
                     entry_archive_dict[section_workflow].append(workflow.to_dict())
-            if section_results in calc_archive:
-                entry_archive_dict[section_results] = calc_archive[section_results].to_dict()
+            if section_results in entry_archive:
+                entry_archive_dict[section_results] = entry_archive[section_results].to_dict()
             entry_metadata = datamodel.EntryArchive.m_from_dict(entry_archive_dict)[section_metadata]
             self._apply_metadata_from_mongo(upload, entry_metadata)
             return entry_metadata
@@ -870,23 +870,23 @@ class Calc(Proc):
 
     def get_logger(self, **kwargs):
         '''
-        Returns a wrapped logger that additionally saves all entries to the calculation
+        Returns a wrapped logger that additionally saves all log events to the entry
         processing log in the archive.
         '''
         logger = super().get_logger()
         logger = logger.bind(
-            upload_id=self.upload_id, mainfile=self.mainfile, calc_id=self.calc_id,
+            upload_id=self.upload_id, mainfile=self.mainfile, calc_id=self.entry_id,
             parser=self.parser_name, **kwargs)
 
-        if self._calc_proc_logs is None:
-            self._calc_proc_logs = []
+        if self._proc_logs is None:
+            self._proc_logs = []
 
-        def save_to_calc_log(logger, method_name, event_dict):
+        def save_to_entry_log(logger, method_name, event_dict):
             try:
                 # sanitize the event_dict, because all kinds of values might have been added
                 dump_dict = {key: str(value) for key, value in event_dict.items()}
                 dump_dict.update(level=method_name.upper())
-                self._calc_proc_logs.append(dump_dict)
+                self._proc_logs.append(dump_dict)
 
                 if method_name == 'error':
                     error = event_dict.get('event', None)
@@ -899,14 +899,14 @@ class Calc(Proc):
 
             return event_dict
 
-        return wrap_logger(logger, processors=_log_processors + [save_to_calc_log])
+        return wrap_logger(logger, processors=_log_processors + [save_to_entry_log])
 
     @process(is_child=True)
-    def process_calc(self):
-        ''' Processes or reprocesses a calculation. '''
+    def process_entry(self):
+        ''' Processes or reprocesses an entry. '''
         logger = self.get_logger()
         if self.upload is None:
-            logger.error('calculation upload does not exist')
+            logger.error('upload does not exist')
 
         # Load the reprocess settings from the upload, and apply defaults
         settings = config.reprocess.customize(self.upload.reprocess_settings)
@@ -948,8 +948,8 @@ class Calc(Proc):
 
             if len(self._entry_metadata.files) >= config.auxfile_cutoff:
                 self.warning(
-                    'This calc has many aux files in its directory. '
-                    'Have you placed many calculations in the same directory?')
+                    'This entry has many aux files in its directory. '
+                    'Have you placed many mainfiles in the same directory?')
 
             self.parsing()
             self.normalizing()
@@ -958,8 +958,8 @@ class Calc(Proc):
             self.set_last_status_message('Preserving entry data')
             try:
                 upload_files = PublicUploadFiles(self.upload_id)
-                with upload_files.read_archive(self.calc_id) as archive:
-                    self.upload_files.write_archive(self.calc_id, archive[self.calc_id].to_dict())
+                with upload_files.read_archive(self.entry_id) as archive:
+                    self.upload_files.write_archive(self.entry_id, archive[self.entry_id].to_dict())
 
             except Exception as e:
                 logger.error('could not copy archive for non-reprocessed entry', exc_info=e)
@@ -1081,19 +1081,19 @@ class Calc(Proc):
         if self.upload.publish_directly:
             self._entry_metadata.published |= True
 
-        # persist the calc metadata
-        with utils.timer(logger, 'calc metadata saved'):
+        # persist the entry metadata
+        with utils.timer(logger, 'entry metadata saved'):
             self._apply_metadata_to_mongo_entry(self._entry_metadata)
 
         # index in search
         if self._perform_index:
-            with utils.timer(logger, 'calc metadata indexed'):
+            with utils.timer(logger, 'entry metadata indexed'):
                 assert self._parser_results.metadata == self._entry_metadata
                 search.index(self._parser_results)
 
         # persist the archive
         with utils.timer(
-                logger, 'calc archived',
+                logger, 'entry archived',
                 input_size=self.mainfile_file.size) as log_data:
 
             archive_size = self.write_archive(self._parser_results)
@@ -1115,8 +1115,8 @@ class Calc(Proc):
                     if log.get('level') != 'DEBUG']
             return logs
 
-        if self._calc_proc_logs is None:
-            self._calc_proc_logs = []
+        if self._proc_logs is None:
+            self._proc_logs = []
 
         if archive is not None:
             archive = archive.m_copy()
@@ -1126,21 +1126,21 @@ class Calc(Proc):
         if archive.metadata is None:
             archive.m_add_sub_section(datamodel.EntryArchive.metadata, self._entry_metadata)
 
-        archive.processing_logs = filter_processing_logs(self._calc_proc_logs)
+        archive.processing_logs = filter_processing_logs(self._proc_logs)
 
         # save the archive msg-pack
         try:
-            return self.upload_files.write_archive(self.calc_id, archive.m_to_dict())
+            return self.upload_files.write_archive(self.entry_id, archive.m_to_dict())
         except Exception as e:
             # most likely failed due to domain data, try to write metadata and processing logs
             archive = datamodel.EntryArchive(m_context=self.upload.archive_context)
             archive.m_add_sub_section(datamodel.EntryArchive.metadata, self._entry_metadata)
-            archive.processing_logs = filter_processing_logs(self._calc_proc_logs)
-            self.upload_files.write_archive(self.calc_id, archive.m_to_dict())
+            archive.processing_logs = filter_processing_logs(self._proc_logs)
+            self.upload_files.write_archive(self.entry_id, archive.m_to_dict())
             raise
 
     def __str__(self):
-        return 'calc %s calc_id=%s upload_id%s' % (super().__str__(), self.calc_id, self.upload_id)
+        return 'entry %s entry_id=%s upload_id=%s' % (super().__str__(), self.entry_id, self.upload_id)
 
 
 class Upload(Proc):
@@ -1287,7 +1287,7 @@ class Upload(Proc):
             main_author=main_author_user)
 
     def delete(self):
-        ''' Deletes this upload process state entry and its calcs. '''
+        ''' Deletes this upload and its entries (mongo documents only). '''
         Calc.objects(upload_id=self.upload_id).delete()
         super().delete()
 
@@ -1303,8 +1303,8 @@ class Upload(Proc):
                 search.delete_upload(self.upload_id, refresh=True)
 
             with utils.timer(logger, 'upload partial archives deleted'):
-                calc_ids = [calc.calc_id for calc in Calc.objects(upload_id=self.upload_id)]
-                delete_partial_archives_from_mongo(calc_ids)
+                entry_ids = [entry.entry_id for entry in Calc.objects(upload_id=self.upload_id)]
+                delete_partial_archives_from_mongo(entry_ids)
 
             with utils.timer(logger, 'upload files deleted'):
                 self.upload_files.delete()
@@ -1327,7 +1327,7 @@ class Upload(Proc):
         Moves the upload out of staging to the public area. It will
         pack the staging upload files in to public upload files.
         '''
-        assert self.processed_calcs > 0
+        assert self.processed_entries_count > 0
 
         logger = self.get_logger(upload_size=self.upload_files.size)
         logger.info('started to publish')
@@ -1607,7 +1607,7 @@ class Upload(Proc):
 
         try:
             metadata_handler = None
-            if not self.published and not self.total_calcs:
+            if not self.published and not self.total_entries_count:
                 # In staging and no entries yet -> import upload level metadata from files if provided
                 metadata_handler = MetadataEditRequestHandler(
                     logger, self.main_author_user, self.staging_upload_files, self.upload_id)
@@ -1623,29 +1623,29 @@ class Upload(Proc):
                 with utils.timer(logger, 'existing entries scanned'):
                     for entry in Calc.objects(upload_id=self.upload_id):
                         if entry.process_running:
-                            processing_entries.append(entry.calc_id)
+                            processing_entries.append(entry.entry_id)
                         if self._passes_path_filter(entry.mainfile, path_filter):
-                            old_entries.add(entry.calc_id)
+                            old_entries.add(entry.entry_id)
 
                 with utils.timer(logger, 'matching completed'):
                     for filename, parser in self.match_mainfiles(path_filter):
-                        calc_id = generate_entry_id(self.upload_id, filename)
+                        entry_id = generate_entry_id(self.upload_id, filename)
 
                         try:
-                            entry = Calc.get(calc_id)
+                            entry = Calc.get(entry_id)
                             # Matching entry already exists.
                             # Ensure that we update the parser if in staging
                             if not self.published and parser.name != entry.parser_name:
                                 entry.parser_name = parser.name
                                 entry.save()
 
-                            old_entries.remove(calc_id)
+                            old_entries.remove(entry_id)
                         except KeyError:
                             # No existing entry found
                             if not self.published or reprocess_settings.add_matched_entries_to_published:
                                 # Create new entry
                                 entry = Calc.create(
-                                    calc_id=calc_id,
+                                    calc_id=entry_id,
                                     mainfile=filename,
                                     parser_name=parser.name,
                                     worker_hostname=self.worker_hostname,
@@ -1665,9 +1665,9 @@ class Upload(Proc):
                         if not self.published or reprocess_settings.delete_unmatched_published_entries:
                             entries_to_delete: List[str] = list(old_entries)
                             delete_partial_archives_from_mongo(entries_to_delete)
-                            for calc_id in entries_to_delete:
-                                search.delete_entry(entry_id=calc_id, update_materials=True)
-                                entry = Calc.get(calc_id)
+                            for entry_id in entries_to_delete:
+                                search.delete_entry(entry_id=entry_id, update_materials=True)
+                                entry = Calc.get(entry_id)
                             entry.delete()
 
                 # No entries *should* be processing, but if there are, we reset them to
@@ -1698,7 +1698,7 @@ class Upload(Proc):
             logger = self.get_logger()
             next_level: int = None
             next_entries: List[Calc] = None
-            with utils.timer(logger, 'calcs processing called'):
+            with utils.timer(logger, 'entries processing called'):
                 # Determine what the next level is and which entries belongs to this level
                 for entry in Calc.objects(upload_id=self.upload_id):
                     parser = parser_dict.get(entry.parser_name)
@@ -1714,12 +1714,12 @@ class Upload(Proc):
                                 next_entries.append(entry)
                 if next_entries:
                     self.parser_level = next_level
-                    # Trigger calcs
+                    # Trigger processing
                     logger.info('Triggering next level', next_level=next_level, n_entries=len(next_entries))
                     self.set_last_status_message(f'Parsing level {next_level}')
                     with utils.timer(logger, 'processes triggered'):
                         for entry in next_entries:
-                            entry.process_calc()
+                            entry.process_entry()
                     return True
             return False
         except Exception as e:
@@ -1766,14 +1766,14 @@ class Upload(Proc):
             self.last_update = datetime.utcnow()
             self.save()
 
-        if self.publish_directly and not self.published and self.processed_calcs > 0:
+        if self.publish_directly and not self.published and self.processed_entries_count > 0:
             logger = self.get_logger(upload_size=self.upload_files.size)
             logger.info('started to publish upload directly')
 
             with utils.lnr(logger, 'publish failed'):
-                with self.entries_metadata() as calcs:
+                with self.entries_metadata() as entries:
                     with utils.timer(logger, 'upload staging files packed'):
-                        self.staging_upload_files.pack(calcs, with_embargo=self.with_embargo)
+                        self.staging_upload_files.pack(entries, with_embargo=self.with_embargo)
 
                 with utils.timer(logger, 'upload staging files deleted'):
                     self.staging_upload_files.delete()
@@ -1820,39 +1820,30 @@ class Upload(Proc):
                 with utils.timer(self.get_logger(), 'upload staging files deleted'):
                     staging_upload_files.delete()
 
-    def get_calc(self, calc_id) -> Calc:
-        ''' Returns the upload calc with the given id or ``None``. '''
-        return Calc.objects(upload_id=self.upload_id, calc_id=calc_id).first()
+    def get_entry(self, entry_id) -> Calc:
+        ''' Returns this upload's entry with the given id, or ``None`` if no such entry exists. '''
+        return Calc.objects(upload_id=self.upload_id, calc_id=entry_id).first()
 
     @property
-    def processed_calcs(self) -> int:
-        '''
-        The number of successfully or not successfully processed calculations. I.e.
-        calculations that have finished processing.
-        '''
+    def processed_entries_count(self) -> int:
+        ''' The number of entries that have finished processing, i.e. whose process_status is SUCCESS or FAILURE. '''
         return Calc.objects(
             upload_id=self.upload_id, process_status__in=[
                 ProcessStatus.SUCCESS, ProcessStatus.FAILURE]).count()
 
     @property
-    def total_calcs(self) -> int:
-        ''' The number of all calculations. '''
+    def total_entries_count(self) -> int:
+        ''' The total number of entries for this upload (regardless of process status). '''
         return Calc.objects(upload_id=self.upload_id).count()
 
     @property
-    def failed_calcs(self) -> int:
-        ''' The number of calculations with failed processing. '''
+    def failed_entries_count(self) -> int:
+        ''' The number of entries with failed processing. '''
         return Calc.objects(upload_id=self.upload_id, process_status=ProcessStatus.FAILURE).count()
 
-    @property
-    def processing_calcs(self) -> int:
-        ''' The number of calculations currently processing. '''
-        return Calc.objects(
-            upload_id=self.upload_id, process_status__in=ProcessStatus.STATUSES_PROCESSING).count()
-
-    def all_calcs(self, start, end, order_by=None) -> Sequence[Calc]:
+    def entries_sublist(self, start, end, order_by=None) -> Sequence[Calc]:
         '''
-        Returns all calculations, paginated and ordered.
+        Returns all entries, paginated and ordered.
 
         Arguments:
             start: the start index of the requested page
@@ -1868,15 +1859,8 @@ class Upload(Proc):
         return query.order_by(*order_by)
 
     @property
-    def outdated_calcs(self) -> Sequence[Calc]:
-        ''' All successfully processed and outdated calculations. '''
-        return Calc.objects(
-            upload_id=self.upload_id, process_status=ProcessStatus.SUCCESS,
-            nomad_version__ne=config.meta.version)
-
-    @property
-    def calcs(self) -> Sequence[Calc]:
-        ''' All successfully processed calculations. '''
+    def successful_entries(self) -> Sequence[Calc]:
+        ''' All successfully processed entries. '''
         return Calc.objects(upload_id=self.upload_id, process_status=ProcessStatus.SUCCESS)
 
     @contextmanager
@@ -1886,10 +1870,10 @@ class Upload(Proc):
         processing upload's entries into list of :class:`EntryMetadata` objects.
         '''
         try:
-            # read all calc objects first to avoid missing curser errors
+            # read all entry objects first to avoid missing cursor errors
             yield [
-                calc.full_entry_metadata(self)
-                for calc in list(Calc.objects(upload_id=self.upload_id))]
+                entry.full_entry_metadata(self)
+                for entry in list(Calc.objects(upload_id=self.upload_id))]
 
         finally:
             self.upload_files.close()  # Because full_entry_metadata reads the archive files.
@@ -1899,7 +1883,7 @@ class Upload(Proc):
         Returns a list of :class:`EntryMetadata` containing the mongo metadata
         only, for all entries of this upload.
         '''
-        return [calc.mongo_metadata(self) for calc in Calc.objects(upload_id=self.upload_id)]
+        return [entry.mongo_metadata(self) for entry in Calc.objects(upload_id=self.upload_id)]
 
     @process()
     def edit_upload_metadata(self, edit_request_json: Dict[str, Any], user_id: str):
@@ -1938,7 +1922,7 @@ class Upload(Proc):
             entry_updates = handler.get_entry_mongo_metadata(self, entry)
             entry_updates['last_edit_time'] = last_edit_time
             # Add mongo entry update operation to bulk write list
-            entry_mongo_writes.append(UpdateOne({'_id': entry.calc_id}, {'$set': entry_updates}))
+            entry_mongo_writes.append(UpdateOne({'_id': entry.entry_id}, {'$set': entry_updates}))
             # Create updates for ES
             entry_metadata = entry.mongo_metadata(self)
             if upload_updates:
@@ -1960,7 +1944,7 @@ class Upload(Proc):
                 assert not failed_es, f'Failed to update ES, there were {failed_es} fails'
 
     def entry_ids(self) -> List[str]:
-        return [calc.calc_id for calc in Calc.objects(upload_id=self.upload_id)]
+        return [entry.entry_id for entry in Calc.objects(upload_id=self.upload_id)]
 
     def export_bundle(
             self, export_as_stream: bool, export_path: str,
@@ -2016,7 +2000,7 @@ class Upload(Proc):
                 include_archive_files=include_archive_files,
                 include_datasets=include_datasets),
             upload=self.to_mongo().to_dict(),
-            entries=[entry.to_mongo().to_dict() for entry in self.calcs])
+            entries=[entry.to_mongo().to_dict() for entry in self.successful_entries])
         # Handle datasets
         dataset_ids: Set[str] = set()
         for entry_dict in bundle_info['entries']:
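
A minimal usage sketch of the renamed Upload accessors introduced above, assuming an already processed, non-empty upload; the upload id is hypothetical and the ProcessStatus import path is an assumption (the Upload import path appears verbatim in the test changes below):

    from nomad.processing import Upload, ProcessStatus  # ProcessStatus import path assumed

    upload = Upload.get('some_upload_id')  # hypothetical id
    print(upload.total_entries_count, upload.processed_entries_count, upload.failed_entries_count)

    # page through the first ten entries; on a fully processed upload they have all finished processing
    for entry in upload.entries_sublist(0, 10):
        assert entry.process_status in (ProcessStatus.SUCCESS, ProcessStatus.FAILURE)

    # look up a single entry of this upload by its id (returns None if there is no such entry)
    first_id = upload.entry_ids()[0]
    entry = upload.get_entry(first_id)

    # entries_metadata yields the full EntryMetadata objects and closes the upload files afterwards
    with upload.entries_metadata() as entries_metadata:
        print(len(entries_metadata))
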
diff --git a/nomad/search.py b/nomad/search.py
index de69572e58..0e0f7048c7 100644
--- a/nomad/search.py
+++ b/nomad/search.py
@@ -222,7 +222,7 @@ def update_metadata(
 
             yield dict(
                 doc=entry_doc,
-                _id=entry_metadata.calc_id,
+                _id=entry_metadata.entry_id,
                 _type=entry_index.doc_type.name,
                 _index=entry_index.index_name,
                 _op_type='update')
@@ -255,7 +255,7 @@ def delete_entry(entry_id: str, index: str = None, refresh: bool = False, **kwar
     '''
     Deletes the given entry.
     '''
-    delete_by_query(query=dict(calc_id=entry_id), **kwargs)
+    delete_by_query(query=dict(entry_id=entry_id), **kwargs)
 
     if refresh:
         _refresh()
@@ -1232,7 +1232,7 @@ def search(
 def search_iterator(
         owner: str = 'public',
         query: Union[Query, EsQuery] = None,
-        order_by: str = 'calc_id',
+        order_by: str = 'entry_id',
         required: MetadataRequired = None,
         aggregations: Dict[str, Aggregation] = {},
         user_id: str = None,
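
A short sketch of the renamed search-side identifiers; search, search_iterator and delete_entry are defined in this module, the concrete ids are hypothetical, and the dict-style query follows the call patterns used in the tests further below:

    from nomad.search import search, search_iterator, delete_entry

    # point queries now use entry_id instead of calc_id
    results = search(owner='public', query=dict(entry_id='some_entry_id'))  # hypothetical id
    print(results.pagination.total)

    # the iterator pages through matching results, ordered by the renamed default key entry_id
    for result in search_iterator(owner='public', query=dict(upload_id='some_upload_id'), order_by='entry_id'):
        pass  # one search result per iteration

    # remove a single entry from the search index and refresh the index
    delete_entry(entry_id='some_entry_id', refresh=True)
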
diff --git a/nomad/utils/__init__.py b/nomad/utils/__init__.py
index 04cf4461e0..e28086e029 100644
--- a/nomad/utils/__init__.py
+++ b/nomad/utils/__init__.py
@@ -23,8 +23,8 @@
 
 Logging in nomad is structured. Structured logging means that log entries contain
 dictionaries with quantities related to respective events. E.g. having the code,
-parser, parser version, calc_id, mainfile, etc. for all events that happen during
-calculation processing. This means the :func:`get_logger` and all logger functions
+parser, parser version, entry_id, mainfile, etc. for all events that happen during
+entry processing. This means the :func:`get_logger` and all logger functions
 take keyword arguments for structured data. Otherwise :func:`get_logger` can
 be used similar to the standard *logging.getLogger*.
 
@@ -74,7 +74,7 @@ def dump_json(data):
 
 
 default_hash_len = 28
-''' Length of hashes and hash-based ids (e.g. calc, upload) in nomad. '''
+''' Length of hashes and hash-based ids (e.g. entry_id, upload_id) in nomad. '''
 
 try:
     from . import structlogging
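
A minimal sketch of the structured logging described in the docstring above; the concrete keys and values (upload_id, entry_id, parser, mainfile) are illustrative only:

    from nomad import utils

    # keyword arguments become structured data attached to all events of this logger
    logger = utils.get_logger(__name__, upload_id='some_upload_id', entry_id='some_entry_id')

    # the logger functions also accept per-event structured keyword arguments
    logger.info('entry processing started', parser='parsers/vasp', mainfile='vasprun.xml')
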
diff --git a/nomad/utils/structlogging.py b/nomad/utils/structlogging.py
index 22797a9af7..5bacc3fdcf 100644
--- a/nomad/utils/structlogging.py
+++ b/nomad/utils/structlogging.py
@@ -23,8 +23,8 @@
 
 Logging in nomad is structured. Structured logging means that log entries contain
 dictionaries with quantities related to respective events. E.g. having the code,
-parser, parser version, calc_id, mainfile, etc. for all events that happen during
-calculation processing. This means the :func:`get_logger` and all logger functions
+parser, parser version, entry_id, mainfile, etc. for all events that happen during
+entry processing. This means the :func:`get_logger` and all logger functions
 take keyword arguments for structured data. Otherwise :func:`get_logger` can
 be used similar to the standard *logging.getLogger*.
 
diff --git a/tests/app/test_optimade.py b/tests/app/test_optimade.py
index ca3ec81e83..c329dded45 100644
--- a/tests/app/test_optimade.py
+++ b/tests/app/test_optimade.py
@@ -30,7 +30,7 @@ from tests.utils import ExampleData
 
 
 def test_get_entry(published: Upload):
-    entry_id = list(published.calcs)[0].calc_id
+    entry_id = list(published.successful_entries)[0].entry_id
     with published.upload_files.read_archive(entry_id) as archive:
         data = archive[entry_id]
         assert data['metadata']['optimade'] is not None
@@ -203,7 +203,7 @@ def test_list_endpoint_response_fields(client, example_structures):
 
 
 def test_single_endpoint_response_fields(client, example_structures):
-    rv = client.get('/optimade/structures/%s?response_fields=nelements,elements' % 'test_calc_id_1')
+    rv = client.get('/optimade/structures/%s?response_fields=nelements,elements' % 'test_entry_id_1')
     assert rv.status_code == 200, json.dumps(rv.json(), indent=2)
     data = rv.json()
     ref_elements = ['H', 'O']
@@ -214,7 +214,7 @@ def test_single_endpoint_response_fields(client, example_structures):
 
 
 def test_single_endpoint(client, example_structures):
-    rv = client.get('/optimade/structures/%s' % 'test_calc_id_1')
+    rv = client.get('/optimade/structures/%s' % 'test_entry_id_1')
     assert rv.status_code == 200
     data = rv.json()
     for key in ['type', 'id', 'attributes']:
@@ -276,7 +276,7 @@ def test_structures_endpoint(client, example_structures):
 
 
 def test_structure_endpoint(client, example_structures):
-    rv = client.get('/optimade/structures/%s' % 'test_calc_id_1')
+    rv = client.get('/optimade/structures/%s' % 'test_entry_id_1')
     assert rv.status_code == 200
     data = rv.json()
     assert data.get('data') is not None
@@ -297,7 +297,7 @@ def test_nmd_properties_info(client, example_structures):
 
 
 def test_nmd_properties(client, example_structures):
-    rv = client.get('/optimade/structures/%s' % 'test_calc_id_1?response_fields=_nmd_results_material_elements,_nmd_results_material_structural_type,_nmd_doesnotexist,_nmd_archive_url')
+    rv = client.get('/optimade/structures/%s' % 'test_entry_id_1?response_fields=_nmd_results_material_elements,_nmd_results_material_structural_type,_nmd_doesnotexist,_nmd_archive_url')
     assert rv.status_code == 200
     data = rv.json()
     assert data.get('data') is not None
@@ -312,7 +312,7 @@ def test_nmd_properties(client, example_structures):
 
 def test_nmd_properties_include_all(client, example_structures):
     all_fields = [f'_nmd_{name}' for name in provider_specific_fields()]
-    rv = client.get(f'/optimade/structures/test_calc_id_1?response_fields={",".join(all_fields)}')
+    rv = client.get(f'/optimade/structures/test_entry_id_1?response_fields={",".join(all_fields)}')
     assert rv.status_code == 200
     data = rv.json()
     assert data.get('data') is not None
diff --git a/tests/app/v1/routers/test_uploads.py b/tests/app/v1/routers/test_uploads.py
index c5ce3f8d4b..368626fa8f 100644
--- a/tests/app/v1/routers/test_uploads.py
+++ b/tests/app/v1/routers/test_uploads.py
@@ -1326,11 +1326,11 @@ def test_post_upload_action_publish_to_central_nomad(
 
         old_upload = Upload.get(upload_id)
         new_upload = Upload.get(upload_id + suffix)
-        assert len(old_upload.calcs) == len(new_upload.calcs) == 1
+        assert len(old_upload.successful_entries) == len(new_upload.successful_entries) == 1
         if embargo_length is None:
             embargo_length = old_upload.embargo_length
-        old_calc = old_upload.calcs[0]
-        new_calc = new_upload.calcs[0]
+        old_calc = old_upload.successful_entries[0]
+        new_calc = new_upload.successful_entries[0]
         old_calc_metadata_dict = old_calc.full_entry_metadata(old_upload).m_to_dict()
         new_calc_metadata_dict = new_calc.full_entry_metadata(new_upload).m_to_dict()
         for k, v in old_calc_metadata_dict.items():
diff --git a/tests/conftest.py b/tests/conftest.py
index 2c4984bd19..90f0eef7df 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -676,12 +676,12 @@ def oasis_publishable_upload(
     monkeypatch.setattr('nomad.config.oasis.central_nomad_api_url', '/api')
 
     # create a dataset to also test this aspect of oasis uploads
-    calc = non_empty_processed.calcs[0]
+    entry = non_empty_processed.successful_entries[0]
     datamodel.Dataset(
         dataset_id='dataset_id', dataset_name='dataset_name',
         user_id=test_user.user_id).a_mongo.save()
-    calc.datasets = ['dataset_id']
-    calc.save()
+    entry.datasets = ['dataset_id']
+    entry.save()
     return non_empty_processed.upload_id, suffix
 
 
diff --git a/tests/processing/test_data.py b/tests/processing/test_data.py
index 6b63a07700..8cbd7ae4f0 100644
--- a/tests/processing/test_data.py
+++ b/tests/processing/test_data.py
@@ -88,35 +88,35 @@ def assert_processing(upload: Upload, published: bool = False, process='process_
     else:
         assert isinstance(upload_files, StagingUploadFiles)
 
-    for calc in Calc.objects(upload_id=upload.upload_id):
-        assert calc.parser_name is not None
-        assert calc.mainfile is not None
-        assert calc.process_status == ProcessStatus.SUCCESS
+    for entry in Calc.objects(upload_id=upload.upload_id):
+        assert entry.parser_name is not None
+        assert entry.mainfile is not None
+        assert entry.process_status == ProcessStatus.SUCCESS
 
-        with upload_files.read_archive(calc.calc_id) as archive:
-            calc_archive = archive[calc.calc_id]
-            assert 'run' in calc_archive
-            assert 'metadata' in calc_archive
-            assert 'processing_logs' in calc_archive
+        with upload_files.read_archive(entry.entry_id) as archive:
+            entry_archive = archive[entry.entry_id]
+            assert 'run' in entry_archive
+            assert 'metadata' in entry_archive
+            assert 'processing_logs' in entry_archive
 
             has_test_event = False
-            for log_data in calc_archive['processing_logs']:
+            for log_data in entry_archive['processing_logs']:
                 for key in ['event', 'calc_id', 'level']:
                     key in log_data
                 has_test_event = has_test_event or log_data['event'] == 'a test log entry'
 
             assert has_test_event
-        assert len(calc.errors) == 0
+        assert len(entry.errors) == 0
 
-        archive = read_partial_archive_from_mongo(calc.calc_id)
+        archive = read_partial_archive_from_mongo(entry.entry_id)
         assert archive.metadata is not None
         assert archive.workflow[0].calculation_result_ref \
             .system_ref.atoms.labels is not None
 
-        with upload_files.raw_file(calc.mainfile) as f:
+        with upload_files.raw_file(entry.mainfile) as f:
             f.read()
 
-        entry_metadata = calc.full_entry_metadata(upload)
+        entry_metadata = entry.full_entry_metadata(upload)
 
         for path in entry_metadata.files:
             with upload_files.raw_file(path) as f:
@@ -127,7 +127,7 @@ def assert_processing(upload: Upload, published: bool = False, process='process_
         assert len(entry_metadata.quantities) > 0
         assert len(entry_metadata.processing_errors) == 0
 
-        assert upload.get_calc(calc.calc_id) is not None
+        assert upload.get_entry(entry.entry_id) is not None
 
         upload_files.close()
 
@@ -165,8 +165,8 @@ def test_processing_with_large_dir(test_user, proc_infra, tmp):
         tmp, mainfiles=['tests/data/proc/templates/template.json'], auxfiles=150)
     upload_id = upload_path[:-4]
     upload = run_processing((upload_id, upload_path), test_user)
-    for calc in upload.calcs:
-        assert len(calc.warnings) == 1
+    for entry in upload.successful_entries:
+        assert len(entry.warnings) == 1
 
 
 def test_publish(non_empty_processed: Upload, no_warn, internal_example_user_metadata, monkeypatch):
@@ -274,22 +274,22 @@ def test_publish_to_central_nomad(
     new_upload = Upload.get(upload_id + suffix)
     new_upload.block_until_complete()
     assert_processing(new_upload, old_upload.published, 'import_bundle')
-    assert len(old_upload.calcs) == len(new_upload.calcs) == 1
+    assert len(old_upload.successful_entries) == len(new_upload.successful_entries) == 1
     if embargo_length is None:
         embargo_length = old_upload.embargo_length
-    old_calc = old_upload.calcs[0]
-    new_calc = new_upload.calcs[0]
-    old_calc_metadata_dict = old_calc.full_entry_metadata(old_upload).m_to_dict()
-    new_calc_metadata_dict = new_calc.full_entry_metadata(new_upload).m_to_dict()
-    for k, v in old_calc_metadata_dict.items():
+    old_entry = old_upload.successful_entries[0]
+    new_entry = new_upload.successful_entries[0]
+    old_entry_metadata_dict = old_entry.full_entry_metadata(old_upload).m_to_dict()
+    new_entry_metadata_dict = new_entry.full_entry_metadata(new_upload).m_to_dict()
+    for k, v in old_entry_metadata_dict.items():
         if k == 'with_embargo':
-            assert new_calc_metadata_dict[k] == (embargo_length > 0)
+            assert new_entry_metadata_dict[k] == (embargo_length > 0)
         elif k not in (
                 'upload_id', 'calc_id', 'upload_create_time', 'entry_create_time',
                 'last_processing_time', 'publish_time', 'embargo_length',
                 'n_quantities', 'quantities'):  # TODO: n_quantities and quantities update problem?
-            assert new_calc_metadata_dict[k] == v, f'Metadata not matching: {k}'
-    assert new_calc.datasets == ['dataset_id']
+            assert new_entry_metadata_dict[k] == v, f'Metadata not matching: {k}'
+    assert new_entry.datasets == ['dataset_id']
     assert old_upload.published_to[0] == config.oasis.central_nomad_deployment_id
     assert new_upload.from_oasis and new_upload.oasis_deployment_id
     assert new_upload.embargo_length == embargo_length
@@ -324,21 +324,21 @@ def test_re_processing(published: Upload, internal_example_user_metadata, monkey
         monkeypatch.setattr('nomad.config.reprocess.use_original_parser', True)
 
     if with_failure == 'before':
-        calc = published.all_calcs(0, 1)[0]
-        calc.process_status = ProcessStatus.FAILURE
-        calc.errors = ['example error']
-        calc.save()
-        assert published.failed_calcs > 0
+        entry = published.entries_sublist(0, 1)[0]
+        entry.process_status = ProcessStatus.FAILURE
+        entry.errors = ['example error']
+        entry.save()
+        assert published.failed_entries_count > 0
 
     assert published.published
     assert published.upload_files.to_staging_upload_files() is None
 
     old_upload_time = published.last_update
-    first_calc: Calc = published.all_calcs(0, 1)[0]
-    old_calc_time = first_calc.last_processing_time
+    first_entry: Calc = published.entries_sublist(0, 1)[0]
+    old_entry_time = first_entry.last_processing_time
 
-    with published.upload_files.read_archive(first_calc.calc_id) as archive:
-        archive[first_calc.calc_id]['processing_logs']
+    with published.upload_files.read_archive(first_entry.entry_id) as archive:
+        archive[first_entry.entry_id]['processing_logs']
 
     old_archive_files = list(
         archive_file
@@ -378,28 +378,28 @@ def test_re_processing(published: Upload, internal_example_user_metadata, monkey
         pass
 
     published.reload()
-    first_calc.reload()
+    first_entry.reload()
 
     # assert new process time
     if with_failure != 'not-matched':
         assert published.last_update > old_upload_time
-        assert first_calc.last_processing_time > old_calc_time
+        assert first_entry.last_processing_time > old_entry_time
 
     # assert new process version
     if with_failure != 'not-matched':
-        assert first_calc.nomad_version == 're_process_test_version'
-        assert first_calc.nomad_commit == 're_process_test_commit'
+        assert first_entry.nomad_version == 're_process_test_version'
+        assert first_entry.nomad_commit == 're_process_test_commit'
 
     # assert changed archive files
     if with_failure == 'after':
-        with published.upload_files.read_archive(first_calc.calc_id) as archive_reader:
-            assert list(archive_reader[first_calc.calc_id].keys()) == ['processing_logs', 'metadata']
-            archive = EntryArchive.m_from_dict(archive_reader[first_calc.calc_id].to_dict())
+        with published.upload_files.read_archive(first_entry.entry_id) as archive_reader:
+            assert list(archive_reader[first_entry.entry_id].keys()) == ['processing_logs', 'metadata']
+            archive = EntryArchive.m_from_dict(archive_reader[first_entry.entry_id].to_dict())
 
     else:
-        with published.upload_files.read_archive(first_calc.calc_id) as archive_reader:
-            assert len(archive_reader[first_calc.calc_id]) > 2  # contains more then logs and metadata
-            archive = EntryArchive.m_from_dict(archive_reader[first_calc.calc_id].to_dict())
+        with published.upload_files.read_archive(first_entry.entry_id) as archive_reader:
+            assert len(archive_reader[first_entry.entry_id]) > 2  # contains more than just logs and metadata
+            archive = EntryArchive.m_from_dict(archive_reader[first_entry.entry_id].to_dict())
 
     # assert maintained user metadata (mongo+es)
     assert_upload_files(published.upload_id, entries, PublicUploadFiles, published=True)
@@ -407,7 +407,7 @@ def test_re_processing(published: Upload, internal_example_user_metadata, monkey
     if with_failure not in ['after', 'not-matched']:
         assert_processing(Upload.get(published.upload_id), published=True)
 
-    # assert changed calc data
+    # assert changed entry data
     if with_failure not in ['after']:
         assert archive.results.material.elements[0] == 'H'
     else:
@@ -451,7 +451,7 @@ def test_re_process_match(non_empty_processed, published, monkeypatch, no_warn):
         upload.publish_upload(embargo_length=0)
         upload.block_until_complete(interval=.01)
 
-    assert upload.total_calcs == 1, upload.total_calcs
+    assert upload.total_entries_count == 1, upload.total_entries_count
 
     if published:
         import zipfile
@@ -467,7 +467,7 @@ def test_re_process_match(non_empty_processed, published, monkeypatch, no_warn):
     upload.process_upload()
     upload.block_until_complete(interval=.01)
 
-    assert upload.total_calcs == 2
+    assert upload.total_entries_count == 2
     if not published:
         assert upload.published == published
         assert not upload.with_embargo
@@ -533,7 +533,7 @@ def test_process_partial(proc_infra, non_empty_processed: Upload, args):
     remove = args.get('remove', [])
     path_filter = args['path_filter']
     expected_result = args['expected_result']
-    old_timestamps = {e.mainfile: e.complete_time for e in non_empty_processed.calcs}
+    old_timestamps = {e.mainfile: e.complete_time for e in non_empty_processed.successful_entries}
     upload_files: StagingUploadFiles = non_empty_processed.upload_files  # type: ignore
     for path in add:
         upload_files.add_rawfiles('tests/data/proc/templates/template.json', path)
@@ -543,7 +543,7 @@ def test_process_partial(proc_infra, non_empty_processed: Upload, args):
     non_empty_processed.block_until_complete()
     search_refresh()  # Process does not wait for search index to be refreshed when deleting
     assert_processing(non_empty_processed)
-    new_timestamps = {e.mainfile: e.complete_time for e in non_empty_processed.calcs}
+    new_timestamps = {e.mainfile: e.complete_time for e in non_empty_processed.successful_entries}
     assert new_timestamps.keys() == expected_result.keys()
     for key, expect_updated in expected_result.items():
         if expect_updated:
@@ -555,7 +555,6 @@ def test_re_pack(published: Upload):
     upload_files: PublicUploadFiles = published.upload_files  # type: ignore
     assert upload_files.access == 'restricted'
     assert published.with_embargo
-    calc: Calc = Calc.objects(upload_id=upload_id).first()
 
     # Lift embargo
     published.embargo_length = 0
@@ -567,9 +566,9 @@ def test_re_pack(published: Upload):
         with upload_files.raw_file(path_info.path) as f:
             f.read()
 
-    for calc in Calc.objects(upload_id=upload_id):
-        with upload_files.read_archive(calc.calc_id) as archive:
-            archive[calc.calc_id].to_dict()
+    for entry in Calc.objects(upload_id=upload_id):
+        with upload_files.read_archive(entry.entry_id) as archive:
+            archive[entry.entry_id].to_dict()
 
     published.reload()
 
@@ -606,25 +605,25 @@ def test_process_failure(monkeypatch, uploaded, function, proc_infra, test_user,
         assert upload.process_status == ProcessStatus.FAILURE
         assert len(upload.errors) > 0
     else:
-        # there is an empty example with no calcs, even if past parsing_all step
+        # there is an empty example with no entries, even if past parsing_all step
         utils.get_logger(__name__).error('fake')
-        if upload.total_calcs > 0:  # pylint: disable=E1101
+        if upload.total_entries_count > 0:  # pylint: disable=E1101
             assert upload.process_status == ProcessStatus.SUCCESS
             assert len(upload.errors) == 0
-            for calc in upload.all_calcs(0, 100):  # pylint: disable=E1101
-                assert calc.process_status == ProcessStatus.FAILURE
-                assert len(calc.errors) > 0
-
-    calc = Calc.objects(upload_id=upload_id).first()
-    if calc is not None:
-        with upload.upload_files.read_archive(calc.calc_id) as archive:
-            calc_archive = archive[calc.calc_id]
-            assert 'metadata' in calc_archive
+            for entry in upload.entries_sublist(0, 100):  # pylint: disable=E1101
+                assert entry.process_status == ProcessStatus.FAILURE
+                assert len(entry.errors) > 0
+
+    entry = Calc.objects(upload_id=upload_id).first()
+    if entry is not None:
+        with upload.upload_files.read_archive(entry.entry_id) as archive:
+            entry_archive = archive[entry.entry_id]
+            assert 'metadata' in entry_archive
             if function != 'cleanup':
-                assert len(calc_archive['metadata']['processing_errors']) > 0
-            assert 'processing_logs' in calc_archive
+                assert len(entry_archive['metadata']['processing_errors']) > 0
+            assert 'processing_logs' in entry_archive
             if function != 'parsing':
-                assert 'run' in calc_archive
+                assert 'run' in entry_archive
 
 
 # consume_ram, segfault, and exit are not testable with the celery test worker
@@ -642,12 +641,12 @@ def test_malicious_parser_failure(proc_infra, failure, test_user, tmp):
     assert len(upload.errors) == 0
     assert upload.process_status == ProcessStatus.SUCCESS
 
-    calcs = Calc.objects(upload_id=upload.upload_id)
-    assert calcs.count() == 1
-    calc = next(calcs)
-    assert not calc.process_running
-    assert calc.process_status == ProcessStatus.FAILURE
-    assert len(calc.errors) == 1
+    entries = Calc.objects(upload_id=upload.upload_id)
+    assert entries.count() == 1
+    entry = next(entries)
+    assert not entry.process_running
+    assert entry.process_status == ProcessStatus.FAILURE
+    assert len(entry.errors) == 1
 
 
 @pytest.mark.timeout(config.tests.default_timeout)
@@ -655,8 +654,8 @@ def test_ems_data(proc_infra, test_user):
     upload = run_processing(('test_ems_upload', 'tests/data/proc/examples_ems.zip'), test_user)
 
     additional_keys = ['results.method.method_name', 'results.material.elements']
-    assert upload.total_calcs == 2
-    assert len(upload.calcs) == 2
+    assert upload.total_entries_count == 2
+    assert len(upload.successful_entries) == 2
 
     with upload.entries_metadata() as entries:
         assert_upload_files(upload.upload_id, entries, StagingUploadFiles, published=False)
@@ -668,8 +667,8 @@ def test_qcms_data(proc_infra, test_user):
     upload = run_processing(('test_qcms_upload', 'tests/data/proc/examples_qcms.zip'), test_user)
 
     additional_keys = ['results.method.simulation.program_name', 'results.material.elements']
-    assert upload.total_calcs == 1
-    assert len(upload.calcs) == 1
+    assert upload.total_entries_count == 1
+    assert len(upload.successful_entries) == 1
 
     with upload.entries_metadata() as entries:
         assert_upload_files(upload.upload_id, entries, StagingUploadFiles, published=False)
@@ -681,8 +680,8 @@ def test_phonopy_data(proc_infra, test_user):
     upload = run_processing(('test_upload', 'tests/data/proc/examples_phonopy.zip'), test_user)
 
     additional_keys = ['results.method.simulation.program_name']
-    assert upload.total_calcs == 2
-    assert len(upload.calcs) == 2
+    assert upload.total_entries_count == 2
+    assert len(upload.successful_entries) == 2
 
     with upload.entries_metadata() as entries:
         assert_upload_files(upload.upload_id, entries, StagingUploadFiles, published=False)
@@ -692,31 +691,31 @@ def test_phonopy_data(proc_infra, test_user):
 def test_read_metadata_from_file(proc_infra, test_user, other_test_user, tmp):
     upload_file = os.path.join(tmp, 'upload.zip')
     with zipfile.ZipFile(upload_file, 'w') as zf:
-        zf.write('tests/data/proc/templates/template.json', 'examples/calc_1/template.json')
-        zf.write('tests/data/proc/templates/template.json', 'examples/calc_2/template.json')
-        zf.write('tests/data/proc/templates/template.json', 'examples/calc_3/template.json')
+        zf.write('tests/data/proc/templates/template.json', 'examples/entry_1/template.json')
+        zf.write('tests/data/proc/templates/template.json', 'examples/entry_2/template.json')
+        zf.write('tests/data/proc/templates/template.json', 'examples/entry_3/template.json')
         zf.write('tests/data/proc/templates/template.json', 'examples/template.json')
-        calc_1 = dict(
-            comment='Calculation 1 of 3',
+        entry_1 = dict(
+            comment='Entry 1 of 3',
             references='http://test1',
             external_id='external_id_1')
-        with zf.open('examples/calc_1/nomad.yaml', 'w') as f: f.write(yaml.dump(calc_1).encode())
-        calc_2 = dict(
-            comment='Calculation 2 of 3',
+        with zf.open('examples/entry_1/nomad.yaml', 'w') as f: f.write(yaml.dump(entry_1).encode())
+        entry_2 = dict(
+            comment='Entry 2 of 3',
             references=['http://test2'],
             external_id='external_id_2')
-        with zf.open('examples/calc_2/nomad.json', 'w') as f: f.write(json.dumps(calc_2).encode())
+        with zf.open('examples/entry_2/nomad.json', 'w') as f: f.write(json.dumps(entry_2).encode())
         metadata = {
             'upload_name': 'my name',
             'coauthors': other_test_user.user_id,
             'references': ['http://test0'],
             'entries': {
-                'examples/calc_3/template.json': {
-                    'comment': 'Calculation 3 of 3',
+                'examples/entry_3/template.json': {
+                    'comment': 'Entry 3 of 3',
                     'references': 'http://test3',
                     'external_id': 'external_id_3'
                 },
-                'examples/calc_1/template.json': {
+                'examples/entry_1/template.json': {
                     'comment': 'root entries comment 1'
                 }
             }
@@ -725,16 +724,16 @@ def test_read_metadata_from_file(proc_infra, test_user, other_test_user, tmp):
 
     upload = run_processing(('test_upload', upload_file), test_user)
 
-    calcs = Calc.objects(upload_id=upload.upload_id)
-    calcs = sorted(calcs, key=lambda calc: calc.mainfile)
+    entries = Calc.objects(upload_id=upload.upload_id)
+    entries = sorted(entries, key=lambda entry: entry.mainfile)
 
-    comment = ['root entries comment 1', 'Calculation 2 of 3', 'Calculation 3 of 3', None]
+    comment = ['root entries comment 1', 'Entry 2 of 3', 'Entry 3 of 3', None]
     external_ids = ['external_id_1', 'external_id_2', 'external_id_3', None]
     references = [['http://test1'], ['http://test2'], ['http://test3'], ['http://test0']]
     expected_coauthors = [other_test_user]
 
-    for i in range(len(calcs)):
-        entry_metadata = calcs[i].full_entry_metadata(upload)
+    for i in range(len(entries)):
+        entry_metadata = entries[i].full_entry_metadata(upload)
         assert entry_metadata.comment == comment[i]
         assert entry_metadata.references == references[i]
         assert entry_metadata.external_id == external_ids[i]
@@ -750,7 +749,7 @@ def test_read_metadata_from_file(proc_infra, test_user, other_test_user, tmp):
 
 def test_skip_matching(proc_infra, test_user):
     upload = run_processing(('test_skip_matching', 'tests/data/proc/skip_matching.zip'), test_user)
-    assert upload.total_calcs == 1
+    assert upload.total_entries_count == 1
 
 
 @pytest.mark.parametrize('url,normalized_url', [
diff --git a/tests/processing/test_edit_metadata.py b/tests/processing/test_edit_metadata.py
index 4408e68ac3..b425725db7 100644
--- a/tests/processing/test_edit_metadata.py
+++ b/tests/processing/test_edit_metadata.py
@@ -63,7 +63,7 @@ def assert_edit_request(user, **kwargs):
     owner = kwargs.get('owner')
     metadata = kwargs.get('metadata')
     entries = kwargs.get('entries')
-    entries_key = kwargs.get('entries_key', 'calc_id')
+    entries_key = kwargs.get('entries_key', 'entry_id')
     verify_only = kwargs.get('verify_only', False)
     expected_error_loc = kwargs.get('expected_error_loc')
     affected_upload_ids = kwargs.get('affected_upload_ids', [upload_id])
@@ -93,11 +93,11 @@ def assert_metadata_edited(
     for upload_id in affected_upload_ids:
         upload = Upload.get(upload_id)
         upload.block_until_complete()
-        for entry in upload.calcs:
+        for entry in upload.successful_entries:
             assert entry.last_edit_time
             assert edit_start is None or entry.last_edit_time.isoformat()[0:22] >= edit_start
             entry_metadata_mongo = entry.mongo_metadata(upload).m_to_dict()
-            entry_metadata_es = search(owner=None, query={'calc_id': entry.calc_id}).data[0]
+            entry_metadata_es = search(owner=None, query={'entry_id': entry.entry_id}).data[0]
             values_to_check = expected_metadata
             for quantity_name, value_expected in values_to_check.items():
                 # Note, the expected value is provided on the "request format"
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 670d08393d..9e202932a3 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -102,7 +102,7 @@ class TestAdmin:
         upload_id = published.upload_id
         published.publish_time = publish_time
         published.save()
-        calc = Calc.objects(upload_id=upload_id).first()
+        entry = Calc.objects(upload_id=upload_id).first()
 
         assert published.upload_files.exists()
         assert published.with_embargo
@@ -118,25 +118,25 @@ class TestAdmin:
         assert not published.with_embargo == lifted
         assert (search(owner='public', query=dict(upload_id=upload_id)).pagination.total > 0) == lifted
         if lifted:
-            with files.UploadFiles.get(upload_id=upload_id).read_archive(calc_id=calc.calc_id) as archive:
-                assert calc.calc_id in archive
+            with files.UploadFiles.get(upload_id=upload_id).read_archive(entry_id=entry.entry_id) as archive:
+                assert entry.entry_id in archive
 
     def test_delete_entry(self, published):
         upload_id = published.upload_id
-        calc = Calc.objects(upload_id=upload_id).first()
+        entry = Calc.objects(upload_id=upload_id).first()
 
         result = invoke_cli(
-            cli, ['admin', 'entries', 'rm', calc.calc_id], catch_exceptions=False)
+            cli, ['admin', 'entries', 'rm', entry.entry_id], catch_exceptions=False)
 
         assert result.exit_code == 0
         assert 'deleting' in result.stdout
         assert Upload.objects(upload_id=upload_id).first() is not None
-        assert Calc.objects(calc_id=calc.calc_id).first() is None
+        assert Calc.objects(calc_id=entry.entry_id).first() is None
 
 
-def transform_for_index_test(calc):
-    calc.comment = 'specific'
-    return calc
+def transform_for_index_test(entry):
+    entry.comment = 'specific'
+    return entry
 
 
 @pytest.mark.usefixtures('reset_config', 'no_warn')
@@ -183,9 +183,9 @@ class TestAdminUploads:
 
     def test_index(self, published):
         upload_id = published.upload_id
-        calc = Calc.objects(upload_id=upload_id).first()
-        calc.comment = 'specific'
-        calc.save()
+        entry = Calc.objects(upload_id=upload_id).first()
+        entry.comment = 'specific'
+        entry.save()
 
         assert search(owner='all', query=dict(comment='specific')).pagination.total == 0
 
@@ -214,20 +214,20 @@ class TestAdminUploads:
     def test_re_process(self, published, monkeypatch):
         monkeypatch.setattr('nomad.config.meta.version', 'test_version')
         upload_id = published.upload_id
-        calc = Calc.objects(upload_id=upload_id).first()
-        assert calc.nomad_version != 'test_version'
+        entry = Calc.objects(upload_id=upload_id).first()
+        assert entry.nomad_version != 'test_version'
 
         result = invoke_cli(
             cli, ['admin', 'uploads', 'process', '--parallel', '2', upload_id], catch_exceptions=False)
 
         assert result.exit_code == 0
         assert 'processing' in result.stdout
-        calc.reload()
-        assert calc.nomad_version == 'test_version'
+        entry.reload()
+        assert entry.nomad_version == 'test_version'
 
     def test_re_pack(self, published, monkeypatch):
         upload_id = published.upload_id
-        calc = Calc.objects(upload_id=upload_id).first()
+        entry = Calc.objects(upload_id=upload_id).first()
         assert published.with_embargo
         published.embargo_length = 0
         published.save()
@@ -237,14 +237,14 @@ class TestAdminUploads:
 
         assert result.exit_code == 0
         assert 're-pack' in result.stdout
-        calc.reload()
+        entry.reload()
         upload_files = files.PublicUploadFiles(upload_id)
         for path_info in upload_files.raw_directory_list(recursive=True, files_only=True):
             with upload_files.raw_file(path_info.path) as f:
                 f.read()
-        for calc in Calc.objects(upload_id=upload_id):
-            with upload_files.read_archive(calc.calc_id) as archive:
-                assert calc.calc_id in archive
+        for entry in Calc.objects(upload_id=upload_id):
+            with upload_files.read_archive(entry.entry_id) as archive:
+                assert entry.entry_id in archive
 
         published.reload()
         assert published.process_status == ProcessStatus.SUCCESS
@@ -269,21 +269,21 @@ class TestAdminUploads:
             for entry_metadata in entries_metadata:
                 assert entry_metadata.main_author.user_id == other_test_user.user_id
 
-    @pytest.mark.parametrize('with_calcs,success,failure', [
+    @pytest.mark.parametrize('with_entries,success,failure', [
         (True, False, False),
         (False, False, False),
         (True, True, False),
         (False, False, True)])
-    def test_reset(self, non_empty_processed, with_calcs, success, failure):
+    def test_reset(self, non_empty_processed, with_entries, success, failure):
         upload_id = non_empty_processed.upload_id
 
         upload = Upload.objects(upload_id=upload_id).first()
-        calc = Calc.objects(upload_id=upload_id).first()
+        entry = Calc.objects(upload_id=upload_id).first()
         assert upload.process_status == ProcessStatus.SUCCESS
-        assert calc.process_status == ProcessStatus.SUCCESS
+        assert entry.process_status == ProcessStatus.SUCCESS
 
         args = ['admin', 'uploads', 'reset']
-        if with_calcs: args.append('--with-calcs')
+        if with_entries: args.append('--with-entries')
         if success: args.append('--success')
         if failure: args.append('--failure')
         args.append(upload_id)
@@ -292,16 +292,16 @@ class TestAdminUploads:
         assert result.exit_code == 0
         assert 'reset' in result.stdout
         upload = Upload.objects(upload_id=upload_id).first()
-        calc = Calc.objects(upload_id=upload_id).first()
+        entry = Calc.objects(upload_id=upload_id).first()
 
         expected_state = ProcessStatus.READY
         if success: expected_state = ProcessStatus.SUCCESS
         if failure: expected_state = ProcessStatus.FAILURE
         assert upload.process_status == expected_state
-        if not with_calcs:
-            assert calc.process_status == ProcessStatus.SUCCESS
+        if not with_entries:
+            assert entry.process_status == ProcessStatus.SUCCESS
         else:
-            assert calc.process_status == expected_state
+            assert entry.process_status == expected_state
 
     @pytest.mark.parametrize('indexed', [True, False])
     def test_integrity_entry_index(self, test_user, mongo, elastic, indexed):
@@ -335,7 +335,7 @@ class TestClient:
     def test_local(self, published_wo_user_metadata, client_with_api_v1):
         result = invoke_cli(
             cli,
-            ['client', 'local', published_wo_user_metadata.calcs[0].calc_id],
+            ['client', 'local', published_wo_user_metadata.successful_entries[0].entry_id],
             catch_exceptions=True)
 
         assert result.exit_code == 0, result.output
diff --git a/tests/test_files.py b/tests/test_files.py
index 1b93c487f6..c7fa21ea36 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -32,7 +32,7 @@ from nomad.files import StagingUploadFiles, PublicUploadFiles, UploadFiles
 from nomad.processing import Upload
 
 
-CalcWithFiles = Tuple[datamodel.EntryMetadata, str]
+EntryWithFiles = Tuple[datamodel.EntryMetadata, str]
 UploadWithFiles = Tuple[str, List[datamodel.EntryMetadata], UploadFiles]
 StagingUploadWithFiles = Tuple[str, List[datamodel.EntryMetadata], StagingUploadFiles]
 PublicUploadWithFiles = Tuple[str, List[datamodel.EntryMetadata], PublicUploadFiles]
@@ -111,47 +111,47 @@ class TestObjects:
         assert os.path.exists(os.path.dirname(file.os_path)) == create
 
 
-example_calc: Dict[str, Any] = {
-    'calc_id': '0',
+example_entry: Dict[str, Any] = {
+    'entry_id': '0',
     'mainfile': 'examples_template/template.json',
     'data': 'value'
 }
-example_calc_id = example_calc['calc_id']
+example_entry_id = example_entry['entry_id']
 
 
-def generate_example_calc(
-        calc_id: int, with_mainfile_prefix: bool, subdirectory: str = None,
-        **kwargs) -> CalcWithFiles:
-    ''' Generate an example calc with :class:`EntryMetadata` and rawfile. '''
+def generate_example_entry(
+        entry_id: int, with_mainfile_prefix: bool, subdirectory: str = None,
+        **kwargs) -> EntryWithFiles:
+    ''' Generate an example entry with :class:`EntryMetadata` and rawfile. '''
 
-    example_calc = datamodel.EntryMetadata(domain='dft', calc_id=str(calc_id))
+    example_entry = datamodel.EntryMetadata(domain='dft', entry_id=str(entry_id))
 
     if with_mainfile_prefix:
-        mainfile = '%d.template.json' % calc_id
+        mainfile = '%d.template.json' % entry_id
     else:
         mainfile = 'template.json'
 
     if subdirectory is not None:
         mainfile = os.path.join(subdirectory, mainfile)
 
-    example_calc.mainfile = mainfile
-    example_calc.m_update(**kwargs)
+    example_entry.mainfile = mainfile
+    example_entry.m_update(**kwargs)
 
     example_file = os.path.join(config.fs.tmp, 'example.zip')
-    example_calc.files = []
+    example_entry.files = []
     with zipfile.ZipFile(example_file, 'w', zipfile.ZIP_DEFLATED) as zf:
         for filepath in example_file_contents:
             filename = os.path.basename(filepath)
             arcname = filename
             if arcname == 'template.json' and with_mainfile_prefix:
-                arcname = '%d.template.json' % calc_id
+                arcname = '%d.template.json' % entry_id
 
             if subdirectory is not None:
                 arcname = os.path.join(subdirectory, arcname)
-            example_calc.files.append(arcname)
+            example_entry.files.append(arcname)
             zf.write(os.path.join(example_directory, filename), arcname)
 
-    return example_calc, example_file
+    return example_entry, example_file
 
 
 def assert_example_files(names, with_mainfile: bool = True):
@@ -172,9 +172,9 @@ def assert_example_files(names, with_mainfile: bool = True):
     assert source == target
 
 
-def assert_example_calc(calc):
-    assert calc is not None
-    assert calc['data'] == example_calc['data']
+def assert_example_entry(entry):
+    assert entry is not None
+    assert entry['data'] == example_entry['data']
 
 
 class UploadFilesFixtures:
@@ -207,15 +207,15 @@ class UploadFilesContract(UploadFilesFixtures):
 
     def test_rawfile(self, test_upload: UploadWithFiles):
         _, entries, upload_files = test_upload
-        for calc in entries:
-            for file_path in calc.files:
+        for entry in entries:
+            for file_path in entry.files:
                 with upload_files.raw_file(file_path) as f:
                     assert len(f.read()) > 0
 
     def test_rawfile_size(self, test_upload: UploadWithFiles):
         _, entries, upload_files = test_upload
-        for calc in entries:
-            for file_path in calc.files:
+        for entry in entries:
+            for file_path in entry.files:
                 assert upload_files.raw_file_size(file_path) > 0
 
     @pytest.mark.parametrize('prefix', [None, 'examples'])
@@ -244,48 +244,48 @@ class UploadFilesContract(UploadFilesFixtures):
     def test_read_archive(self, test_upload: UploadWithFiles, with_access: str):
         _, _, upload_files = test_upload
 
-        with upload_files.read_archive(example_calc_id) as archive:
-            assert archive[example_calc_id].to_dict() == example_archive_contents
+        with upload_files.read_archive(example_entry_id) as archive:
+            assert archive[example_entry_id].to_dict() == example_archive_contents
 
 
-def create_staging_upload(upload_id: str, calc_specs: str, embargo_length: int = 0) -> StagingUploadWithFiles:
+def create_staging_upload(upload_id: str, entry_specs: str, embargo_length: int = 0) -> StagingUploadWithFiles:
     '''
     Create an upload according to given spec. Additional arguments are given to
     the StagingUploadFiles contstructor.
 
     Arguments:
         upload_id: The id that should be given to this test upload.
-        calc_specs: A string that determines the properties of the given upload.
-            With letters determining example calcs being public `p` or restricted `r`.
-            The calcs will be copies of calcs in `example_file`.
-            First calc is at top level, following calcs will be put under 1/, 2/, etc.
-            All calcs with capital `P`/`R` will be put in the same directory under multi/.
+        entry_specs: A string that determines the properties of the given upload.
+            Each letter creates one example entry, either public (`p`) or restricted (`r`).
+            The entries will be copies of the entries in `example_file`.
+            The first entry is placed at the top level; subsequent entries are put under 1/, 2/, etc.
+            Entries given as capital `P`/`R` will all be put in the same directory under multi/.
     '''
     upload_files = StagingUploadFiles(upload_id, create=True)
-    calcs = []
+    entries = []
 
     prefix = 0
-    for calc_spec in calc_specs:
-        is_multi = calc_spec in ['R', 'P']
-        calc_spec = calc_spec.lower()
-        assert (calc_spec == 'r') == (embargo_length > 0)
+    for entry_spec in entry_specs:
+        is_multi = entry_spec in ['R', 'P']
+        entry_spec = entry_spec.lower()
+        assert (entry_spec == 'r') == (embargo_length > 0)
         if is_multi or prefix == 0:
             directory = 'examples_template'
         else:
             directory = os.path.join(str(prefix), 'examples_template')
 
-        calc, calc_file = generate_example_calc(
+        entry, entry_file = generate_example_entry(
             prefix, with_mainfile_prefix=is_multi, subdirectory=directory,
             with_embargo=embargo_length > 0)
 
-        upload_files.add_rawfiles(calc_file)
-        upload_files.write_archive(calc.calc_id, example_archive_contents)
+        upload_files.add_rawfiles(entry_file)
+        upload_files.write_archive(entry.entry_id, example_archive_contents)
 
-        calcs.append(calc)
+        entries.append(entry)
         prefix += 1
 
-    assert len(calcs) == len(calc_specs)
-    return upload_id, calcs, upload_files
+    assert len(entries) == len(entry_specs)
+    return upload_id, entries, upload_files
 
 
 class TestStagingUploadFiles(UploadFilesContract):
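
For orientation, a minimal usage sketch of the `entry_specs` convention documented in the hunk above (not part of the patch; it assumes the raw-file test infrastructure is initialized, e.g. via the `raw_files_infra` fixture, and that the snippet lives in this test module so no imports are needed):

# Sketch only: two public copies of the example entry, the second one placed under 1/.
upload_id, entries, upload_files = create_staging_upload(
    'sketch_upload_id', entry_specs='pp', embargo_length=0)
assert len(entries) == 2
# Each raw file of the generated entries should be readable, as in the contract tests.
with upload_files.raw_file(entries[1].mainfile, 'rb') as f:
    assert len(f.read()) > 0
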
@@ -293,7 +293,7 @@ class TestStagingUploadFiles(UploadFilesContract):
     @pytest.fixture(scope='function', params=['r', 'rr', 'p', 'pp', 'RR', 'PP'])
     def test_upload(self, request, test_upload_id: str) -> StagingUploadWithFiles:
         embargo_length = 12 if 'r' in request.param.lower() else 0
-        return create_staging_upload(test_upload_id, calc_specs=request.param, embargo_length=embargo_length)
+        return create_staging_upload(test_upload_id, entry_specs=request.param, embargo_length=embargo_length)
 
     @pytest.fixture(scope='function')
     def empty_test_upload(self, test_upload_id) -> UploadFiles:
@@ -314,11 +314,11 @@ class TestStagingUploadFiles(UploadFilesContract):
         _, entries, upload_files = test_upload
         upload_files.pack(entries, with_embargo=entries[0].with_embargo)
 
-    @pytest.mark.parametrize('calc_specs', ['r', 'p'])
-    def test_pack_potcar(self, calc_specs):
-        embargo_length = 12 if 'r' in calc_specs.lower() else 0
+    @pytest.mark.parametrize('entry_specs', ['r', 'p'])
+    def test_pack_potcar(self, entry_specs):
+        embargo_length = 12 if 'r' in entry_specs.lower() else 0
         upload_id, entries, upload_files = create_staging_upload(
-            'test_potcar', calc_specs=calc_specs, embargo_length=embargo_length)
+            'test_potcar', entry_specs=entry_specs, embargo_length=embargo_length)
         # Add potcar files: one stripped and one unstripped
         filenames = ('POTCAR', 'POTCAR.stripped')
         for filename in filenames:
@@ -336,12 +336,12 @@ class TestStagingUploadFiles(UploadFilesContract):
                 assert not filename.endswith('.stripped'), 'Only non-stripped file should be removed'
 
     @pytest.mark.parametrize('with_mainfile', [True, False])
-    def test_calc_files(self, test_upload: StagingUploadWithFiles, with_mainfile):
+    def test_entry_files(self, test_upload: StagingUploadWithFiles, with_mainfile):
         _, entries, upload_files = test_upload
-        for calc in entries:
-            mainfile = calc.mainfile
-            calc_files = upload_files.calc_files(mainfile, with_mainfile=with_mainfile)
-            assert_example_files(calc_files, with_mainfile=with_mainfile)
+        for entry in entries:
+            mainfile = entry.mainfile
+            entry_files = upload_files.entry_files(mainfile, with_mainfile=with_mainfile)
+            assert_example_files(entry_files, with_mainfile=with_mainfile)
 
     def test_delete(self, test_upload: StagingUploadWithFiles):
         _, _, upload_files = test_upload
@@ -380,9 +380,9 @@ class TestStagingUploadFiles(UploadFilesContract):
 
 
 def create_public_upload(
-        upload_id: str, calc_specs: str, embargo_length: int = 0, with_upload: bool = True) -> PublicUploadWithFiles:
+        upload_id: str, entry_specs: str, embargo_length: int = 0, with_upload: bool = True) -> PublicUploadWithFiles:
 
-    _, entries, upload_files = create_staging_upload(upload_id, calc_specs, embargo_length)
+    _, entries, upload_files = create_staging_upload(upload_id, entry_specs, embargo_length)
 
     upload_files.pack(entries, with_embargo=embargo_length > 0)
     upload_files.delete()
@@ -399,17 +399,17 @@ class TestPublicUploadFiles(UploadFilesContract):
     @pytest.fixture(scope='function')
     def empty_test_upload(self, test_upload_id: str) -> UploadFiles:
         _, _, upload_files = create_public_upload(
-            test_upload_id, calc_specs='', with_upload=False)
+            test_upload_id, entry_specs='', with_upload=False)
 
         return upload_files
 
     @pytest.fixture(scope='function', params=itertools.product(
         ['r', 'rr', 'p', 'pp', 'RR', 'PP'], [True, False]))
     def test_upload(self, request, test_upload_id: str) -> PublicUploadWithFiles:
-        calc_specs, both_accesses = request.param
-        embargo_length = 12 if 'r' in calc_specs.lower() else 0
+        entry_specs, both_accesses = request.param
+        embargo_length = 12 if 'r' in entry_specs.lower() else 0
         _, entries, upload_files = create_staging_upload(
-            test_upload_id, calc_specs=calc_specs, embargo_length=embargo_length)
+            test_upload_id, entry_specs=entry_specs, embargo_length=embargo_length)
         upload_files.pack(entries, with_embargo=embargo_length > 0)
         upload_files.delete()
         public_upload_files = PublicUploadFiles(test_upload_id)
@@ -471,8 +471,8 @@ class TestPublicUploadFiles(UploadFilesContract):
 
     def test_repack(self, test_upload):
         upload_id, entries, upload_files = test_upload
-        for calc in entries:
-            calc.with_embargo = False
+        for entry in entries:
+            entry.with_embargo = False
         upload_files.re_pack(with_embargo=False)
         assert_upload_files(upload_id, entries, PublicUploadFiles, with_embargo=False)
         assert upload_files.access == 'public'
@@ -481,7 +481,7 @@ class TestPublicUploadFiles(UploadFilesContract):
 
     def test_archive_version_suffix(self, monkeypatch, test_upload_id):
         monkeypatch.setattr('nomad.config.fs.archive_version_suffix', 'test_suffix')
-        _, entries, upload_files = create_staging_upload(test_upload_id, calc_specs='p')
+        _, entries, upload_files = create_staging_upload(test_upload_id, entry_specs='p')
         upload_files.pack(entries, with_embargo=False)
         upload_files.delete()
 
@@ -506,21 +506,21 @@ def assert_upload_files(
     Arguments:
         upload_id: The id of the upload to assert
         cls: The :class:`UploadFiles` subclass that this upload should have
-        n_calcs: The number of expected calcs in the upload
-        **kwargs: Key, value pairs that each calc metadata should have
+        no_archive: If True, it is acceptable that an entry has no archive.
+        **kwargs: Key-value pairs that each entry's metadata should have
     '''
     upload_files = UploadFiles.get(upload_id)
     assert upload_files is not None
     assert isinstance(upload_files, cls)
 
     upload_files = UploadFiles.get(upload_id)
-    for calc in entries:
-        with upload_files.raw_file(calc.mainfile, 'rb') as f:
+    for entry in entries:
+        with upload_files.raw_file(entry.mainfile, 'rb') as f:
             f.read()
 
         try:
-            archive = upload_files.read_archive(calc.calc_id)
-            assert calc.calc_id in archive
+            archive = upload_files.read_archive(entry.entry_id)
+            assert entry.entry_id in archive
 
         except KeyError:
             assert no_archive
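
A short usage sketch for `assert_upload_files` (not part of the patch; it mirrors the pack/delete/assert sequence used in `test_repack` and the public-upload fixtures above, with a made-up upload id):

# Sketch only: publish a single public entry and assert the resulting public files.
upload_id, entries, upload_files = create_staging_upload('sketch_publish', entry_specs='p')
upload_files.pack(entries, with_embargo=False)
upload_files.delete()
assert_upload_files(upload_id, entries, PublicUploadFiles, with_embargo=False)
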
@@ -542,7 +542,7 @@ def create_test_upload_files(
         upload_id: The upload id for the upload. Will generate a random UUID if None.
         archives: A list of :class:`datamodel.EntryArchive` metainfo objects. This will
             be used to determine the mainfiles. Will create respective directories and
-            copy the template calculation to create raw files for each archive.
+            copy the template entry to create raw files for each archive.
             Will also be used to fill the archives in the created upload.
         published: Creates a :class:`PublicUploadFiles` object with published files
             instead of a :class:`StagingUploadFiles` object with staging files. Default
@@ -576,9 +576,9 @@ def create_test_upload_files(
             os.path.join(target, os.path.basename(mainfile)))
 
         # create an archive "file" for each archive
-        calc_id = archive.metadata.calc_id
-        assert calc_id is not None, 'Archives to create test upload must have a calc id'
-        upload_files.write_archive(calc_id, archive.m_to_dict())
+        entry_id = archive.metadata.entry_id
+        assert entry_id is not None, 'Archives to create test upload must have an entry_id'
+        upload_files.write_archive(entry_id, archive.m_to_dict())
 
     # remove the template
     shutil.rmtree(source)
@@ -612,8 +612,8 @@ def test_test_upload_files(raw_files_infra):
     for index in range(0, 3):
         archive = datamodel.EntryArchive()
         metadata = archive.m_create(datamodel.EntryMetadata)
-        metadata.calc_id = 'example_calc_id_%d' % index
-        metadata.mainfile = 'test/test/calc_%d/mainfile_%d.json' % (index, index)
+        metadata.entry_id = 'example_entry_id_%d' % index
+        metadata.mainfile = 'test/test/entry_%d/mainfile_%d.json' % (index, index)
         archives.append(archive)
 
     upload_files = create_test_upload_files(upload_id, archives, embargo_length=0)
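
Continuing the test above as a hedged sketch (not part of the patch): the archives written by `create_test_upload_files` should be readable again via the new `entry_id`-based ids, mirroring the `read_archive` usage in the contract tests earlier in this file:

# Sketch only: read back the archive written for the first generated entry.
with upload_files.read_archive('example_entry_id_0') as archive:
    assert 'example_entry_id_0' in archive
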
diff --git a/tests/utils.py b/tests/utils.py
index db66056525..9f0d2107d0 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -343,13 +343,13 @@ class ExampleData:
             upload_id: str, id: int, h: int, o: int, extra: List[str], periodicity: int,
             optimade: bool = True, metadata: dict = None):
 
-        ''' Creates a calculation in Elastic and Mongodb with the given properties.
+        ''' Creates an entry in Elasticsearch and MongoDB with the given properties.
 
         Does require initialized :func:`elastic_infra` and :func:`mongo_infra`.
 
         Args:
             meta_info: A legacy metainfo env.
-            id: A number to create ``test_calc_id_<number>`` ids.
+            id: A number to create ``test_entry_id_<number>`` ids.
             h: The number of H atoms
             o: The number of O atoms
             extra: A list of further atoms
@@ -386,7 +386,7 @@ class ExampleData:
 
         self.create_entry(
             entry_archive=archive,
-            upload_id=upload_id, calc_id='test_calc_id_%d' % id, domain='dft', **kwargs)
+            upload_id=upload_id, entry_id='test_entry_id_%d' % id, domain='dft', **kwargs)
 
 
 def create_template_upload_file(
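
Finally, a hedged sketch of the renamed id convention provided by the `ExampleData` helper above. Only the keyword arguments and the `test_entry_id_<number>` pattern are taken from the patch; the method name `create_structure`, the default constructor, and the initialized `elastic_infra`/`mongo_infra` fixtures are assumptions:

from tests.utils import ExampleData

# Sketch only (assumed method name): creates an entry with entry_id 'test_entry_id_1'
# in Elasticsearch and MongoDB, built from 2 H and 1 O atoms.
data = ExampleData()
data.create_structure(
    upload_id='test_upload_id', id=1, h=2, o=1, extra=[], periodicity=0)
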
-- 
GitLab