From ba2ccbd2360bcd02c7e789248dc83d496ec06f5f Mon Sep 17 00:00:00 2001
From: Alvin Noe Ladines <ladinesalvinnoe@gmail.com>
Date: Fri, 17 Jan 2020 22:11:14 +0100
Subject: [PATCH] Refactored models in api, added curl command output, and
 small fixes

---
 nomad/app/api/archive.py |  87 +++++++-------
 nomad/app/api/common.py  | 152 +++++++++++++++++++++---
 nomad/app/api/raw.py     |  52 ++++-----
 nomad/app/api/repo.py    | 241 ++++++++++++++-------------------------
 tests/app/test_api.py    |  22 ++--
 5 files changed, 310 insertions(+), 244 deletions(-)

diff --git a/nomad/app/api/archive.py b/nomad/app/api/archive.py
index 8392aaebb0..bdce2388b4 100644
--- a/nomad/app/api/archive.py
+++ b/nomad/app/api/archive.py
@@ -32,8 +32,9 @@ from nomad import utils, search
 
 from .auth import authenticate, create_authorization_predicate
 from .api import api
-from .repo import search_request_parser, add_query
-from .common import calc_route, streamed_zipfile, pagination_model, build_snippet
+from .common import calc_route, streamed_zipfile, search_model, add_pagination_parameters,\
+    add_scroll_parameters, add_search_parameters, apply_search_parameters,\
+    query_api_python, query_api_curl
 
 ns = api.namespace(
     'archive',
@@ -108,39 +109,20 @@ class ArchiveCalcResource(Resource):
             abort(404, message='Calculation %s does not exist.' % archive_id)
 
 
-archives_from_query_parser = search_request_parser.copy()
-archives_from_query_parser.add_argument(
+_archive_download_parser = api.parser()
+add_search_parameters(_archive_download_parser)
+_archive_download_parser.add_argument(
     name='compress', type=bool, help='Use compression on .zip files, default is not.',
     location='args')
 
-archives_from_query_model_fields = {
-    'pagination': fields.Nested(pagination_model, skip_none=True),
-    'scroll': fields.Nested(allow_null=True, skip_none=True, model=api.model('Scroll', {
-        'total': fields.Integer(description='The total amount of hits for the search.'),
-        'scroll_id': fields.String(allow_null=True, description='The scroll_id that can be used to retrieve the next page.'),
-        'size': fields.Integer(help='The size of the returned scroll page.')})),
-    'results': fields.List(fields.Raw, description=(
-        'A list of search results. Each result is a dict with quantities names as key and '
-        'values as values')),
-    'archive_data': fields.Raw(description=('A dict of archive data with calc_ids as keys ')),
-    'code_snippet': fields.String(description=(
-        'A string of python code snippet which can be executed to reproduce the api result.')),
-}
-for group_name, (group_quantity, _) in search.groups.items():
-    archives_from_query_model_fields[group_name] = fields.Nested(api.model('ArchiveDatasets', {
-        'after': fields.String(description='The after value that can be used to retrieve the next %s.' % group_name),
-        'values': fields.Raw(description='A dict with %s as key. The values are dicts with "total" and "examples" keys.' % group_quantity)
-    }), skip_none=True)
-archives_from_query_model = api.model('RepoCalculations', archives_from_query_model_fields)
-
 
 @ns.route('/download')
 class ArchiveDownloadResource(Resource):
     manifest_quantities = ['upload_id', 'calc_id', 'external_id', 'raw_id', 'pid', 'calc_hash']
 
-    @api.doc('archive_zip_download')
+    @api.doc('archive_download')
     @api.response(400, 'Invalid requests, e.g. wrong owner type or bad search parameters')
-    @api.expect(archives_from_query_parser, validate=True)
+    @api.expect(_archive_download_parser, validate=True)
     @api.response(200, 'File(s) send', headers={'Content-Type': 'application/zip'})
     @authenticate(signature_token=True)
     def get(self):
@@ -156,13 +138,13 @@ class ArchiveDownloadResource(Resource):
         The zip file will contain a ``manifest.json`` with the repository meta data.
         """
         try:
-            args = archives_from_query_parser.parse_args()
+            args = _archive_download_parser.parse_args()
             compress = args.get('compress', False)
         except Exception:
             abort(400, message='bad parameter types')
 
         search_request = search.SearchRequest()
-        add_query(search_request, search_request_parser.parse_args())
+        apply_search_parameters(search_request, args)
 
         calcs = search_request.execute_scan(order_by='upload_id')
 
@@ -216,12 +198,31 @@ class ArchiveDownloadResource(Resource):
             generator(), zipfile_name='nomad_archive.zip', compress=compress)
 
 
+_archive_query_parser = api.parser()
+add_pagination_parameters(_archive_query_parser)
+add_scroll_parameters(_archive_query_parser)
+
+_archive_query_model_fields = {
+    'results': fields.List(fields.Raw, description=(
+        'A list of search results. Each result is a dict with quantities names as key and '
+        'values as values')),
+    'python': fields.String(description=(
+        'A string of python code snippet which can be executed to reproduce the api result.')),
+    'curl': fields.String(description=(
+        'A string of curl command which can be executed to reproduce the api result.')),
+}
+_archive_query_model = api.inherit('ArchiveCalculations', search_model, _archive_query_model_fields)
+
+
 @ns.route('/query')
 class ArchiveQueryResource(Resource):
-    @api.doc('archive_json_query')
+    @api.doc('archive_query')
     @api.response(400, 'Invalid requests, e.g. wrong owner type or bad search parameters')
-    @api.expect(search_request_parser, validate=True)
-    @api.marshal_with(archives_from_query_model, skip_none=True, code=200, description='Search results sent')
+    @api.response(401, 'Not authorized to access the data.')
+    @api.response(404, 'The upload or calculation does not exist')
+    @api.response(200, 'Archive data send')
+    @api.expect(_archive_query_parser, validate=True)
+    @api.marshal_with(_archive_query_model, skip_none=True, code=200, description='Search results sent')
     @authenticate(signature_token=True)
     def get(self):
         """
@@ -230,11 +231,14 @@ class ArchiveQueryResource(Resource):
         See ``/repo`` endpoint for documentation on the search
         parameters.
 
-        The actual data are in archive_data and a supplementary python code to execute
-        search is wirtten in code_snippet.
+        The actual data are in results and a supplementary python code (curl) to
+        execute search is in python (curl).
         """
         try:
-            args = search_request_parser.parse_args()
+            args = {
+                key: value for key, value in _archive_query_parser.parse_args().items()
+                if value is not None}
+
             scroll = args.get('scroll', False)
             scroll_id = args.get('scroll_id', None)
             page = args.get('page', 1)
@@ -254,7 +258,7 @@ class ArchiveQueryResource(Resource):
             abort(400, message='invalid pagination')
 
         search_request = search.SearchRequest()
-        add_query(search_request, search_request_parser.parse_args())
+        apply_search_parameters(search_request, _archive_query_parser.parse_args())
 
         try:
             if scroll:
@@ -271,11 +275,12 @@ class ArchiveQueryResource(Resource):
             traceback.print_exc()
             abort(400, str(e))
 
-        # build python code snippet
-        snippet = build_snippet(args, os.path.join(api.base_url, ns.name, 'query'))
-        results['code_snippet'] = snippet
+        # build python code and curl snippet
+        uri = os.path.join(api.base_url, ns.name, 'query')
+        results['python'] = query_api_python(args, uri)
+        results['curl'] = query_api_curl(args, uri)
 
-        data = {}
+        data = []
         calcs = results['results']
         try:
             upload_files = None
@@ -295,7 +300,7 @@ class ArchiveQueryResource(Resource):
                     upload_files.open_zipfile_cache()
 
                 fo = upload_files.archive_file(calc_id, 'rb')
-                data[calc_id] = json.loads(fo.read())
+                data.append(json.loads(fo.read()))
 
             if upload_files is not None:
                 upload_files.close_zipfile_cache()
@@ -306,7 +311,7 @@ class ArchiveQueryResource(Resource):
         except KeyError:
             abort(404, message='Calculation %s/%s does not exist.' % (upload_id, calc_id))
 
-        results['archive_data'] = data
+        results['results'] = data
 
         return results, 200
 
diff --git a/nomad/app/api/common.py b/nomad/app/api/common.py
index 4bdb311756..5701af8748 100644
--- a/nomad/app/api/common.py
+++ b/nomad/app/api/common.py
@@ -15,13 +15,17 @@
 """
 Common data, variables, decorators, models used throughout the API.
 """
-from typing import Callable, IO, Set, Tuple, Iterable
+from typing import Callable, IO, Set, Tuple, Iterable, Dict, Any
 from flask_restplus import fields
 import zipstream
-from flask import stream_with_context, Response
+from flask import stream_with_context, Response, g, abort
+
 import sys
+import os.path
 
-from nomad.app.utils import RFC3339DateTime
+from nomad import search
+from nomad.app.optimade import filterparser
+from nomad.app.utils import RFC3339DateTime, rfc3339DateTime
 from nomad.files import Restricted
 
 from .api import api
@@ -51,18 +55,104 @@ pagination_model = api.model('Pagination', {
 })
 """ Model used in responses with pagination. """
 
+search_model = api.model('Search', {
+    'pagination': fields.Nested(pagination_model, skip_none=True),
+    'scroll': fields.Nested(allow_null=True, skip_none=True, model=api.model('Scroll', {
+        'total': fields.Integer(description='The total amount of hits for the search.'),
+        'scroll_id': fields.String(allow_null=True, description='The scroll_id that can be used to retrieve the next page.'),
+        'size': fields.Integer(help='The size of the returned scroll page.')})),
+    'results': fields.List(fields.Raw, description=(
+        'A list of search results. Each result is a dict with quantitie names as key and '
+        'values as values')),
+})
+
+
+def add_pagination_parameters(request_parser):
+    """ Add pagination parameters to Flask querystring parser. """
+    request_parser.add_argument(
+        'page', type=int, help='The page, starting with 1.', location='args')
+    request_parser.add_argument(
+        'per_page', type=int, help='Desired calcs per page.', location='args')
+    request_parser.add_argument(
+        'order_by', type=str, help='The field to sort by.', location='args')
+    request_parser.add_argument(
+        'order', type=int, help='Use -1 for decending and 1 for acending order.', location='args')
+
+
+request_parser = api.parser()
+add_pagination_parameters(request_parser)
+pagination_request_parser = request_parser.copy()
+
+
+def add_scroll_parameters(request_parser):
+    """ Add scroll parameters to Flask querystring parser. """
+    request_parser.add_argument(
+        'scroll', type=bool, help='Enable scrolling')
+    request_parser.add_argument(
+        'scroll_id', type=str, help='The id of the current scrolling window to use.')
+
+
+def add_search_parameters(request_parser):
+    """ Add search parameters to Flask querystring parser. """
+    # more search parameters
+    request_parser.add_argument(
+        'owner', type=str,
+        help='Specify which calcs to return: ``all``, ``public``, ``user``, ``staging``, default is ``all``')
+    request_parser.add_argument(
+        'from_time', type=lambda x: rfc3339DateTime.parse(x),
+        help='A yyyy-MM-ddTHH:mm:ss (RFC3339) minimum entry time (e.g. upload time)')
+    request_parser.add_argument(
+        'until_time', type=lambda x: rfc3339DateTime.parse(x),
+        help='A yyyy-MM-ddTHH:mm:ss (RFC3339) maximum entry time (e.g. upload time)')
+
+    # main search parameters
+    for quantity in search.quantities.values():
+        request_parser.add_argument(
+            quantity.name, help=quantity.description,
+            action=quantity.argparse_action if quantity.multi else None)
+
+
+def apply_search_parameters(search_request: search.SearchRequest, args: Dict[str, Any]):
+    """
+    Helper that adds query relevant request args to the given SearchRequest.
+    """
+    args = {key: value for key, value in args.items() if value is not None}
+
+    # owner
+    owner = args.get('owner', 'all')
+    try:
+        search_request.owner(
+            owner,
+            g.user.user_id if g.user is not None else None)
+    except ValueError as e:
+        abort(401, getattr(e, 'message', 'Invalid owner parameter: %s' % owner))
+    except Exception as e:
+        abort(400, getattr(e, 'message', 'Invalid owner parameter'))
+
+    # time range
+    from_time_str = args.get('from_time', None)
+    until_time_str = args.get('until_time', None)
 
-pagination_request_parser = api.parser()
-""" Parser used for requests with pagination. """
+    try:
+        from_time = rfc3339DateTime.parse(from_time_str) if from_time_str is not None else None
+        until_time = rfc3339DateTime.parse(until_time_str) if until_time_str is not None else None
+        search_request.time_range(start=from_time, end=until_time)
+    except Exception:
+        abort(400, message='bad datetime format')
 
-pagination_request_parser.add_argument(
-    'page', type=int, help='The page, starting with 1.', location='args')
-pagination_request_parser.add_argument(
-    'per_page', type=int, help='Desired calcs per page.', location='args')
-pagination_request_parser.add_argument(
-    'order_by', type=str, help='The field to sort by.', location='args')
-pagination_request_parser.add_argument(
-    'order', type=int, help='Use -1 for decending and 1 for acending order.', location='args')
+    # optimade
+    try:
+        optimade = args.get('optimade', None)
+        if optimade is not None:
+            q = filterparser.parse_filter(optimade)
+            search_request.query(q)
+    except filterparser.FilterException:
+        abort(400, message='could not parse optimade query')
+
+    # search parameter
+    search_request.search_parameters(**{
+        key: value for key, value in args.items()
+        if key not in ['optimade'] and key in search.quantities})
 
 
 def calc_route(ns, prefix: str = ''):
@@ -155,12 +245,32 @@ def streamed_zipfile(
     return response
 
 
-def build_snippet(args, base_url):
+def resolve_query_api_url(args: Dict[str, Any], base_url: str):
+    """
+    Generates a uri from query parameters and base url.
+    """
+    args_keys = list(args.keys())
+    args_keys.sort()
+    if args_keys == ['calc_id', 'upload_id']:
+        url = '"%s"' % os.path.join(base_url, args['upload_id'], args['calc_id'])
+    else:
+        url = '"%s?%s" % (base_url, urlencode(args))'
+    return url
+
+
+def query_api_python(args: Dict[str, Any], base_url: str):
+    """
+    Creates a string of python code to execute a search query to the repository using
+    the requests library.
+    Arguments:
+        args: A dict of search parameters that will be encoded in the uri
+        base_url: The resource url which is prepended to the uri
+    """
     str_code = 'import requests\n'
     str_code += 'from urllib.parse import urlencode\n'
     str_code += '\n\n'
     str_code += 'def query_repository(args, base_url):\n'
-    str_code += '    url = "%s?%s" % (base_url, urlencode(args))\n'
+    str_code += '    url = %s\n' % resolve_query_api_url(args, base_url)
     str_code += '    response = requests.get(url)\n'
     str_code += '    if response.status_code != 200:\n'
     str_code += '        raise Exception("nomad return status %d" % response.status_code)\n'
@@ -179,3 +289,15 @@ def build_snippet(args, base_url):
     str_code += 'JSON_DATA = query_repository(args, base_url)\n'
 
     return str_code
+
+
+def query_api_curl(args: Dict[str, Any], base_url: str):
+    """
+    Creates a string of curl command to execute a search query to the repository.
+    Arguments:
+        args: A dict of search parameters that will be encoded in the uri
+        base_url: The resource url which is prepended to the uri
+    """
+    args = {key: val for key, val in args.items() if val is not None}
+    uri = resolve_query_api_url(args, base_url)
+    return 'curl -X GET %s -H  "accept: application/json" --output "nomad.json"' % uri
diff --git a/nomad/app/api/raw.py b/nomad/app/api/raw.py
index 127fa5cbfb..4f0d9e0011 100644
--- a/nomad/app/api/raw.py
+++ b/nomad/app/api/raw.py
@@ -34,8 +34,7 @@ from nomad.processing import Calc
 
 from .api import api
 from .auth import authenticate, create_authorization_predicate
-from .repo import search_request_parser, add_query
-from .common import streamed_zipfile
+from .common import streamed_zipfile, add_search_parameters, apply_search_parameters
 
 
 ns = api.namespace('raw', description='Downloading raw data files.')
@@ -56,16 +55,16 @@ raw_file_strip_argument = dict(
     name='strip', type=bool, help='Removes a potential common path prefix from all file paths.',
     location='args')
 
-raw_file_from_path_parser = api.parser()
-raw_file_from_path_parser.add_argument(**raw_file_compress_argument)
-raw_file_from_path_parser.add_argument(**raw_file_strip_argument)
-raw_file_from_path_parser.add_argument(
+_raw_file_from_path_parser = api.parser()
+_raw_file_from_path_parser.add_argument(**raw_file_compress_argument)
+_raw_file_from_path_parser.add_argument(**raw_file_strip_argument)
+_raw_file_from_path_parser.add_argument(
     name='length', type=int, help='Download only x bytes from the given file.',
     location='args')
-raw_file_from_path_parser.add_argument(
+_raw_file_from_path_parser.add_argument(
     name='offset', type=int, help='Start downloading a file\' content from the given offset.',
     location='args')
-raw_file_from_path_parser.add_argument(
+_raw_file_from_path_parser.add_argument(
     name='decompress', type=int, help='Automatically decompress the file if compressed. Only supports .gz',
     location='args')
 
@@ -194,7 +193,7 @@ class RawFileFromUploadPathResource(Resource):
     @api.response(404, 'The upload or path does not exist')
     @api.response(401, 'Not authorized to access the requested files.')
     @api.response(200, 'File(s) send')
-    @api.expect(raw_file_from_path_parser, validate=True)
+    @api.expect(_raw_file_from_path_parser, validate=True)
     @authenticate(signature_token=True)
     def get(self, upload_id: str, path: str):
         """ Get a single raw calculation file, directory contents, or whole directory sub-tree
@@ -255,7 +254,7 @@ class RawFileFromCalcPathResource(Resource):
     @api.response(404, 'The upload or path does not exist')
     @api.response(401, 'Not authorized to access the requested files.')
     @api.response(200, 'File(s) send')
-    @api.expect(raw_file_from_path_parser, validate=True)
+    @api.expect(_raw_file_from_path_parser, validate=True)
     @authenticate(signature_token=True)
     def get(self, upload_id: str, calc_id: str, path: str):
         """ Get a single raw calculation file, calculation contents, or all files for a
@@ -295,7 +294,7 @@ class RawFileFromCalcEmptyPathResource(RawFileFromCalcPathResource):
     @api.response(404, 'The upload or path does not exist')
     @api.response(401, 'Not authorized to access the requested files.')
     @api.response(200, 'File(s) send')
-    @api.expect(raw_file_from_path_parser, validate=True)
+    @api.expect(_raw_file_from_path_parser, validate=True)
     @authenticate(signature_token=True)
     def get(self, upload_id: str, calc_id: str):
         """ Get calculation contents.
@@ -306,7 +305,7 @@ class RawFileFromCalcEmptyPathResource(RawFileFromCalcPathResource):
         return super().get(upload_id, calc_id, None)
 
 
-raw_files_request_model = api.model('RawFilesRequest', {
+_raw_files_request_model = api.model('RawFilesRequest', {
     'files': fields.List(
         fields.String, default=[], description='List of files to download.'),
     'compress': fields.Boolean(
@@ -314,11 +313,11 @@ raw_files_request_model = api.model('RawFilesRequest', {
         description='Enable compression, default is not compression.')
 })
 
-raw_files_request_parser = api.parser()
-raw_files_request_parser.add_argument(
+_raw_files_request_parser = api.parser()
+_raw_files_request_parser.add_argument(
     'files', required=True, type=str, help='Comma separated list of files to download.', location='args')
-raw_files_request_parser.add_argument(**raw_file_strip_argument)
-raw_files_request_parser.add_argument(**raw_file_compress_argument)
+_raw_files_request_parser.add_argument(**raw_file_strip_argument)
+_raw_files_request_parser.add_argument(**raw_file_compress_argument)
 
 
 @ns.route('/<string:upload_id>')
@@ -329,7 +328,7 @@ class RawFilesResource(Resource):
     @api.doc('get_files')
     @api.response(404, 'The upload or path does not exist')
     @api.response(200, 'File(s) send', headers={'Content-Type': 'application/zip'})
-    @api.expect(raw_files_request_model, validate=True)
+    @api.expect(_raw_files_request_model, validate=True)
     @authenticate()
     def post(self, upload_id):
         """ Download multiple raw calculation files in a .zip file.
@@ -346,7 +345,7 @@ class RawFilesResource(Resource):
     @api.doc('get_files_alternate')
     @api.response(404, 'The upload or path does not exist')
     @api.response(200, 'File(s) send', headers={'Content-Type': 'application/zip'})
-    @api.expect(raw_files_request_parser, validate=True)
+    @api.expect(_raw_files_request_parser, validate=True)
     @authenticate(signature_token=True)
     def get(self, upload_id):
         """
@@ -355,7 +354,7 @@ class RawFilesResource(Resource):
         Zip files are streamed; instead of 401 errors, the zip file will just not contain
         any files that the user is not authorized to access.
         """
-        args = raw_files_request_parser.parse_args()
+        args = _raw_files_request_parser.parse_args()
         files_str = args.get('files')
         compress = args.get('compress', False)
         strip = args.get('strip', False)
@@ -367,12 +366,13 @@ class RawFilesResource(Resource):
         return respond_to_get_raw_files(upload_id, files, compress=compress, strip=strip)
 
 
-raw_file_from_query_parser = search_request_parser.copy()
-raw_file_from_query_parser.add_argument(
+_raw_file_from_query_parser = api.parser()
+add_search_parameters(_raw_file_from_query_parser)
+_raw_file_from_query_parser.add_argument(
     name='compress', type=bool, help='Use compression on .zip files, default is not.',
     location='args')
-raw_file_from_query_parser.add_argument(**raw_file_strip_argument)
-raw_file_from_query_parser.add_argument(
+_raw_file_from_query_parser.add_argument(**raw_file_strip_argument)
+_raw_file_from_query_parser.add_argument(
     name='file_pattern', type=str,
     help=(
         'A wildcard pattern. Only filenames that match this pattern will be in the '
@@ -386,7 +386,7 @@ class RawFileQueryResource(Resource):
 
     @api.doc('raw_files_from_query')
     @api.response(400, 'Invalid requests, e.g. wrong owner type or bad search parameters')
-    @api.expect(raw_file_from_query_parser, validate=True)
+    @api.expect(_raw_file_from_query_parser, validate=True)
     @api.response(200, 'File(s) send', headers={'Content-Type': 'application/zip'})
     @authenticate(signature_token=True)
     def get(self):
@@ -403,7 +403,7 @@ class RawFileQueryResource(Resource):
         """
         patterns: List[str] = None
         try:
-            args = raw_file_from_query_parser.parse_args()
+            args = _raw_file_from_query_parser.parse_args()
             compress = args.get('compress', False)
             strip = args.get('strip', False)
             pattern = args.get('file_pattern', None)
@@ -417,7 +417,7 @@ class RawFileQueryResource(Resource):
             abort(400, message='bad parameter types')
 
         search_request = search.SearchRequest()
-        add_query(search_request, search_request_parser.parse_args())
+        apply_search_parameters(search_request, _raw_file_from_query_parser.parse_args())
 
         def path(entry):
             return '%s/%s' % (entry['upload_id'], entry['mainfile'])
diff --git a/nomad/app/api/repo.py b/nomad/app/api/repo.py
index d1ee22a524..7151fb6f44 100644
--- a/nomad/app/api/repo.py
+++ b/nomad/app/api/repo.py
@@ -27,13 +27,14 @@ from datetime import datetime
 import os.path
 
 from nomad import search, utils, datamodel, processing as proc, infrastructure
-from nomad.app.utils import rfc3339DateTime, RFC3339DateTime, with_logger
-from nomad.app.optimade import filterparser
+from nomad.app.utils import RFC3339DateTime, with_logger
 from nomad.datamodel import UserMetadata, Dataset, User
 
 from .api import api
 from .auth import authenticate
-from .common import pagination_model, pagination_request_parser, calc_route, build_snippet
+from .common import search_model, calc_route, add_pagination_parameters,\
+    add_scroll_parameters, add_search_parameters, apply_search_parameters,\
+    query_api_python, query_api_curl
 
 ns = api.namespace('repo', description='Access repository metadata.')
 
@@ -64,132 +65,58 @@ class RepoCalcResource(Resource):
             if not (any(g.user.user_id == user.user_id for user in calc.owners) or g.user.is_admin):
                 abort(401, message='Not authorized to access %s/%s.' % (upload_id, calc_id))
 
-        return calc.to_dict(), 200
+        result = calc.to_dict()
+        uri = os.path.join(api.base_url, ns.name, '')
+        result['python'] = query_api_python({'upload_id': upload_id, 'calc_id': calc_id}, uri)
+        result['curl'] = query_api_curl({'upload_id': upload_id, 'calc_id': calc_id}, uri)
 
-
-repo_calcs_model_fields = {
-    'pagination': fields.Nested(pagination_model, skip_none=True),
-    'scroll': fields.Nested(allow_null=True, skip_none=True, model=api.model('Scroll', {
-        'total': fields.Integer(description='The total amount of hits for the search.'),
-        'scroll_id': fields.String(allow_null=True, description='The scroll_id that can be used to retrieve the next page.'),
-        'size': fields.Integer(help='The size of the returned scroll page.')})),
-    'results': fields.List(fields.Raw, description=(
-        'A list of search results. Each result is a dict with quantitie names as key and '
-        'values as values')),
-    'statistics': fields.Raw(description=(
-        'A dict with all statistics. Each statistic is dictionary with a metrics dict as '
-        'value and quantity value as key. The possible metrics are code runs(calcs), %s. '
-        'There is a pseudo quantity "total" with a single value "all" that contains the '
-        ' metrics over all results. ' % ', '.join(datamodel.Domain.instance.metrics_names))),
-    'code_snippet': fields.String(description=(
-        'A string of python code snippet which can be executed to reproduce the api result.')),
-}
-for group_name, (group_quantity, _) in search.groups.items():
-    repo_calcs_model_fields[group_name] = fields.Nested(api.model('RepoDatasets', {
-        'after': fields.String(description='The after value that can be used to retrieve the next %s.' % group_name),
-        'values': fields.Raw(description='A dict with %s as key. The values are dicts with "total" and "examples" keys.' % group_quantity)
-    }), skip_none=True)
-repo_calcs_model = api.model('RepoCalculations', repo_calcs_model_fields)
+        return result, 200
 
 
-repo_calc_id_model = api.model('RepoCalculationId', {
-    'upload_id': fields.String(), 'calc_id': fields.String()
-})
-
-
-def add_common_parameters(request_parser):
-    request_parser.add_argument(
-        'owner', type=str,
-        help='Specify which calcs to return: ``all``, ``public``, ``user``, ``staging``, default is ``all``')
-    request_parser.add_argument(
-        'from_time', type=lambda x: rfc3339DateTime.parse(x),
-        help='A yyyy-MM-ddTHH:mm:ss (RFC3339) minimum entry time (e.g. upload time)')
-    request_parser.add_argument(
-        'until_time', type=lambda x: rfc3339DateTime.parse(x),
-        help='A yyyy-MM-ddTHH:mm:ss (RFC3339) maximum entry time (e.g. upload time)')
-
-    for quantity in search.quantities.values():
-        request_parser.add_argument(
-            quantity.name, help=quantity.description,
-            action=quantity.argparse_action if quantity.multi else None)
-
-
-repo_request_parser = pagination_request_parser.copy()
-add_common_parameters(repo_request_parser)
-repo_request_parser.add_argument(
-    'scroll', type=bool, help='Enable scrolling')
-repo_request_parser.add_argument(
-    'scroll_id', type=str, help='The id of the current scrolling window to use.')
-repo_request_parser.add_argument(
+_search_request_parser = api.parser()
+add_pagination_parameters(_search_request_parser)
+add_scroll_parameters(_search_request_parser)
+add_search_parameters(_search_request_parser)
+_search_request_parser.add_argument(
     'date_histogram', type=bool, help='Add an additional aggregation over the upload time')
-repo_request_parser.add_argument(
+_search_request_parser.add_argument(
     'metrics', type=str, action='append', help=(
         'Metrics to aggregate over all quantities and their values as comma separated list. '
         'Possible values are %s.' % ', '.join(datamodel.Domain.instance.metrics_names)))
-repo_request_parser.add_argument(
+_search_request_parser.add_argument(
     'statistics', type=bool, help=('Return statistics.'))
-
 for group_name in search.groups:
-    repo_request_parser.add_argument(
+    _search_request_parser.add_argument(
         group_name, type=bool, help=('Return %s group data.' % group_name))
-    repo_request_parser.add_argument(
+    _search_request_parser.add_argument(
         '%s_after' % group_name, type=str,
         help='The last %s id of the last scroll window for the %s group' % (group_name, group_name))
 
-
-search_request_parser = api.parser()
-add_common_parameters(search_request_parser)
-
-
-def add_query(search_request: search.SearchRequest, args: Dict[str, Any]):
-    """
-    Help that adds query relevant request args to the given SearchRequest.
-    """
-    args = {key: value for key, value in args.items() if value is not None}
-
-    # owner
-    owner = args.get('owner', 'all')
-    try:
-        search_request.owner(
-            owner,
-            g.user.user_id if g.user is not None else None)
-    except ValueError as e:
-        abort(401, getattr(e, 'message', 'Invalid owner parameter: %s' % owner))
-    except Exception as e:
-        abort(400, getattr(e, 'message', 'Invalid owner parameter'))
-
-    # time range
-    from_time_str = args.get('from_time', None)
-    until_time_str = args.get('until_time', None)
-
-    try:
-        from_time = rfc3339DateTime.parse(from_time_str) if from_time_str is not None else None
-        until_time = rfc3339DateTime.parse(until_time_str) if until_time_str is not None else None
-        search_request.time_range(start=from_time, end=until_time)
-    except Exception:
-        abort(400, message='bad datetime format')
-
-    # optimade
-    try:
-        optimade = args.get('optimade', None)
-        if optimade is not None:
-            q = filterparser.parse_filter(optimade)
-            search_request.query(q)
-    except filterparser.FilterException:
-        abort(400, message='could not parse optimade query')
-
-    # search parameter
-    search_request.search_parameters(**{
-        key: value for key, value in args.items()
-        if key not in ['optimade'] and key in search.quantities})
+_repo_calcs_model_fields = {
+    'statistics': fields.Raw(description=(
+        'A dict with all statistics. Each statistic is dictionary with a metrics dict as '
+        'value and quantity value as key. The possible metrics are code runs(calcs), %s. '
+        'There is a pseudo quantity "total" with a single value "all" that contains the '
+        ' metrics over all results. ' % ', '.join(datamodel.Domain.instance.metrics_names))),
+    'python': fields.String(description=(
+        'A string of python code snippet which can be executed to reproduce the api result.')),
+    'curl': fields.String(description=(
+        'A string of curl command which can be executed to reproduce the api result.')),
+}
+for group_name, (group_quantity, _) in search.groups.items():
+    _repo_calcs_model_fields[group_name] = fields.Nested(api.model('RepoDatasets', {
+        'after': fields.String(description='The after value that can be used to retrieve the next %s.' % group_name),
+        'values': fields.Raw(description='A dict with %s as key. The values are dicts with "total" and "examples" keys.' % group_quantity)
+    }), skip_none=True)
+_repo_calcs_model = api.inherit('RepoCalculations', search_model, _repo_calcs_model_fields)
 
 
 @ns.route('/')
 class RepoCalcsResource(Resource):
     @api.doc('search')
     @api.response(400, 'Invalid requests, e.g. wrong owner type or bad search parameters')
-    @api.expect(repo_request_parser, validate=True)
-    @api.marshal_with(repo_calcs_model, skip_none=True, code=200, description='Search results send')
+    @api.expect(_search_request_parser, validate=True)
+    @api.marshal_with(_repo_calcs_model, skip_none=True, code=200, description='Search results send')
     @authenticate()
     def get(self):
         """
@@ -225,7 +152,7 @@ class RepoCalcsResource(Resource):
 
         try:
             args = {
-                key: value for key, value in repo_request_parser.parse_args().items()
+                key: value for key, value in _search_request_parser.parse_args().items()
                 if value is not None}
 
             scroll = args.get('scroll', False)
@@ -244,7 +171,7 @@ class RepoCalcsResource(Resource):
             abort(400, message='bad parameters: %s' % str(e))
 
         search_request = search.SearchRequest()
-        add_query(search_request, args)
+        apply_search_parameters(search_request, args)
         if date_histogram:
             search_request.date_histogram()
 
@@ -303,9 +230,10 @@ class RepoCalcsResource(Resource):
                     if args.get(group_name, False):
                         results[group_name] = quantities[group_quantity]
 
-            # build python code snippet
-            snippet = build_snippet(args, os.path.join(api.base_url, ns.name, ''))
-            results['code_snippet'] = snippet
+            # build python code/curl snippet
+            uri = os.path.join(api.base_url, ns.name, '')
+            results['curl'] = query_api_curl(args, uri)
+            results['python'] = query_api_python(args, uri)
 
             return results, 200
         except search.ScrollIdNotFound:
@@ -316,7 +244,7 @@ class RepoCalcsResource(Resource):
             abort(400, str(e))
 
 
-query_model_parameters = {
+_query_model_parameters = {
     'owner': fields.String(description='Specify which calcs to return: ``all``, ``public``, ``user``, ``staging``, default is ``all``'),
     'from_time': RFC3339DateTime(description='A yyyy-MM-ddTHH:mm:ss (RFC3339) minimum entry time (e.g. upload time)'),
     'until_time': RFC3339DateTime(description='A yyyy-MM-ddTHH:mm:ss (RFC3339) maximum entry time (e.g. upload time)')
@@ -328,28 +256,28 @@ for quantity in search.quantities.values():
             return fields.List(fields.String(**kwargs))
     else:
         field = fields.String
-    query_model_parameters[quantity.name] = field(description=quantity.description)
+    _query_model_parameters[quantity.name] = field(description=quantity.description)
 
-repo_query_model = api.model('RepoQuery', query_model_parameters, skip_none=True)
+_repo_query_model = api.model('RepoQuery', _query_model_parameters, skip_none=True)
 
 
 def repo_edit_action_field(quantity):
     if quantity.is_scalar:
-        return fields.Nested(repo_edit_action_model, description=quantity.description, skip_none=True)
+        return fields.Nested(_repo_edit_action_model, description=quantity.description, skip_none=True)
     else:
         return fields.List(
-            fields.Nested(repo_edit_action_model, skip_none=True), description=quantity.description)
+            fields.Nested(_repo_edit_action_model, skip_none=True), description=quantity.description)
 
 
-repo_edit_action_model = api.model('RepoEditAction', {
+_repo_edit_action_model = api.model('RepoEditAction', {
     'value': fields.String(description='The value/values that is set as a string.'),
     'success': fields.Boolean(description='If this can/could be done. Only in API response.'),
     'message': fields.String(descriptin='A message that details the action result. Only in API response.')
 })
 
-repo_edit_model = api.model('RepoEdit', {
+_repo_edit_model = api.model('RepoEdit', {
     'verify': fields.Boolean(description='If true, no action is performed.'),
-    'query': fields.Nested(repo_query_model, skip_none=True, description='New metadata will be applied to query results.'),
+    'query': fields.Nested(_repo_query_model, skip_none=True, description='New metadata will be applied to query results.'),
     'actions': fields.Nested(
         api.model('RepoEditActions', {
             quantity.name: repo_edit_action_field(quantity)
@@ -365,7 +293,7 @@ def edit(parsed_query: Dict[str, Any], logger, mongo_update: Dict[str, Any] = No
     # get all calculations that have to change
     with utils.timer(logger, 'edit query executed'):
         search_request = search.SearchRequest()
-        add_query(search_request, parsed_query)
+        apply_search_parameters(search_request, parsed_query)
         upload_ids = set()
         calc_ids = []
         for hit in search_request.execute_scan():
@@ -404,7 +332,7 @@ def edit(parsed_query: Dict[str, Any], logger, mongo_update: Dict[str, Any] = No
 def get_uploader_ids(query):
     """ Get all the uploader from the query, to check coauthers and shared_with for uploaders. """
     search_request = search.SearchRequest()
-    add_query(search_request, query)
+    apply_search_parameters(search_request, query)
     search_request.quantity(name='uploader_id')
     return search_request.execute()['quantities']['uploader_id']['values']
 
@@ -413,8 +341,8 @@ def get_uploader_ids(query):
 class EditRepoCalcsResource(Resource):
     @api.doc('edit_repo')
     @api.response(400, 'Invalid requests, e.g. wrong owner type or bad search parameters')
-    @api.expect(repo_edit_model)
-    @api.marshal_with(repo_edit_model, skip_none=True, code=200, description='Edit verified/performed')
+    @api.expect(_repo_edit_model)
+    @api.marshal_with(_repo_edit_model, skip_none=True, code=200, description='Edit verified/performed')
     @authenticate()
     @with_logger
     def post(self, logger):
@@ -523,7 +451,7 @@ class EditRepoCalcsResource(Resource):
 
                         # check if necessary
                         search_request = search.SearchRequest()
-                        add_query(search_request, parsed_query)
+                        apply_search_parameters(search_request, parsed_query)
                         search_request.q = search_request.q & Q('term', with_embargo=True)
                         if search_request.execute()['total'] == 0:
                             action['success'] = False
@@ -551,7 +479,7 @@ class EditRepoCalcsResource(Resource):
                 if action_quantity_name == 'datasets':
                     # check if datasets edit is allowed and if datasets have to be removed
                     search_request = search.SearchRequest()
-                    add_query(search_request, parsed_query)
+                    apply_search_parameters(search_request, parsed_query)
                     search_request.quantity(name='dataset_id')
                     old_datasets = list(
                         search_request.execute()['quantities']['dataset_id']['values'].keys())
@@ -596,33 +524,29 @@ class EditRepoCalcsResource(Resource):
         return json_data, 200
 
 
-repo_quantity_model = api.model('RepoQuantity', {
+_repo_quantity_search_request_parser = api.parser()
+add_search_parameters(_repo_quantity_search_request_parser)
+_repo_quantity_search_request_parser.add_argument(
+    'after', type=str, help='The after value to use for "scrolling".')
+_repo_quantity_search_request_parser.add_argument(
+    'size', type=int, help='The max size of the returned values.')
+
+_repo_quantity_model = api.model('RepoQuantity', {
     'after': fields.String(description='The after value that can be used to retrieve the next set of values.'),
     'values': fields.Raw(description='A dict with values as key. Values are dicts with "total" and "examples" keys.')
 })
 
-repo_quantity_values_model = api.model('RepoQuantityValues', {
-    'quantity': fields.Nested(repo_quantity_model, allow_null=True)
+_repo_quantity_values_model = api.model('RepoQuantityValues', {
+    'quantity': fields.Nested(_repo_quantity_model, allow_null=True)
 })
 
-repo_quantities_model = api.model('RepoQuantities', {
-    'quantities': fields.List(fields.Nested(repo_quantity_model))
-})
-
-repo_quantity_search_request_parser = api.parser()
-add_common_parameters(repo_quantity_search_request_parser)
-repo_quantity_search_request_parser.add_argument(
-    'after', type=str, help='The after value to use for "scrolling".')
-repo_quantity_search_request_parser.add_argument(
-    'size', type=int, help='The max size of the returned values.')
-
 
 @ns.route('/quantity/<string:quantity>')
 class RepoQuantityResource(Resource):
     @api.doc('quantity_search')
     @api.response(400, 'Invalid requests, e.g. wrong owner type, bad quantity, bad search parameters')
-    @api.expect(repo_quantity_search_request_parser, validate=True)
-    @api.marshal_with(repo_quantity_values_model, skip_none=True, code=200, description='Search results send')
+    @api.expect(_repo_quantity_search_request_parser, validate=True)
+    @api.marshal_with(_repo_quantity_values_model, skip_none=True, code=200, description='Search results sent')
     @authenticate()
     def get(self, quantity: str):
         """
@@ -646,10 +570,10 @@ class RepoQuantityResource(Resource):
         search_request = search.SearchRequest()
         args = {
             key: value
-            for key, value in repo_quantity_search_request_parser.parse_args().items()
+            for key, value in _repo_quantity_search_request_parser.parse_args().items()
             if value is not None}
 
-        add_query(search_request, args)
+        apply_search_parameters(search_request, args)
         after = args.get('after', None)
         size = args.get('size', 100)
 
@@ -672,20 +596,24 @@ class RepoQuantityResource(Resource):
             abort(400, 'Given quantity does not exist: %s' % str(e))
 
 
-repo_quantities_search_request_parser = api.parser()
-add_common_parameters(repo_quantities_search_request_parser)
-repo_quantities_search_request_parser.add_argument(
+_repo_quantities_search_request_parser = api.parser()
+add_search_parameters(_repo_quantities_search_request_parser)
+_repo_quantities_search_request_parser.add_argument(
     'quantities', type=str, action='append',
     help='The quantities to retrieve values from')
-repo_quantities_search_request_parser.add_argument(
+_repo_quantities_search_request_parser.add_argument(
     'size', type=int, help='The max size of the returned values.')
 
+repo_quantities_model = api.model('RepoQuantities', {
+    'quantities': fields.List(fields.Nested(_repo_quantity_model))
+})
+
 
 @ns.route('/quantities')
 class RepoQuantitiesResource(Resource):
     @api.doc('quantities_search')
     @api.response(400, 'Invalid requests, e.g. wrong owner type, bad quantity, bad search parameters')
-    @api.expect(repo_quantities_search_request_parser, validate=True)
+    @api.expect(_repo_quantities_search_request_parser, validate=True)
     @api.marshal_with(repo_quantities_model, skip_none=True, code=200, description='Search results send')
     @authenticate()
     def get(self):
@@ -708,10 +636,10 @@ class RepoQuantitiesResource(Resource):
         search_request = search.SearchRequest()
         args = {
             key: value
-            for key, value in repo_quantities_search_request_parser.parse_args().items()
+            for key, value in _repo_quantities_search_request_parser.parse_args().items()
             if value is not None}
 
-        add_query(search_request, args)
+        apply_search_parameters(search_request, args)
         quantities = args.get('quantities', [])
         size = args.get('size', 5)
 
@@ -731,6 +659,11 @@ class RepoQuantitiesResource(Resource):
         return search_request.execute(), 200
 
 
+repo_calc_id_model = api.model('RepoCalculationId', {
+    'upload_id': fields.String(), 'calc_id': fields.String()
+})
+
+
 @ns.route('/pid/<path:pid>')
 class RepoPidResource(Resource):
     @api.doc('resolve_pid')
diff --git a/tests/app/test_api.py b/tests/app/test_api.py
index 33707a36bf..f2efed254c 100644
--- a/tests/app/test_api.py
+++ b/tests/app/test_api.py
@@ -667,17 +667,15 @@ class TestArchive(UploadFilesBasedTests):
         assert rv.status_code == 200
         assert_zip_file(rv, files=1)
 
-    def test_archive_json_query(self, api, processeds, test_user_auth):
-        query_params = {'atoms': 'Si', 'res_type': 'json'}
+    def test_get_code_from_query(self, api, processeds, test_user_auth):
+        query_params = {'atoms': 'Si', 'res_type': 'json', 'order': 1, 'per_page': 5}
         url = '/archive/query?%s' % urlencode(query_params)
         rv = api.get(url, headers=test_user_auth)
-
         assert rv.status_code == 200
         data = json.loads(rv.data)
         assert isinstance(data, dict)
         assert data['results'] is not None
-        assert data['archive_data'] is not None
-        assert data['code_snippet'] is not None
+        assert data['python'] is not None
 
 
 class TestRepo():
@@ -742,6 +740,13 @@ class TestRepo():
         rv = api.get('/repo/0/1', headers=test_user_auth)
         assert rv.status_code == 200
 
+    def test_get_code(self, api, example_elastic_calcs, no_warn, test_user_auth):
+        rv = api.get('/repo/0/1', headers=test_user_auth)
+        assert rv.status_code == 200
+        data = rv.json
+        assert data['python'] is not None
+        assert data['curl'] is not None
+
     def test_public_calc(self, api, example_elastic_calcs, no_warn, other_test_user_auth):
         rv = api.get('/repo/0/1', headers=other_test_user_auth)
         assert rv.status_code == 200
@@ -1070,13 +1075,14 @@ class TestRepo():
         data = json.loads(rv.data)
         assert data['pagination']['total'] > 0
 
-    def test_get_code_snippet(self, api, example_elastic_calcs, test_user_auth):
+    def test_get_code_from_query(self, api, example_elastic_calcs, test_user_auth):
         rv = api.get('/repo/?per_page=10', headers=test_user_auth)
         assert rv.status_code == 200
         data = json.loads(rv.data)
-        assert data['code_snippet'] is not None
+        assert data['python'] is not None
+        assert data['curl'] is not None
         # exec does not seem to work
-        # exec(data['code_snippet'])
+        # exec(data['python'])
 
 
 class TestEditRepo():
-- 
GitLab