Commit ba2ccbd2 authored by Alvin Noe Ladines

Refactored models in api, added curl command output, and small fixes

parent c569d422
Pipeline #67253 passed with stages
in 12 minutes and 21 seconds
@@ -32,8 +32,9 @@ from nomad import utils, search
 from .auth import authenticate, create_authorization_predicate
 from .api import api
-from .repo import search_request_parser, add_query
-from .common import calc_route, streamed_zipfile, pagination_model, build_snippet
+from .common import calc_route, streamed_zipfile, search_model, add_pagination_parameters,\
+    add_scroll_parameters, add_search_parameters, apply_search_parameters,\
+    query_api_python, query_api_curl

 ns = api.namespace(
     'archive',
@@ -108,39 +109,20 @@ class ArchiveCalcResource(Resource):
             abort(404, message='Calculation %s does not exist.' % archive_id)

-archives_from_query_parser = search_request_parser.copy()
-archives_from_query_parser.add_argument(
+_archive_download_parser = api.parser()
+add_search_parameters(_archive_download_parser)
+_archive_download_parser.add_argument(
     name='compress', type=bool, help='Use compression on .zip files, default is not.',
     location='args')
-archives_from_query_model_fields = {
-    'pagination': fields.Nested(pagination_model, skip_none=True),
-    'scroll': fields.Nested(allow_null=True, skip_none=True, model=api.model('Scroll', {
-        'total': fields.Integer(description='The total amount of hits for the search.'),
-        'scroll_id': fields.String(allow_null=True, description='The scroll_id that can be used to retrieve the next page.'),
-        'size': fields.Integer(help='The size of the returned scroll page.')})),
-    'results': fields.List(fields.Raw, description=(
-        'A list of search results. Each result is a dict with quantities names as key and '
-        'values as values')),
-    'archive_data': fields.Raw(description=('A dict of archive data with calc_ids as keys ')),
-    'code_snippet': fields.String(description=(
-        'A string of python code snippet which can be executed to reproduce the api result.')),
-}
-for group_name, (group_quantity, _) in search.groups.items():
-    archives_from_query_model_fields[group_name] = fields.Nested(api.model('ArchiveDatasets', {
-        'after': fields.String(description='The after value that can be used to retrieve the next %s.' % group_name),
-        'values': fields.Raw(description='A dict with %s as key. The values are dicts with "total" and "examples" keys.' % group_quantity)
-    }), skip_none=True)
-archives_from_query_model = api.model('RepoCalculations', archives_from_query_model_fields)

 @ns.route('/download')
 class ArchiveDownloadResource(Resource):
     manifest_quantities = ['upload_id', 'calc_id', 'external_id', 'raw_id', 'pid', 'calc_hash']

-    @api.doc('archive_zip_download')
+    @api.doc('archive_download')
     @api.response(400, 'Invalid requests, e.g. wrong owner type or bad search parameters')
-    @api.expect(archives_from_query_parser, validate=True)
+    @api.expect(_archive_download_parser, validate=True)
     @api.response(200, 'File(s) send', headers={'Content-Type': 'application/zip'})
     @authenticate(signature_token=True)
     def get(self):
@@ -156,13 +138,13 @@ class ArchiveDownloadResource(Resource):
         The zip file will contain a ``manifest.json`` with the repository meta data.
         """
         try:
-            args = archives_from_query_parser.parse_args()
+            args = _archive_download_parser.parse_args()
             compress = args.get('compress', False)
         except Exception:
             abort(400, message='bad parameter types')

         search_request = search.SearchRequest()
-        add_query(search_request, search_request_parser.parse_args())
+        apply_search_parameters(search_request, args)

         calcs = search_request.execute_scan(order_by='upload_id')
@@ -216,12 +198,31 @@ class ArchiveDownloadResource(Resource):
             generator(), zipfile_name='nomad_archive.zip', compress=compress)

+_archive_query_parser = api.parser()
+add_pagination_parameters(_archive_query_parser)
+add_scroll_parameters(_archive_query_parser)
+
+_archive_query_model_fields = {
+    'results': fields.List(fields.Raw, description=(
+        'A list of search results. Each result is a dict with quantities names as key and '
+        'values as values')),
+    'python': fields.String(description=(
+        'A string of python code snippet which can be executed to reproduce the api result.')),
+    'curl': fields.String(description=(
+        'A string of curl command which can be executed to reproduce the api result.')),
+}
+_archive_query_model = api.inherit('ArchiveCalculations', search_model, _archive_query_model_fields)

 @ns.route('/query')
 class ArchiveQueryResource(Resource):
-    @api.doc('archive_json_query')
+    @api.doc('archive_query')
     @api.response(400, 'Invalid requests, e.g. wrong owner type or bad search parameters')
-    @api.expect(search_request_parser, validate=True)
-    @api.marshal_with(archives_from_query_model, skip_none=True, code=200, description='Search results sent')
+    @api.response(401, 'Not authorized to access the data.')
+    @api.response(404, 'The upload or calculation does not exist')
+    @api.response(200, 'Archive data send')
+    @api.expect(_archive_query_parser, validate=True)
+    @api.marshal_with(_archive_query_model, skip_none=True, code=200, description='Search results sent')
     @authenticate(signature_token=True)
     def get(self):
         """
@@ -230,11 +231,14 @@ class ArchiveQueryResource(Resource):
         See ``/repo`` endpoint for documentation on the search
         parameters.

-        The actual data are in archive_data and a supplementary python code to execute
-        search is wirtten in code_snippet.
+        The actual data are in archive_data and a supplementary python code (curl) to
+        execute search is in python (curl).
         """
         try:
-            args = search_request_parser.parse_args()
+            args = {
+                key: value for key, value in _archive_query_parser.parse_args().items()
+                if value is not None}
             scroll = args.get('scroll', False)
             scroll_id = args.get('scroll_id', None)
             page = args.get('page', 1)
@@ -254,7 +258,7 @@ class ArchiveQueryResource(Resource):
             abort(400, message='invalid pagination')

         search_request = search.SearchRequest()
-        add_query(search_request, search_request_parser.parse_args())
+        apply_search_parameters(search_request, _archive_query_parser.parse_args())

         try:
             if scroll:
@@ -271,11 +275,12 @@ class ArchiveQueryResource(Resource):
             traceback.print_exc()
             abort(400, str(e))

-        # build python code snippet
-        snippet = build_snippet(args, os.path.join(api.base_url, ns.name, 'query'))
-        results['code_snippet'] = snippet
+        # build python code and curl snippet
+        uri = os.path.join(api.base_url, ns.name, 'query')
+        results['python'] = query_api_python(args, uri)
+        results['curl'] = query_api_curl(args, uri)

-        data = {}
+        data = []
         calcs = results['results']
         try:
             upload_files = None
@@ -295,7 +300,7 @@ class ArchiveQueryResource(Resource):
                     upload_files.open_zipfile_cache()

                 fo = upload_files.archive_file(calc_id, 'rb')
-                data[calc_id] = json.loads(fo.read())
+                data.append(json.loads(fo.read()))

             if upload_files is not None:
                 upload_files.close_zipfile_cache()
@@ -306,7 +311,7 @@ class ArchiveQueryResource(Resource):
         except KeyError:
             abort(404, message='Calculation %s/%s does not exist.' % (upload_id, calc_id))

-        results['archive_data'] = data
+        results['results'] = data

         return results, 200
...
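The reworked ``/archive/query`` endpoint now returns the archive data in ``results`` together with the generated ``python`` and ``curl`` reproduction snippets. A minimal client-side sketch of a call against it; the base url is hypothetical, only the pagination parameters added above are used, and further search parameters follow the ``/repo`` documentation referenced in the docstring:

# Client-side sketch for the reworked /archive/query endpoint.
# The base url below is a hypothetical deployment, not part of this commit.
import requests

base_url = 'https://nomad.example.org/api'  # hypothetical

response = requests.get('%s/archive/query' % base_url, params={'page': 1, 'per_page': 2})
response.raise_for_status()
data = response.json()

print(data['pagination'])   # pagination info inherited from search_model
print(data['python'])       # generated python snippet that reproduces this query
print(data['curl'])         # generated curl command that reproduces this query
for archive in data['results']:  # archive data, one dict per matching calculation
    print(list(archive.keys()))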
@@ -15,13 +15,17 @@
 """
 Common data, variables, decorators, models used throughout the API.
 """
-from typing import Callable, IO, Set, Tuple, Iterable
+from typing import Callable, IO, Set, Tuple, Iterable, Dict, Any
 from flask_restplus import fields
 import zipstream
-from flask import stream_with_context, Response
+from flask import stream_with_context, Response, g, abort
 import sys
+import os.path

-from nomad.app.utils import RFC3339DateTime
+from nomad import search
+from nomad.app.optimade import filterparser
+from nomad.app.utils import RFC3339DateTime, rfc3339DateTime
 from nomad.files import Restricted

 from .api import api
@@ -51,18 +55,104 @@ pagination_model = api.model('Pagination', {
 })
 """ Model used in responses with pagination. """

-pagination_request_parser = api.parser()
-""" Parser used for requests with pagination. """
-
-pagination_request_parser.add_argument(
-    'page', type=int, help='The page, starting with 1.', location='args')
-pagination_request_parser.add_argument(
-    'per_page', type=int, help='Desired calcs per page.', location='args')
-pagination_request_parser.add_argument(
-    'order_by', type=str, help='The field to sort by.', location='args')
-pagination_request_parser.add_argument(
-    'order', type=int, help='Use -1 for decending and 1 for acending order.', location='args')
+search_model = api.model('Search', {
+    'pagination': fields.Nested(pagination_model, skip_none=True),
+    'scroll': fields.Nested(allow_null=True, skip_none=True, model=api.model('Scroll', {
+        'total': fields.Integer(description='The total amount of hits for the search.'),
+        'scroll_id': fields.String(allow_null=True, description='The scroll_id that can be used to retrieve the next page.'),
+        'size': fields.Integer(help='The size of the returned scroll page.')})),
+    'results': fields.List(fields.Raw, description=(
+        'A list of search results. Each result is a dict with quantitie names as key and '
+        'values as values')),
+})
+
+
+def add_pagination_parameters(request_parser):
+    """ Add pagination parameters to Flask querystring parser. """
+    request_parser.add_argument(
+        'page', type=int, help='The page, starting with 1.', location='args')
+    request_parser.add_argument(
+        'per_page', type=int, help='Desired calcs per page.', location='args')
+    request_parser.add_argument(
+        'order_by', type=str, help='The field to sort by.', location='args')
+    request_parser.add_argument(
+        'order', type=int, help='Use -1 for decending and 1 for acending order.', location='args')
+
+
+request_parser = api.parser()
+add_pagination_parameters(request_parser)
+pagination_request_parser = request_parser.copy()
+
+
+def add_scroll_parameters(request_parser):
+    """ Add scroll parameters to Flask querystring parser. """
+    request_parser.add_argument(
+        'scroll', type=bool, help='Enable scrolling')
+    request_parser.add_argument(
+        'scroll_id', type=str, help='The id of the current scrolling window to use.')
+
+
+def add_search_parameters(request_parser):
+    """ Add search parameters to Flask querystring parser. """
+    # more search parameters
+    request_parser.add_argument(
+        'owner', type=str,
+        help='Specify which calcs to return: ``all``, ``public``, ``user``, ``staging``, default is ``all``')
+    request_parser.add_argument(
+        'from_time', type=lambda x: rfc3339DateTime.parse(x),
+        help='A yyyy-MM-ddTHH:mm:ss (RFC3339) minimum entry time (e.g. upload time)')
+    request_parser.add_argument(
+        'until_time', type=lambda x: rfc3339DateTime.parse(x),
+        help='A yyyy-MM-ddTHH:mm:ss (RFC3339) maximum entry time (e.g. upload time)')
+
+    # main search parameters
+    for quantity in search.quantities.values():
+        request_parser.add_argument(
+            quantity.name, help=quantity.description,
+            action=quantity.argparse_action if quantity.multi else None)
+
+
+def apply_search_parameters(search_request: search.SearchRequest, args: Dict[str, Any]):
+    """
+    Help that adds query relevant request args to the given SearchRequest.
+    """
+    args = {key: value for key, value in args.items() if value is not None}
+
+    # owner
+    owner = args.get('owner', 'all')
+    try:
+        search_request.owner(
+            owner,
+            g.user.user_id if g.user is not None else None)
+    except ValueError as e:
+        abort(401, getattr(e, 'message', 'Invalid owner parameter: %s' % owner))
+    except Exception as e:
+        abort(400, getattr(e, 'message', 'Invalid owner parameter'))
+
+    # time range
+    from_time_str = args.get('from_time', None)
+    until_time_str = args.get('until_time', None)
+
+    try:
+        from_time = rfc3339DateTime.parse(from_time_str) if from_time_str is not None else None
+        until_time = rfc3339DateTime.parse(until_time_str) if until_time_str is not None else None
+        search_request.time_range(start=from_time, end=until_time)
+    except Exception:
+        abort(400, message='bad datetime format')
+
+    # optimade
+    try:
+        optimade = args.get('optimade', None)
+        if optimade is not None:
+            q = filterparser.parse_filter(optimade)
+            search_request.query(q)
+    except filterparser.FilterException:
+        abort(400, message='could not parse optimade query')
+
+    # search parameter
+    search_request.search_parameters(**{
+        key: value for key, value in args.items()
+        if key not in ['optimade'] and key in search.quantities})


 def calc_route(ns, prefix: str = ''):
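Taken together, these helpers replace the old ``search_request_parser``/``add_query`` pair from ``repo.py``. A sketch of how a new endpoint inside the same API package would be expected to wire them up, mirroring the archive endpoints above; the ``example`` namespace and route names are purely illustrative:

# Illustrative sketch only: a hypothetical module inside the same API package,
# composing the new common helpers the way the archive endpoints do.
from flask_restplus import Resource

from nomad import search

from .api import api
from .auth import authenticate
from .common import add_pagination_parameters, add_scroll_parameters, \
    add_search_parameters, apply_search_parameters

ns = api.namespace('example', description='Hypothetical namespace for illustration.')

_example_parser = api.parser()
add_pagination_parameters(_example_parser)  # page, per_page, order_by, order
add_scroll_parameters(_example_parser)      # scroll, scroll_id
add_search_parameters(_example_parser)      # owner, time range, all search quantities


@ns.route('/query')
class ExampleQueryResource(Resource):
    @api.doc('example_query')
    @api.expect(_example_parser, validate=True)
    @authenticate(signature_token=True)
    def get(self):
        args = {
            key: value for key, value in _example_parser.parse_args().items()
            if value is not None}

        search_request = search.SearchRequest()
        # owner, time range, optimade filter, and plain search parameters in one call
        apply_search_parameters(search_request, args)

        # execute_scan is the same call the archive download endpoint above uses
        return {'results': list(search_request.execute_scan(order_by='upload_id'))}, 200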
@@ -155,12 +245,32 @@ def streamed_zipfile(
     return response


-def build_snippet(args, base_url):
+def resolve_query_api_url(args: Dict[str, Any], base_url: str):
+    """
+    Generates a uri from query parameters and base url.
+    """
+    args_keys = list(args.keys())
+    args_keys.sort()
+    if args_keys == ['calc_id', 'upload_id']:
+        url = '"%s"' % os.path.join(base_url, args['upload_id'], args['calc_id'])
+    else:
+        url = '"%s?%s" % (base_url, urlencode(args))'
+    return url
+
+
+def query_api_python(args: Dict[str, Any], base_url: str):
+    """
+    Creates a string of python code to execute a search query to the repository using
+    the requests library.
+
+    Arguments:
+        args: A dict of search parameters that will be encoded in the uri
+        base_url: The resource url which is prepended to the uri
+    """
     str_code = 'import requests\n'
     str_code += 'from urllib.parse import urlencode\n'
     str_code += '\n\n'
     str_code += 'def query_repository(args, base_url):\n'
-    str_code += '    url = "%s?%s" % (base_url, urlencode(args))\n'
+    str_code += '    url = %s\n' % resolve_query_api_url(args, base_url)
     str_code += '    response = requests.get(url)\n'
     str_code += '    if response.status_code != 200:\n'
     str_code += '        raise Exception("nomad return status %d" % response.status_code)\n'
@@ -179,3 +289,15 @@ def build_snippet(args, base_url):
     str_code += 'JSON_DATA = query_repository(args, base_url)\n'

     return str_code
+
+
+def query_api_curl(args: Dict[str, Any], base_url: str):
+    """
+    Creates a string of curl command to execute a search query to the repository.
+
+    Arguments:
+        args: A dict of search parameters that will be encoded in the uri
+        base_url: The resource url which is prepended to the uri
+    """
+    args = {key: val for key, val in args.items() if val is not None}
+    uri = resolve_query_api_url(args, base_url)
+    return 'curl -X GET %s -H "accept: application/json" --output "nomad.json"' % uri
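For a query that consists of exactly ``upload_id`` and ``calc_id``, ``resolve_query_api_url`` joins the ids onto the base url instead of urlencoding them. A small sanity check of the two generators; the module path, base url, and ids are assumptions made for illustration:

# Sanity check of the new snippet generators; module path, base url, and ids
# below are assumed for illustration only.
from nomad.app.api.common import query_api_curl, query_api_python  # assumed path

base_url = 'https://nomad.example.org/api/archive/query'  # hypothetical resource url
args = {'upload_id': 'some_upload_id', 'calc_id': 'some_calc_id'}

# With exactly upload_id and calc_id given, the ids are joined onto the base url,
# so the generated command (printed as one line) is:
# curl -X GET "https://nomad.example.org/api/archive/query/some_upload_id/some_calc_id" -H "accept: application/json" --output "nomad.json"
print(query_api_curl(args, base_url))

# query_api_python() wraps the same url into a small requests-based script.
print(query_api_python(args, base_url))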
@@ -34,8 +34,7 @@ from nomad.processing import Calc

 from .api import api
 from .auth import authenticate, create_authorization_predicate
-from .repo import search_request_parser, add_query
-from .common import streamed_zipfile
+from .common import streamed_zipfile, add_search_parameters, apply_search_parameters

 ns = api.namespace('raw', description='Downloading raw data files.')
@@ -56,16 +55,16 @@ raw_file_strip_argument = dict(
     name='strip', type=bool, help='Removes a potential common path prefix from all file paths.',
     location='args')

-raw_file_from_path_parser = api.parser()
-raw_file_from_path_parser.add_argument(**raw_file_compress_argument)
-raw_file_from_path_parser.add_argument(**raw_file_strip_argument)
-raw_file_from_path_parser.add_argument(
+_raw_file_from_path_parser = api.parser()
+_raw_file_from_path_parser.add_argument(**raw_file_compress_argument)
+_raw_file_from_path_parser.add_argument(**raw_file_strip_argument)
+_raw_file_from_path_parser.add_argument(
     name='length', type=int, help='Download only x bytes from the given file.',
     location='args')
-raw_file_from_path_parser.add_argument(
+_raw_file_from_path_parser.add_argument(
     name='offset', type=int, help='Start downloading a file\' content from the given offset.',
     location='args')
-raw_file_from_path_parser.add_argument(
+_raw_file_from_path_parser.add_argument(
     name='decompress', type=int, help='Automatically decompress the file if compressed. Only supports .gz',
     location='args')
@@ -194,7 +193,7 @@ class RawFileFromUploadPathResource(Resource):
     @api.response(404, 'The upload or path does not exist')
     @api.response(401, 'Not authorized to access the requested files.')
     @api.response(200, 'File(s) send')
-    @api.expect(raw_file_from_path_parser, validate=True)
+    @api.expect(_raw_file_from_path_parser, validate=True)
     @authenticate(signature_token=True)
     def get(self, upload_id: str, path: str):
         """ Get a single raw calculation file, directory contents, or whole directory sub-tree
@@ -255,7 +254,7 @@ class RawFileFromCalcPathResource(Resource):
     @api.response(404, 'The upload or path does not exist')
     @api.response(401, 'Not authorized to access the requested files.')
     @api.response(200, 'File(s) send')
-    @api.expect(raw_file_from_path_parser, validate=True)
+    @api.expect(_raw_file_from_path_parser, validate=True)
     @authenticate(signature_token=True)
     def get(self, upload_id: str, calc_id: str, path: str):
         """ Get a single raw calculation file, calculation contents, or all files for a
@@ -295,7 +294,7 @@ class RawFileFromCalcEmptyPathResource(RawFileFromCalcPathResource):
     @api.response(404, 'The upload or path does not exist')
     @api.response(401, 'Not authorized to access the requested files.')
     @api.response(200, 'File(s) send')
-    @api.expect(raw_file_from_path_parser, validate=True)
+    @api.expect(_raw_file_from_path_parser, validate=True)
     @authenticate(signature_token=True)
     def get(self, upload_id: str, calc_id: str):
         """ Get calculation contents.
@@ -306,7 +305,7 @@ class RawFileFromCalcEmptyPathResource(RawFileFromCalcPathResource):
         return super().get(upload_id, calc_id, None)

-raw_files_request_model = api.model('RawFilesRequest', {
+_raw_files_request_model = api.model('RawFilesRequest', {
     'files': fields.List(
         fields.String, default=[], description='List of files to download.'),
     'compress': fields.Boolean(
@@ -314,11 +313,11 @@ raw_files_request_model = api.model('RawFilesRequest', {
         description='Enable compression, default is not compression.')
 })

-raw_files_request_parser = api.parser()
-raw_files_request_parser.add_argument(
+_raw_files_request_parser = api.parser()
+_raw_files_request_parser.add_argument(
     'files', required=True, type=str, help='Comma separated list of files to download.', location='args')
-raw_files_request_parser.add_argument(**raw_file_strip_argument)
-raw_files_request_parser.add_argument(**raw_file_compress_argument)
+_raw_files_request_parser.add_argument(**raw_file_strip_argument)
+_raw_files_request_parser.add_argument(**raw_file_compress_argument)

 @ns.route('/<string:upload_id>')
@@ -329,7 +328,7 @@ class RawFilesResource(Resource):
     @api.doc('get_files')
     @api.response(404, 'The upload or path does not exist')
     @api.response(200, 'File(s) send', headers={'Content-Type': 'application/zip'})
-    @api.expect(raw_files_request_model, validate=True)
+    @api.expect(_raw_files_request_model, validate=True)
     @authenticate()
     def post(self, upload_id):
         """ Download multiple raw calculation files in a .zip file.
@@ -346,7 +345,7 @@ class RawFilesResource(Resource):
     @api.doc('get_files_alternate')
     @api.response(404, 'The upload or path does not exist')
     @api.response(200, 'File(s) send', headers={'Content-Type': 'application/zip'})
-    @api.expect(raw_files_request_parser, validate=True)
+    @api.expect(_raw_files_request_parser, validate=True)
     @authenticate(signature_token=True)
     def get(self, upload_id):
         """
@@ -355,7 +354,7 @@ class RawFilesResource(Resource):
         Zip files are streamed; instead of 401 errors, the zip file will just not contain
         any files that the user is not authorized to access.
         """
-        args = raw_files_request_parser.parse_args()
+        args = _raw_files_request_parser.parse_args()
         files_str = args.get('files')
         compress = args.get('compress', False)
         strip = args.get('strip', False)
@@ -367,12 +366,13 @@ class RawFilesResource(Resource):
         return respond_to_get_raw_files(upload_id, files, compress=compress, strip=strip)

-raw_file_from_query_parser = search_request_parser.copy()
-raw_file_from_query_parser.add_argument(
+_raw_file_from_query_parser = api.parser()
+add_search_parameters(_raw_file_from_query_parser)
+_raw_file_from_query_parser.add_argument(
     name='compress', type=bool, help='Use compression on .zip files, default is not.',
     location='args')
-raw_file_from_query_parser.add_argument(**raw_file_strip_argument)