nomad-lab / nomad-FAIR

Commit 898b2a71, authored Aug 21, 2020 by Markus Scheidgen

Fixed parallel archive access issues. #350

Parent: 17784aed
Pipeline #80899 passed with stages in 35 minutes and 13 seconds
Changes: 3 files
examples/archive/client.py
```diff
@@ -2,7 +2,6 @@
 A simple example that uses the NOMAD client library to access the archive.
 '''
-from nomad import config
 from nomad.client import ArchiveQuery
 from nomad.metainfo import units
@@ -22,12 +21,13 @@ query = ArchiveQuery(
             'section_system[-2]': '*'
         }
     },
-    per_page=10,
+    parallel=5,
+    per_page=20,
     max=1000)

 print(query)

-for result in query[0:10]:
+for result in query[0:100]:
     run = result.section_run[0]
     energy = run.section_single_configuration_calculation[0].energy_total
     formula = run.section_system[0].chemical_composition_reduced
```
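The example now exercises the new parallel access: `parallel=5` alongside `per_page=20` and `max=1000`, with the loop walking the first 100 results. The tests below drive the same feature through the lower-level `query_archive` function; a minimal sketch of such a call, assuming only the keywords visible elsewhere in this commit:

```python
from nomad.client import query_archive

# Sketch, not part of the commit: same keywords as the calls in
# tests/test_client.py -- request all section_run data, fetching two
# pages of ten entries at a time.
result = query_archive(required=dict(section_run='*'), parallel=2, per_page=10)

for entry in result:
    # entries behave like the ArchiveQuery results in the example above
    print(entry.section_run[0])
```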
nomad/client.py

(This diff is collapsed in the original view and is not reproduced here.)
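The actual fix lives in this collapsed diff. While its contents are not shown, the test fixture below patches `multiprocessing.Pool`, `nomad.client.get_json`, and `nomad.client.get_length`, which suggests the client fans page requests out over a process pool and reads page payloads and sizes through those two helpers. A purely hypothetical sketch of that pattern, not the commit's actual code:

```python
import multiprocessing

import requests


def get_json(response):
    # Patched in tests/test_client.py below, so the real client exposes a
    # helper with this name; the body here is an assumption.
    return response.json()


def get_length(response):
    # Same caveat: the name is taken from the monkeypatch targets below.
    return int(response.headers['Content-Length'])


def fetch_page(url):
    # Hypothetical worker: download and decode one page of archive data.
    return get_json(requests.get(url))


def fetch_pages(urls, parallel):
    # Hypothetical fan-out: distribute page URLs over 'parallel' processes.
    with multiprocessing.Pool(parallel) as pool:
        return pool.map(fetch_page, urls)
```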
tests/test_client.py
```diff
-from typing import List
+from typing import List, Tuple

 import pytest

 from nomad.client import query_archive
 from nomad.metainfo import MSection, SubSection
-from nomad.datamodel import EntryArchive
+from nomad.datamodel import EntryArchive, User
 from nomad.datamodel.metainfo.public import section_run

 from tests.app.test_app import BlueprintClient
+from tests.processing import test_data as test_processing

 # TODO with the existing published_wo_user_metadata fixture there is only one entry
@@ -66,3 +67,52 @@ def test_query_authentication(api, published, other_test_user_auth, test_user_au
     assert_results(query_archive(authentication=other_test_user_auth), total=1)
     assert_results(query_archive(authentication=test_user_auth), total=0)
+
+
+@pytest.fixture(scope='function')
+def many_uploads(non_empty_uploaded: Tuple[str, str], test_user: User, proc_infra):
+    _, upload_file = non_empty_uploaded
+    for index in range(0, 4):
+        upload = test_processing.run_processing(('test_upload_%d' % index, upload_file), test_user)
+        upload.publish_upload()  # pylint: disable=no-member
+        try:
+            upload.block_until_complete(interval=.01)
+        except Exception:
+            pass
+
+
+@pytest.fixture(scope='function', autouse=True)
+def patch_multiprocessing_and_api(monkeypatch):
+    class TestPool:
+        ''' A fake multiprocessing pool, because multiprocessing does not work well in pytest. '''
+        def __init__(self, n):
+            pass
+
+        def map(self, f, args):
+            return [f(arg) for arg in args]
+
+        def __enter__(self, *args, **kwargs):
+            return self
+
+        def __exit__(self, *args, **kwargs):
+            pass
+
+    monkeypatch.setattr('multiprocessing.Pool', TestPool)
+    monkeypatch.setattr('nomad.client.get_json', lambda response: response.json)
+    monkeypatch.setattr('nomad.client.get_length', lambda response: int(response.headers['Content-Length']))
+
+
+def test_parallel_query(api, many_uploads, monkeypatch):
+    result = query_archive(required=dict(section_run='*'), parallel=2)
+    assert_results(result, total=4)
+    assert result._statistics.nentries == 4
+    assert result._statistics.loaded_nentries == 4
+    assert result._statistics.last_response_nentries == 4
+    assert result._statistics.napi_calls == 1
+
+    result = query_archive(required=dict(section_run='*'), parallel=2, per_page=1)
+    assert_results(result, total=4)
+    assert result._statistics.nentries == 4
+    assert result._statistics.loaded_nentries == 4
+    assert result._statistics.last_response_nentries == 2
+    assert result._statistics.napi_calls == 2
```
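The statistics assertions encode the fan-out arithmetic the client is expected to follow. A hedged reading, assuming one API call fetches `parallel` pages of `per_page` entries concurrently:

```python
import math

# Assumed semantics, inferred from the assertions above: each API call
# requests 'parallel' pages at once, 'per_page' entries per page.
def expected_api_calls(nentries, per_page, parallel):
    return math.ceil(nentries / (per_page * parallel))

# 4 published uploads, per_page=1, parallel=2 -> 2 entries per call,
# hence napi_calls == 2 and last_response_nentries == 2 above.
print(expected_api_calls(4, per_page=1, parallel=2))  # 2
```

In the first query, no `per_page` is given and a single call covers all 4 entries (`napi_calls == 1`), consistent with a default page size of at least 2 under this reading.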