Commit b09ae971 in ift/resolve, authored 3 years ago by Philipp Arras

Add simpler MPI likelihood example

Parent: 888f127c
No related branches found. No related tags found.
1 merge request: !41 Draft: Lh distribution
Pipeline #144667 passed 2 years ago (stages: build_docker, testing)
Showing 2 changed files with 130 additions and 70 deletions:
  playground/getting_started_3_mpilh.py   +72 −70
  playground/lh_distribution_demo.py      +58 −0
playground/getting_started_3_mpilh.py  +72 −70  (view file @ b09ae971)

@@ -54,74 +54,76 @@ def distribution_strategy(distribution_shape, comm):

The driver code below the distribution_strategy() helper, previously at module level, is now wrapped in an if __name__ == "__main__": guard so that other scripts can import distribution_strategy without executing the full example. Apart from the added indentation, the statements are unchanged:

    return comm0, comm1


if __name__ == "__main__":
    comm_world = MPI.COMM_WORLD
    n_lhs = 4
    n_samples = 5
    samplecomm, lhcomm = distribution_strategy((2*n_samples, n_lhs), comm_world)
    position_space = ift.RGSpace((128, 128))
    args = {
        "offset_mean": 0,
        "offset_std": (1e-3, 1e-6),
        "fluctuations": (1.0, 0.8),
        "loglogavgslope": (-3.0, 1),
        "flexibility": (2, 1.0),
        "asperity": (0.5, 0.4),
    }
    correlated_field = ift.SimpleCorrelatedField(position_space, **args)
    signal = ift.sigmoid(correlated_field)

    # Create all likelihoods
    n_los = 100
    assert n_los > n_lhs
    LOS_starts = list(ift.random.current_rng().random((n_los, 2)).T)
    LOS_ends = list(0.5 + 0*ift.random.current_rng().random((n_los, 2)).T)
    mock_position = ift.from_random(signal.domain, "normal")
    responses, datas, inverse_covariances = [], [], []
    for ii in range(n_lhs):
        lo, hi = ift.utilities.shareRange(n_los, n_lhs, ii)
        R = ift.LOSResponse(
            position_space,
            starts=[xx[lo:hi] for xx in LOS_starts],
            ends=[yy[lo:hi] for yy in LOS_ends],
        )
        responses.append(R)
        data_space = R.target
        noise = 0.001
        N = ift.ScalingOperator(data_space, noise, np.float64)
        datas.append(R(signal(mock_position)) + N.draw_sample())
        inverse_covariances.append(N.inverse)
    lhs = [
        ift.GaussianEnergy(data=dd, inverse_covariance=ic) @ rr
        for dd, rr, ic in zip(datas, responses, inverse_covariances)
    ]
    # /Create all likelihoods

    # Select the likelihoods relevant for the current task
    lh_size, lh_rank, lh_master = ift.utilities.get_MPI_params_from_comm(lhcomm)
    lo, hi = ift.utilities.shareRange(n_lhs, lh_size, lh_rank)
    lhs = lhs[lo:hi]
    # /Select the likelihoods relevant for the current task

    likelihood_energy = rve.AllreduceSum(lhs, lhcomm) @ signal

    ic_sampling = ift.GradientNormController(name="Sampling (linear)", iteration_limit=20)
    ic_newton = ift.GradientNormController(name="Newton", iteration_limit=10)
    minimizer = ift.NewtonCG(ic_newton)

    samples = ift.optimize_kl(
        likelihood_energy,
        2,
        n_samples,
        minimizer,
        ic_sampling,
        None,
        plottable_operators={"signal": (signal, dict(vmin=0, vmax=1))},
        ground_truth_position=mock_position,
        output_directory=f"results_{comm_world.Get_size()}tasks",
        overwrite=True,
        comm=samplecomm,
    )
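The distribution_strategy() helper that produces samplecomm and lhcomm is defined above line 54 and is not visible in this hunk. Purely as an illustrative sketch of the general technique (this is not resolve's actual implementation; the function name split_into_grid and the colouring scheme are assumptions), a two-dimensional split of MPI.COMM_WORLD with mpi4py could look like this:

# Illustrative sketch only -- not the distribution_strategy() defined in this file.
# Assumes the number of MPI tasks factorises as n_sample_tasks * n_lh_tasks.
from mpi4py import MPI


def split_into_grid(comm, n_lh_tasks):
    """Derive a 'sample' and a 'likelihood' communicator from comm.

    Ranks with equal rank // n_lh_tasks form one likelihood communicator
    (same samples, different likelihood parts); ranks with equal
    rank % n_lh_tasks form one sample communicator.
    """
    size, rank = comm.Get_size(), comm.Get_rank()
    assert size % n_lh_tasks == 0
    lhcomm = comm.Split(color=rank // n_lh_tasks, key=rank)
    samplecomm = comm.Split(color=rank % n_lh_tasks, key=rank)
    return samplecomm, lhcomm


if __name__ == "__main__":
    samplecomm, lhcomm = split_into_grid(MPI.COMM_WORLD, n_lh_tasks=2)
    print(MPI.COMM_WORLD.Get_rank(), samplecomm.Get_rank(), lhcomm.Get_rank())

With such a split, rve.AllreduceSum(lhs, lhcomm) adds up the per-rank likelihood contributions, while ift.optimize_kl(..., comm=samplecomm) distributes the samples over the sample communicator.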
playground/lh_distribution_demo.py  (new file, mode 100644)  +58 −0  (view file @ b09ae971)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2022 Max-Planck-Society
# Copyright(C) 2022 Max-Planck-Society, Philipp Arras
import nifty8 as ift
import numpy as np
import resolve as rve
from nifty8.utilities import allreduce_sum
from mpi4py import MPI

from getting_started_3_mpilh import distribution_strategy


def get_local(lst, comm):
    return lst[slice(*ift.utilities.shareRange(len(lst), *ift.utilities.get_MPI_params_from_comm(comm)[:2]))]


def eval_likelihood(x, num):
    return num*num*x


def main():
    n_lhs = 4
    n_samples = 5
    comm = MPI.COMM_WORLD
    samplecomm, lhcomm = distribution_strategy((2*n_samples, n_lhs), comm)

    # Would not be present in real application
    global_samples = list(np.arange(n_samples)*10)
    global_lh_inds = list(range(n_lhs))
    # /Would not be present in real application

    # We want to compute: sum_i sum_j lh_j(sample_i)
    # In this example lh_j = lambda x: j*j*x

    # Get quantities that would be locally present
    local_lh_inds = get_local(global_lh_inds, lhcomm)
    local_samples = get_local(global_samples, samplecomm)

    res = allreduce_sum([allreduce_sum([eval_likelihood(ss, i_local_lh)
                                        for i_local_lh in local_lh_inds], lhcomm)
                         for ss in local_samples], samplecomm)
    print("Total sum", res)


if __name__ == "__main__":
    main()
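As a quick sanity check, the quantity that the nested allreduce_sum calls compute can be evaluated serially. This is only a reference sketch using the numbers hard-coded in the demo (n_lhs = 4, n_samples = 5); it is not part of the committed file:

# Serial reference for sum_i sum_j lh_j(sample_i) with lh_j(x) = j*j*x,
# mirroring the comments in main() above. No MPI required.
import numpy as np

n_lhs, n_samples = 4, 5
global_samples = list(np.arange(n_samples) * 10)   # [0, 10, 20, 30, 40]
expected = sum(j * j * ss for ss in global_samples for j in range(n_lhs))
print("Expected total sum:", expected)             # 1400

Each MPI task running lh_distribution_demo.py should print this same value as "Total sum", provided the task count matches what distribution_strategy expects.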