ift / NIFTy

Commit 10347246, authored Nov 08, 2017 by Martin Reinecke

    fixes

Parent: f53b3afa
Pipeline #21249 failed with stage in 3 minutes and 56 seconds
12 changed files
nifty/data_objects/distributed_do.py

@@ -20,9 +20,13 @@ def shareRange(nwork, nshares, myshare):

 def get_locshape(shape, distaxis):
     if distaxis == -1:
         return shape
     if distaxis < 0 or distaxis >= len(shape):
         print distaxis, shape
     shape2 = list(shape)
     shape2[distaxis] = shareSize(shape[distaxis], ntask, rank)
     return tuple(shape2)

 def local_shape(shape, distaxis):
     return get_locshape(shape, distaxis)

 class data_object(object):
     def __init__(self, shape, data, distaxis):

@@ -87,7 +91,7 @@ class data_object(object):
         if (self._distaxis == -1):
             return res
         res2 = np.empty(1, dtype=res.dtype)
-        MPI.COMM_WORLD.Allreduce(res, res2, mpiop)
+        comm.Allreduce(res, res2, mpiop)
         return res2[0]
         if self._distaxis in axis:

@@ -113,6 +117,7 @@ class data_object(object):
         if a._shape != b._shape:
             raise ValueError("shapes are incompatible.")
         if a._distaxis != b._distaxis:
+            print(a._distaxis, b._distaxis)
             raise ValueError("distributions are incompatible.")
         a = a._data
         b = b._data

@@ -178,16 +183,16 @@ class data_object(object):
         return self._binary_helper(other, op='__ne__')

     def __neg__(self):
-        return data_object(-self._data)
+        return data_object(self._shape, -self._data, self._distaxis)

     def __abs__(self):
-        return data_object(np.abs(self._data))
+        return data_object(self._shape, np.abs(self._data), self._distaxis)

-    def ravel(self):
-        return data_object(self._data.ravel())
+#    def ravel(self):
+#        return data_object(self._data.ravel())

-    def reshape(self, shape):
-        return data_object(self._data.reshape(shape))
+#    def reshape(self, shape):
+#        return data_object(self._data.reshape(shape))

     def all(self):
         return self._data.all()

@@ -197,19 +202,19 @@ class data_object(object):
 def full(shape, fill_value, dtype=None, distaxis=0):
-    return data_object(shape, np.full(shape, local_shape(shape, distaxis), fill_value, dtype))
+    return data_object(shape, np.full(get_locshape(shape, distaxis), fill_value, dtype), distaxis)

-def empty(shape, dtype=np.float):
-    return data_object(np.empty(shape, dtype))
+def empty(shape, dtype=None, distaxis=0):
+    return data_object(shape, np.empty(get_locshape(shape, distaxis), dtype), distaxis)

-def zeros(shape, dtype=np.float):
-    return data_object(np.zeros(shape, dtype))
+def zeros(shape, dtype=None, distaxis=0):
+    return data_object(shape, np.zeros(get_locshape(shape, distaxis), dtype), distaxis)

-def ones(shape, dtype=np.float):
-    return data_object(np.ones(shape, dtype))
+def ones(shape, dtype=None, distaxis=0):
+    return data_object(shape, np.ones(get_locshape(shape, distaxis), dtype), distaxis)

 def empty_like(a, dtype=None):

@@ -217,7 +222,10 @@ def empty_like(a, dtype=None):
 def vdot(a, b):
-    return np.vdot(a._data, b._data)
+    tmp = np.vdot(a._data.ravel(), b._data.ravel())
+    res = np.empty(1, dtype=type(tmp))
+    comm.Allreduce(tmp, res, MPI.SUM)
+    return res

 def _math_helper(x, function, out):

@@ -225,7 +233,7 @@ def _math_helper(x, function, out):
         function(x._data, out=out._data)
         return out
     else:
-        return data_object(function(x._data))
+        return data_object(x.shape, function(x._data), x._distaxis)

 def abs(a, out=None):

@@ -252,32 +260,37 @@ def bincount(x, weights=None, minlength=None):
 def from_object(object, dtype=None, copy=True):
-    return data_object(np.array(object._data, dtype=dtype, copy=copy))
+    return data_object(object._shape, np.array(object._data, dtype=dtype, copy=copy), distaxis=object._distaxis)

-def from_random(random_type, shape, dtype=np.float64, **kwargs):
+def from_random(random_type, shape, dtype=np.float64, distaxis=0, **kwargs):
     generator_function = getattr(Random, random_type)
-    return data_object(generator_function(dtype=dtype, shape=shape, **kwargs))
+    lshape = get_locshape(shape, distaxis)
+    return data_object(shape, generator_function(dtype=dtype, shape=lshape, **kwargs), distaxis=distaxis)

 def to_ndarray(arr):
     return arr._data

-def from_ndarray(arr):
-    return data_object(arr.shape, arr, -1)
+def from_ndarray(arr, distaxis=0):
+    return data_object(arr.shape, arr, distaxis)

 def local_data(arr):
     return arr._data

-#def ibegin(arr):
-#    return (0,)*arr._data.ndim
+def ibegin(arr):
+    res = [0]*arr._data.ndim
+    res[arr._distaxis] = shareRange(arr._shape[arr._distaxis], ntask, rank)[0]
+    return tuple(res)

-#def np_allreduce_sum(arr):
-#    return arr
+def np_allreduce_sum(arr):
+    res = np.empty_like(arr)
+    comm.Allreduce(arr, res, MPI.SUM)
+    return res

 def distaxis(arr):

@@ -297,6 +310,13 @@ def from_global_data (arr, distaxis=0):
     return data_object(arr.shape, arr[sl], distaxis)

 def to_global_data(arr):
     if arr._distaxis == -1:
         return arr._data
     tmp = redistribute(arr, dist=-1)
     return tmp._data

 def redistribute(arr, dist=None, nodist=None):
     if dist is not None:
         if nodist is not None:
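For orientation, here is a minimal usage sketch of the distaxis-aware interface that this file now exposes. It only uses signatures visible in the diff above; the 'normal' generator name, the import path and the MPI launch (e.g. mpirun -np 4 python script.py) are assumptions rather than part of this commit.

    # Sketch: creating and inspecting MPI-distributed data objects through dobj.
    # Assumes distributed_do is the active backend behind nifty2go.dobj.
    import numpy as np
    from nifty2go import dobj

    shape = (16, 32)
    a = dobj.full(shape, 3., dtype=np.float64, distaxis=0)   # axis 0 split over tasks
    b = dobj.from_random('normal', shape, dtype=np.float64, distaxis=0)

    print(dobj.local_shape(shape, 0))   # this task's slab of the global shape
    print(dobj.local_data(a).shape)     # the locally stored block
    print(dobj.ibegin(b))               # global index where the local block starts

    s = dobj.vdot(a, b)                 # inner product, Allreduce'd over all tasks
    g = dobj.to_global_data(b)          # gather the full array on every task
    c = dobj.redistribute(b, dist=1)    # move the distributed axis from 0 to 1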
nifty/data_objects/my_own_do.py

@@ -228,20 +228,20 @@ def np_allreduce_sum(arr):
     return arr

-def dist_axis(arr):
+def distaxis(arr):
     return -1

-def from_local_data(shape, arr, dist_axis):
-    if dist_axis != -1:
+def from_local_data(shape, arr, distaxis):
+    if distaxis != -1:
         raise NotImplementedError
     if shape != arr.shape:
         raise ValueError
     return data_object(arr)

-def from_global_data(arr, dist_axis):
-    if dist_axis != -1:
+def from_global_data(arr, distaxis):
+    if distaxis != -1:
         raise NotImplementedError
     return data_object(arr)

@@ -252,11 +252,11 @@ def redistribute (arr, dist=None, nodist=None):
     return arr

-def default_dist_axis():
+def default_distaxis():
     return -1

-def local_shape(glob_shape, dist_axis):
-    if dist_axis != -1:
+def local_shape(glob_shape, distaxis):
+    if distaxis != -1:
         raise NotImplementedError
     return glob_shape
nifty/data_objects/numpy_do.py

@@ -35,20 +35,20 @@ def np_allreduce_sum(arr):
     return arr

-def dist_axis(arr):
+def distaxis(arr):
     return -1

-def from_local_data(shape, arr, dist_axis):
-    if dist_axis != -1:
+def from_local_data(shape, arr, distaxis):
+    if distaxis != -1:
         raise NotImplementedError
     if shape != arr.shape:
         raise ValueError
     return arr

-def from_global_data(arr, dist_axis):
-    if dist_axis != -1:
+def from_global_data(arr, distaxis):
+    if distaxis != -1:
         raise NotImplementedError
     return arr

@@ -59,11 +59,11 @@ def redistribute (arr, dist=None, nodist=None):
     return arr

-def default_dist_axis():
+def default_distaxis():
     return -1

-def local_shape(glob_shape, dist_axis):
-    if dist_axis != -1:
+def local_shape(glob_shape, distaxis):
+    if distaxis != -1:
         raise NotImplementedError
     return glob_shape
nifty/dobj.py

-from .data_objects.my_own_do import *
+from .data_objects.distributed_do import *
+#from .data_objects.my_own_do import *
 #from .data_objects.numpy_do import *
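The active data-object backend is selected here at import time: whichever star import is left uncommented defines the dobj namespace that the rest of the package uses (local_data, from_global_data, distaxis, redistribute, ...). Going back to a purely serial run should therefore, as far as this diff shows, only require flipping the imports again; a sketch of the serial configuration (not part of the commit):

    # nifty/dobj.py -- serial configuration (sketch)
    #from .data_objects.distributed_do import *
    from .data_objects.numpy_do import *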
nifty/field.py

@@ -351,7 +351,7 @@ class Field(object):
             fct = tmp
         if spaces is None:
-            return fct*dobj.vdot(y.val.ravel(), x.val.ravel())
+            return fct*dobj.vdot(y.val, x.val)
         else:
             spaces = utilities.cast_iseq_to_tuple(spaces)
             active_axes = []
nifty/operators/fft_operator_support.py

@@ -61,19 +61,20 @@ class RGRGTransformation(Transformation):
         axes = x.domain.axes[self.space]
         p2h = x.domain == self.pdom
         tdom = self.hdom if p2h else self.pdom
-        if dobj.dist_axis(x.val) in axes:
-            tmpax = (dobj.dist_axis(x.val),)
+        if dobj.distaxis(x.val) in axes:
+            tmpax = (dobj.distaxis(x.val),)
             tmp = dobj.redistribute(x.val, nodist=tmpax)
             ldat = dobj.local_data(tmp)
-            tmp = dobj.from_local_data(tmp.shape, hartley(ldat, tmpax), dist_axis=dobj.dist_axis(tmp))
+            tmp = dobj.from_local_data(tmp.shape, hartley(ldat, tmpax), distaxis=dobj.distaxis(tmp))
             tmp = dobj.redistribute(tmp, dist=tmpax[0])
             tmpax = tuple(i for i in axes if i not in tmpax)
-            ldat = dobj.local_data(tmp)
-            tmp = dobj.from_local_data(tmp.shape, hartley(ldat, tmpax), dist_axis=dobj.dist_axis(tmp))
+            if len(tmpax) > 0:
+                ldat = dobj.local_data(tmp)
+                tmp = dobj.from_local_data(tmp.shape, hartley(ldat, tmpax), distaxis=dobj.distaxis(tmp))
             Tval = Field(tdom, tmp)
         else:
             ldat = dobj.local_data(x.val)
-            tmp = dobj.from_local_data(x.val.shape, hartley(ldat, axes), dist_axis=dobj.dist_axis(x.val))
+            tmp = dobj.from_local_data(x.val.shape, hartley(ldat, axes), distaxis=dobj.distaxis(x.val))
             Tval = Field(tdom, tmp)
         fct = self.fct_p2h if p2h else self.fct_h2p
         if fct != 1:

@@ -124,19 +125,19 @@ class SphericalTransformation(Transformation):
         axes = x.domain.axes[self.space]
         axis = axes[0]
         tval = x.val
-        if dobj.dist_axis(tval) == axis:
+        if dobj.distaxis(tval) == axis:
             tval = dobj.redistribute(tval, nodist=(axis,))
-        distaxis = dobj.dist_axis(tval)
+        distaxis = dobj.distaxis(tval)
         p2h = x.domain == self.pdom
         idat = dobj.local_data(tval)
         if p2h:
-            odat = np.empty(dobj.local_shape(self.hdom.shape, dist_axis=distaxis), dtype=x.dtype)
+            odat = np.empty(dobj.local_shape(self.hdom.shape, distaxis=distaxis), dtype=x.dtype)
             for slice in utilities.get_slice_list(idat.shape, axes):
                 odat[slice] = self._slice_p2h(idat[slice])
             odat = dobj.from_local_data(self.hdom.shape, odat, distaxis)
-            if distaxis != dobj.dist_axis(x):
+            if distaxis != dobj.distaxis(x.val):
                 odat = dobj.redistribute(odat, dist=distaxis)
             return Field(self.hdom, odat)
         else:
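Condensed, the RGRG branch above implements the following pattern: if the distributed axis is among the axes to be transformed, redistribute so that this axis becomes local, transform along it, restore the original distribution, and only then transform the remaining axes; the new len(tmpax) > 0 guard skips the second transform when no axes are left. A sketch of that logic as a standalone helper, using only the dobj calls appearing in this diff (the helper itself is hypothetical, not part of the commit):

    def _hartley_distributed(val, axes):
        # Sketch: apply `hartley` along `axes` of a distributed data object.
        dax = dobj.distaxis(val)
        if dax not in axes:
            # Distributed axis untouched: transform all axes on local data in one go.
            return dobj.from_local_data(val.shape,
                                        hartley(dobj.local_data(val), axes),
                                        distaxis=dax)
        # 1) Make the distributed axis local and transform along it.
        tmp = dobj.redistribute(val, nodist=(dax,))
        tmp = dobj.from_local_data(tmp.shape,
                                   hartley(dobj.local_data(tmp), (dax,)),
                                   distaxis=dobj.distaxis(tmp))
        # 2) Restore the original distribution.
        tmp = dobj.redistribute(tmp, dist=dax)
        # 3) Transform the remaining axes, if any.
        rest = tuple(i for i in axes if i != dax)
        if len(rest) > 0:
            tmp = dobj.from_local_data(tmp.shape,
                                       hartley(dobj.local_data(tmp), rest),
                                       distaxis=dobj.distaxis(tmp))
        return tmp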
nifty/operators/power_projection_operator.py

@@ -53,7 +53,7 @@ class PowerProjectionOperator(LinearOperator):
     def _times(self, x):
         pindex = self._target[self._space].pindex
         res = Field.zeros(self._target, dtype=x.dtype)
-        if dobj.dist_axis(x.val) in x.domain.axes[self._space]:  # the distributed axis is part of the projected space
+        if dobj.distaxis(x.val) in x.domain.axes[self._space]:  # the distributed axis is part of the projected space
             pindex = dobj.local_data(pindex)
         else:
             pindex = dobj.to_ndarray(pindex)

@@ -71,7 +71,7 @@ class PowerProjectionOperator(LinearOperator):
     def _adjoint_times(self, x):
         pindex = self._target[self._space].pindex
         res = Field.empty(self._domain, dtype=x.dtype)
-        if dobj.dist_axis(x.val) in x.domain.axes[self._space]:  # the distributed axis is part of the projected space
+        if dobj.distaxis(x.val) in x.domain.axes[self._space]:  # the distributed axis is part of the projected space
             pindex = dobj.local_data(pindex)
         else:
             pindex = dobj.to_ndarray(pindex)
nifty/spaces/lm_space.py

@@ -21,7 +21,7 @@ import numpy as np
 from .space import Space
 from .. import Field
 from ..basic_arithmetics import exp
-from ..dobj import from_ndarray as from_np
+from .. import dobj

 class LMSpace(Space):

@@ -103,7 +103,7 @@ class LMSpace(Space):
         for m in range(1, mmax+1):
             ldist[idx:idx+2*(lmax+1-m)] = tmp[2*m:]
             idx += 2*(lmax+1-m)
-        return Field((self,), from_np(ldist))
+        return Field((self,), dobj.from_global_data(ldist))

     def get_unique_k_lengths(self):
         return np.arange(self.lmax+1, dtype=np.float64)
nifty/spaces/power_space.py

@@ -144,7 +144,7 @@ class PowerSpace(Space):
                 tbb = binbounds
             locdat = np.searchsorted(tbb, dobj.local_data(k_length_array.val))
             temp_pindex = dobj.from_local_data(
-                k_length_array.val.shape, locdat, dobj.dist_axis(k_length_array.val))
+                k_length_array.val.shape, locdat, dobj.distaxis(k_length_array.val))
             nbin = len(tbb)
             temp_rho = np.bincount(dobj.local_data(temp_pindex).ravel(), minlength=nbin)
test/test_field.py

@@ -21,7 +21,7 @@ import numpy as np
 from numpy.testing import assert_equal, assert_allclose
 from itertools import product
 import nifty2go as ift
-from nifty2go.dobj import to_ndarray as to_np, from_ndarray as from_np
+from nifty2go import dobj
 from test.common import expand

@@ -59,12 +59,12 @@ class Test_Functionality(unittest.TestCase):
         np.random.seed(11)
         p1 = ift.PowerSpace(space1)
-        fp1 = ift.Field(p1, val=from_np(_spec1(p1.k_lengths)))
+        fp1 = ift.Field(p1, val=dobj.from_global_data(_spec1(p1.k_lengths)))
         p2 = ift.PowerSpace(space2)
-        fp2 = ift.Field(p2, val=from_np(_spec2(p2.k_lengths)))
+        fp2 = ift.Field(p2, val=dobj.from_global_data(_spec2(p2.k_lengths)))

-        outer = from_np(np.outer(to_np(fp1.val), to_np(fp2.val)))
+        outer = dobj.from_global_data(np.outer(dobj.to_global_data(fp1.val), dobj.to_global_data(fp2.val)))
         fp = ift.Field((p1, p2), val=outer)

         samples = 500

@@ -78,8 +78,8 @@ class Test_Functionality(unittest.TestCase):
             ps1 += sp.sum(spaces=1)/fp2.sum()
             ps2 += sp.sum(spaces=0)/fp1.sum()

-        assert_allclose(to_np(ps1.val/samples), to_np(fp1.val), rtol=0.2)
-        assert_allclose(to_np(ps2.val/samples), to_np(fp2.val), rtol=0.2)
+        assert_allclose(dobj.to_global_data(ps1.val/samples), dobj.to_global_data(fp1.val), rtol=0.2)
+        assert_allclose(dobj.to_global_data(ps2.val/samples), dobj.to_global_data(fp2.val), rtol=0.2)

     @expand(product([ift.RGSpace((8,), harmonic=True),
                      ift.RGSpace((8, 8), harmonic=True, distances=0.123)],

@@ -91,10 +91,10 @@ class Test_Functionality(unittest.TestCase):
         fulldomain = ift.DomainTuple.make((space1, space2))
         p1 = ift.PowerSpace(space1)
-        fp1 = ift.Field(p1, val=from_np(_spec1(p1.k_lengths)))
+        fp1 = ift.Field(p1, val=dobj.from_global_data(_spec1(p1.k_lengths)))
         p2 = ift.PowerSpace(space2)
-        fp2 = ift.Field(p2, val=from_np(_spec2(p2.k_lengths)))
+        fp2 = ift.Field(p2, val=dobj.from_global_data(_spec2(p2.k_lengths)))

         S_1 = ift.create_power_field(space1, lambda x: np.sqrt(_spec1(x)))
         S_1 = ift.DiagonalOperator(S_1, domain=fulldomain, spaces=0)

@@ -113,8 +113,8 @@ class Test_Functionality(unittest.TestCase):
             ps1 += sp.sum(spaces=1)/fp2.sum()
             ps2 += sp.sum(spaces=0)/fp1.sum()

-        assert_allclose(to_np(ps1.val/samples), to_np(fp1.val), rtol=0.2)
-        assert_allclose(to_np(ps2.val/samples), to_np(fp2.val), rtol=0.2)
+        assert_allclose(dobj.to_global_data(ps1.val/samples), dobj.to_global_data(fp1.val), rtol=0.2)
+        assert_allclose(dobj.to_global_data(ps2.val/samples), dobj.to_global_data(fp2.val), rtol=0.2)

     def test_vdot(self):
         s = ift.RGSpace((10,))
test/test_operators/test_smoothing_operator.py

@@ -58,7 +58,7 @@ class SmoothingOperator_Tests(unittest.TestCase):
         op = ift.FFTSmoothingOperator(space, sigma=sigma)
         fld = np.zeros(space.shape, dtype=np.float64)
         fld[0] = 1.
-        rand1 = ift.Field(space, ift.dobj.from_global_data(fld, dist_axis=-1))
+        rand1 = ift.Field(space, ift.dobj.from_global_data(fld, distaxis=-1))
         tt1 = op.times(rand1)
         assert_allclose(1, tt1.sum())
test/test_spaces/test_power_space.py

@@ -24,7 +24,7 @@ from numpy.testing import assert_, assert_equal, assert_allclose,\
 import nifty2go as ift
 from test.common import expand
 from itertools import product, chain
-from nifty2go.dobj import to_ndarray as to_np, from_ndarray as from_np
+from nifty2go import dobj

 HARMONIC_SPACES = [ift.RGSpace((8,), harmonic=True),
                    ift.RGSpace((7, 8), harmonic=True),

@@ -53,7 +53,7 @@ CONSTRUCTOR_CONFIGS = [
         'dim': 5,
         'harmonic_partner': ift.RGSpace((8,), harmonic=True),
         'binbounds': None,
-        'pindex': from_np(np.array([0, 1, 2, 3, 4, 3, 2, 1])),
+        'pindex': dobj.from_global_data(np.array([0, 1, 2, 3, 4, 3, 2, 1])),
         'k_lengths': np.array([0., 1., 2., 3., 4.]),
         }],
     [ift.RGSpace((8,), harmonic=True), True, None, None, {

@@ -62,7 +62,7 @@ CONSTRUCTOR_CONFIGS = [
         'dim': 4,
         'harmonic_partner': ift.RGSpace((8,), harmonic=True),
         'binbounds': (0.5, 1.3228756555322954, 3.5),
-        'pindex': from_np(np.array([0, 1, 2, 2, 3, 2, 2, 1])),
+        'pindex': dobj.from_global_data(np.array([0, 1, 2, 2, 3, 2, 2, 1])),
         'k_lengths': np.array([0., 1., 2.5, 4.]),
         }],
     ]

@@ -96,7 +96,7 @@ class PowerSpaceConsistencyCheck(unittest.TestCase):
                                             nbin)
         p = ift.PowerSpace(harmonic_partner=harmonic_partner, binbounds=bb)
-        assert_equal(np.bincount(to_np(p.pindex.ravel())), p.dvol(),
+        assert_equal(np.bincount(dobj.to_global_data(p.pindex).ravel()), p.dvol(),
                      err_msg='rho is not equal to pindex degeneracy')