
Commit 42cc403d authored by Jiaxin Wang

fix dockerfile, modified ensemble likelihood, add readme to likelihood dir

parent 6f9fdd35
FROM ubuntu:latest
RUN apt-get update
RUN apt-get install -y build-essential python python-pip python-dev git gfortran autoconf gsl-bin libgsl-dev wget unzip
RUN apt-get install -y build-essential python python-pip python-dev git gfortran autoconf gsl-bin libgsl-dev wget unzip vim
RUN pip install numpy scipy cython astropy ipython==5.3.0
@@ -15,17 +15,17 @@ RUN ./configure --prefix=/usr/local/ && make && make install
WORKDIR ..
#FFTW
RUN wget http://www.fftw.org/fftw-3.3.5.tar.gz && tar xzf fftw-3.3.5.tar.gz
WORKDIR fftw-3.3.5
RUN wget http://www.fftw.org/fftw-3.3.8.tar.gz && tar xzf fftw-3.3.8.tar.gz
WORKDIR fftw-3.3.8
RUN ./configure --enable-threads --enable-openmp --enable-shared --prefix=/usr/local/ && make && make install
WORKDIR ..
#GSL
RUN wget http://nl.mirror.babylon.network/gnu/gsl/gsl-2.3.tar.gz && tar xzf gsl-2.3.tar.gz
WORKDIR gsl-2.3
RUN ./configure --enable-shared --prefix=/usr/local/ && make && make install
WORKDIR ..
ENV LD_LIBRARY_PATH=/usr/local/lib
#RUN wget http://nl.mirror.babylon.network/gnu/gsl/gsl-2.3.tar.gz && tar xzf gsl-2.3.tar.gz
#WORKDIR gsl-2.3
#RUN ./configure --enable-shared --prefix=/usr/local/ && make && make install
#WORKDIR ..
#ENV LD_LIBRARY_PATH=/usr/local/lib
#HEALPIX
RUN wget http://downloads.sourceforge.net/project/healpix/Healpix_3.31/Healpix_3.31_2016Aug26.tar.gz && tar xzf Healpix_3.31_2016Aug26.tar.gz
@@ -37,9 +37,8 @@ RUN echo '4\n\
y\n\
0\n'\
> hlpx_config
RUN ./configure < healpy_config && make
RUN ./configure -L < hlpx_config && make
WORKDIR ..
#RUN [ -r /root/.healpix/3_31_Linux/config ] && . /root/.healpix/3_31_Linux/config
ENV HEALPIX_TARGET optimized_gcc
ENV HEALPIX /home/Downloads/Healpix_3.31
......
## likelihood module
### likelihood
provides essential functions related to calculating the likelihood
### simple likelihood
provides the trivial likelihood calculation
### ensemble likelihood
provides ensemble likelihood calculation
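
A minimal usage sketch of how these pieces fit together; the import path, constructor arguments, and call convention below are assumptions for illustration, not the package's confirmed API:

```python
import numpy as np

# Hypothetical import; the actual module path may differ.
# from likelihood.ensemble_likelihood import EnsembleLikelihood

rng = np.random.RandomState(0)
n, k = 48, 16                    # data length and ensemble size
measured = rng.randn(n)          # observed data vector
noise_cov = 0.1 * np.eye(n)      # measurement covariance, full (n, n) matrix
simulated = rng.randn(k, n)      # ensemble of simulated realisations

# Assumed constructor and call convention:
# likelihood = EnsembleLikelihood(measured, noise_cov)
# log_l = likelihood(simulated)
```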
@@ -41,11 +41,9 @@ class EnsembleLikelihood(Likelihood):
def _process_simple_field(self, observable, measured_data,
data_covariance):
# https://en.wikipedia.org/wiki/Sherman%E2%80%93Morrison_formula#Generalization
# B = A^{-1} + U U^dagger
# A = data_covariance
# B^{-1} c = (A_inv -
# A_inv U (I_k + U^dagger A_inv U)^{-1} U^dagger A_inv) c
# Theo's matrix-inversion scheme, modified by Jiaxin:
# use numpy.linalg.solve(A, b) to get inv(A)*b directly,
# which safely avoids the numerical difficulties of explicitly inverting matrices
data_covariance = data_covariance.copy()
k = observable.shape[0]
n = observable.shape[1]
@@ -54,82 +52,33 @@ class EnsembleLikelihood(Likelihood):
obs_mean = observable.ensemble_mean().val.get_full_data()
U = obs_val - obs_mean
U *= np.sqrt(n)
# compute quantities for OAS estimator
mu = np.vdot(U, U)/k/n
alpha = (np.einsum(U, [0, 1], U, [2, 1])**2).sum()
alpha /= k**2
numerator = (1 - 2./n)*alpha + (mu*n)**2
denominator = (k + 1 - 2./n) * (alpha - ((mu*n)**2)/n)
# OAS calculation modified by Jiaxin
# empirical ensemble covariance (n x n), built explicitly from the
# fluctuation matrix U; np.vdot(U, U) would collapse it to a scalar
S = np.dot(U.T, U)/k
# trace of S
TrS = np.trace(S)
# trace of S^2
TrS2 = np.trace(np.dot(S, S))
numerator = (1.-2./n)*TrS2 + TrS**2
denominator = (k+1.-2./n)*(TrS2-(TrS**2)/n)
if denominator == 0:
rho = 1
else:
rho = np.min([1, numerator/denominator])
self.logger.debug("rho: %f = %f / %f" % (rho, numerator, denominator))
# rescale U half/half
V = U * np.sqrt(1-rho) / np.sqrt(k)
self.logger.info(('data_cov', np.mean(data_covariance),
'rho*mu', rho*mu,
'rho', rho,
'mu', mu,
'alpha', alpha))
B = data_covariance + rho*mu
V_B = V/B
# build middle-matrix (kxk)
middle = (np.eye(k) +
np.einsum(V.conjugate(), [0, 1],
V_B, [2, 1]))
middle = np.linalg.inv(middle)
c = measured_data - obs_mean
# total covariance: measurement covariance (assumed full n x n)
# plus the OAS-regularised ensemble covariance
AC = data_covariance + (1-rho)*S + np.eye(n)*rho*TrS/n
# residual: measured data minus the ensemble mean of the simulations
dc = measured_data - obs_mean
# If the data was incomplete, i.e. contains np.NANs, set those values
# to zero.
c = np.nan_to_num(c)
# assuming that A == A^dagger, this can be shortened
# a_c = A.inverse_times(c)
# u_a_c = a_c.dot(U, spaces=1)
# u_a_c = u_a_c.conjugate()
# and: double conjugate shouldn't make a difference
# u_a_c = c.conjugate().dot(a_u, spaces=1).conjugate()
# Pure NIFTy is
# u_a_c = c.dot(a_u, spaces=1)
# u_a_c_val = u_a_c.val.get_full_data()
V_B_c = np.einsum(c, [1], V_B, [0, 1])
first_summand_val = c/B
second_summand_val = np.einsum(middle, [0, 1], V_B_c, [1])
second_summand_val = np.einsum(V_B, [0, 1],
second_summand_val, [0])
# # second_summand_val *= -1
# second_summand = first_summand.copy_empty()
# second_summand.val = second_summand_val
result_1 = np.vdot(c, first_summand_val)
result_2 = -np.vdot(c, second_summand_val)
# compute regularizing determinant of the covariance
# det(A + UV^T) = det(A) det(I + V^T A^-1 U)
if self.use_determinant:
log_det = np.sum(np.log(data_covariance +
np.sum((obs_val-obs_mean)**2, axis=0)/k))/n
else:
log_det = 0.
result = -0.5*(result_1 + result_2 + log_det)
self.logger.info("Calculated (%s): -1/2(%g + %g + %g) = %g" %
(self.observable_name,
result_1, result_2, log_det, result))
# result_array[i] = result
# total_result = result_array.mean()
dc = np.nan_to_num(dc)
# calculate the Gaussian log-likelihood, using solve/slogdet instead of an explicit inverse
(sign_AC, logdet_AC) = np.linalg.slogdet(AC*2.*np.pi)
result = -0.5*np.vdot(dc, np.linalg.solve(AC, dc)) - 0.5*sign_AC*logdet_AC
return result
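
For reference, a self-contained sketch of what the modified `_process_simple_field` computes: the empirical ensemble covariance is shrunk with the OAS coefficient `rho`, added to the measurement covariance, and the Gaussian log-likelihood is evaluated with `np.linalg.solve` and `np.linalg.slogdet` rather than an explicit inverse. The repository code additionally rescales `U` by `sqrt(n)` (inherited from the earlier implementation); that factor is omitted here, and the standalone function is illustrative rather than part of the package.

```python
import numpy as np

def ensemble_log_likelihood(measured, simulated, data_cov):
    """Gaussian log-likelihood with an OAS-regularised ensemble covariance.

    measured : (n,) observed data vector
    simulated: (k, n) ensemble of simulated realisations
    data_cov : (n, n) measurement (noise) covariance
    """
    k, n = simulated.shape
    U = simulated - simulated.mean(axis=0)   # fluctuations around the ensemble mean
    S = np.dot(U.T, U) / k                   # empirical ensemble covariance (n x n)
    TrS = np.trace(S)
    TrS2 = np.trace(np.dot(S, S))
    # OAS (oracle approximating shrinkage) coefficient
    numerator = (1. - 2. / n) * TrS2 + TrS ** 2
    denominator = (k + 1. - 2. / n) * (TrS2 - TrS ** 2 / n)
    rho = 1. if denominator == 0 else min(1., numerator / denominator)
    # total covariance: measurement noise plus the shrunk ensemble covariance
    AC = data_cov + (1. - rho) * S + np.eye(n) * rho * TrS / n
    dc = np.nan_to_num(measured - simulated.mean(axis=0))
    sign, logdet = np.linalg.slogdet(2. * np.pi * AC)
    return -0.5 * np.vdot(dc, np.linalg.solve(AC, dc)) - 0.5 * sign * logdet
```

With the arrays from the README sketch above, `ensemble_log_likelihood(measured, simulated, noise_cov)` returns a single scalar log-likelihood.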
@@ -37,7 +37,9 @@ class SimpleLikelihood(Likelihood):
diff = data - obs_mean
if self.data_covariance is not None:
right = diff/self.data_covariance
# modified by Jiaxin
# it is not good practice to invert matrices directly
right = np.linalg.solve(self.data_covariance, diff)
else:
right = diff
return -0.5 * np.vdot(diff, right)
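
A note on the design choice above: `np.linalg.solve(A, b)` factorises `A` and solves for `inv(A) @ b` without ever forming the inverse, which is cheaper and numerically safer for ill-conditioned covariances. It does, however, require `self.data_covariance` to be a full (n, n) matrix, whereas the old element-wise division treated it as a vector of diagonal variances. A small standalone illustration (not repository code):

```python
import numpy as np

rng = np.random.RandomState(42)
n = 200
A = rng.randn(n, n)
cov = np.dot(A, A.T) + 1e-8 * np.eye(n)   # nearly singular covariance matrix
diff = rng.randn(n)

x_solve = np.linalg.solve(cov, diff)       # factorise and solve, no explicit inverse
x_inv = np.dot(np.linalg.inv(cov), diff)   # explicit inverse, then multiply

# residual of cov @ x - diff: the solve-based result is typically more accurate,
# markedly so when cov is ill-conditioned
print(np.linalg.norm(np.dot(cov, x_solve) - diff))
print(np.linalg.norm(np.dot(cov, x_inv) - diff))
```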