Commit 933824da authored by Carl Poelking's avatar Carl Poelking
Browse files

Segment-based calculations.

parent 167bb7c6
......@@ -187,9 +187,12 @@ class KernelAdaptorSpecificUnique(object):
return IX, IR, types
else:
return IX
def adaptScalar(self, atomic, epsilon=1e-20):
    """Reduce an atomic spectrum to its xnklab vector and its normalized form.

    atomic: atomic spectrum object handed to reduce_xnklab_atomic
    epsilon: magnitudes below this threshold are treated as zero, in which
        case the vector is returned unscaled (guards against division by zero)
    Returns (X, X_norm): the raw descriptor vector and its unit-norm copy.
    """
    X = reduce_xnklab_atomic(atomic, self.types)
    magnitude = np.dot(X, X)**0.5
    # Near-null vectors cannot be normalized; leave them untouched.
    if magnitude < epsilon:
        magnitude = 1.
    return X, X/magnitude
class KernelAdaptorSpecific(object):
......
......@@ -11,6 +11,30 @@ HARTREE_TO_KCALMOL = 627.509469
MP_LOCK = mp.Lock()
def mp_pool_compute_upper_triangle(
        kfct,
        g_list,
        n_procs,
        dtype='float64',
        mplog=None,
        **kwargs):
    """Compute the upper triangle of the kernel matrix over g_list, row by row.

    kfct: kernel function, invoked as kfct([gi, gj], **kwargs)
    g_list: list of descriptor objects; kmat[i,j] = kfct([g_list[i], g_list[j]])
    n_procs: number of worker processes
    dtype: dtype of the returned matrix
    mplog: optional logger supporting the '<<' streaming interface
    kwargs: extra keyword arguments bound onto kfct
    Returns an (n,n) matrix with only entries j >= i filled (lower triangle
    stays zero).
    """
    kfct_primed = fct.partial(kfct, **kwargs)
    n_rows = len(g_list)
    kmat = np.zeros((n_rows, n_rows), dtype=dtype)
    # BUGFIX/perf: the original created and tore down a fresh mp.Pool for
    # every row; create the pool once and reuse it for all rows.
    pool = mp.Pool(processes=n_procs)
    try:
        for i in range(n_rows):
            if mplog: mplog << mplog.back << "Computing row %d" % i << mplog.endl
            gi = g_list[i]
            g_pair_list = [[gi, g_list[j]] for j in range(i, n_rows)]
            kmat[i,i:] = pool.map(kfct_primed, g_pair_list)
    finally:
        pool.close()
        pool.join()
    return kmat
def mp_compute_vector(
kfct,
g_list,
......@@ -65,7 +89,7 @@ def mp_compute_upper_triangle(
g_list,
n_procs,
n_blocks,
log=None,
mplog=None,
tstart_twall=(None,None),
backup=True,
verbose=True,
......@@ -81,7 +105,7 @@ def mp_compute_upper_triangle(
n_blocks: number of column blocks onto which computation is split
kwargs: keyword arguments supplied to kfct
"""
if not verbose: log=None
if not verbose: mplog=None
t_start = tstart_twall[0]
t_wall = tstart_twall[1]
dim = len(g_list)
......@@ -96,7 +120,7 @@ def mp_compute_upper_triangle(
# Column start, column end
c0 = col_div[0]
c1 = col_div[-1]+1
if log: log << "Column block i[%d:%d] j[%d:%d]" % (0, c1, c0, c1) << log.endl
if mplog: mplog << "Column block i[%d:%d] j[%d:%d]" % (0, c1, c0, c1) << mplog.endl
gj_list = g_list[c0:c1]
gi_list = g_list[0:c1]
# Prime kernel function
......@@ -113,7 +137,7 @@ def mp_compute_upper_triangle(
npyfile = 'out.block_i_%d_%d_j_%d_%d.npy' % (0, c1, c0, c1)
# ... but first check for previous calculations of same slice
if backup and npyfile in os.listdir('./'):
if log: log << "Load block from '%s'" % npyfile << log.endl
if mplog: mplog << "Load block from '%s'" % npyfile << mplog.endl
kmat_column_block = np.load(npyfile)
else:
kmat_column_block = pool.map(mp_compute_column_block_primed, gi_list)
......@@ -128,9 +152,9 @@ def mp_compute_upper_triangle(
dt_block = t_out-t_in
if t_start and t_wall:
t_elapsed = t_out-t_start
if log: log << "Time elapsed =" << t_elapsed << " (wall time = %s) (maxmem = %d)" % (t_wall, resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) << log.endl
if mplog: mplog << "Time elapsed =" << t_elapsed << " (wall time = %s) (maxmem = %d)" % (t_wall, resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) << mplog.endl
if col_div_idx+1 != len(col_div_list) and t_elapsed+dt_block > t_wall-dt_block:
log << "Wall time hit expected for next iteration, break ..." << log.endl
mplog << "Wall time hit expected for next iteration, break ..." << mplog.endl
break
else: pass
return kmat
......
......@@ -47,6 +47,10 @@ void Spectrum::compute() {
this->compute(_structure->particles(), _structure->particles());
}
// Compute the spectrum using the particles of <center> as expansion centers
// and all particles of the owning structure as density sources.
void Spectrum::compute(Segment *center) {
    this->compute(center->particles(), _structure->particles());
}
// Compute the spectrum using the particles of <center> as expansion centers
// and the particles of <target> as density sources.
void Spectrum::compute(Segment *center, Segment *target) {
    this->compute(center->particles(), target->particles());
}
......@@ -139,7 +143,7 @@ AtomicSpectrum *Spectrum::computeAtomic(Particle *center, Structure::particle_ar
weight0 *= _basis->getCutoff()->getCenterWeight();
}
GLOG() << (*pit)->getType() << " " << dr.getX() << " " << dr.getY() << " " << dr.getZ() << std::endl;
GLOG() << (*pit)->getType() << " " << dr.getX() << " " << dr.getY() << " " << dr.getZ() << " " << (*pit)->getWeight() << std::endl;
// COMPUTE EXPANSION & ADD TO SPECTRUM
bool gradients = (is_image) ? false : _options->get<bool>("spectrum.gradients");
......@@ -275,6 +279,7 @@ void Spectrum::load(std::string archfile) {
void Spectrum::registerPython() {
using namespace boost::python;
void (Spectrum::*computeAll)() = &Spectrum::compute;
void (Spectrum::*computeSeg)(Segment*) = &Spectrum::compute;
void (Spectrum::*computeSegPair)(Segment*, Segment*) = &Spectrum::compute;
void (Spectrum::*computeCentersTargets)(Structure::particle_array_t&, Structure::particle_array_t&) = &Spectrum::compute;
......@@ -283,6 +288,7 @@ void Spectrum::registerPython() {
.def(init<std::string>())
.def("__iter__", range<return_value_policy<reference_existing_object> >(&Spectrum::beginAtomic, &Spectrum::endAtomic))
.def("compute", computeAll)
.def("compute", computeSeg)
.def("compute", computeSegPair)
.def("compute", computeCentersTargets)
.def("computePower", &Spectrum::computePower)
......
......@@ -46,6 +46,7 @@ public:
void clean();
void compute();
void compute(Segment *centers);
void compute(Segment *centers, Segment *targets);
void compute(Structure::particle_array_t &sources, Structure::particle_array_t &targets);
AtomicSpectrum *computeAtomic(Particle *center);
......
from loadwrite import *
from inverse import *
from extract import *
import partition
......@@ -12,20 +12,36 @@ try:
except ImportError:
print("Note: ase.io import failed. Install PYTHON-ASE to harvest full reader functionality.")
def structures_from_xyz(xyz_file, **kwargs):
    """Read an extended-xyz trajectory and convert each frame to a structure.

    xyz_file: path to the xyz file (read via ase.io, all frames)
    kwargs: forwarded to structures_from_ase (e.g. do_partition,
        add_fragment_com, return_all)
    """
    # Read xyz via ASE
    ase_configs = ase.io.read(xyz_file, index=':')
    # BUGFIX: structures_from_ase takes the frames as 'configs', not
    # 'ase_configs' — the old keyword raised a TypeError.
    return structures_from_ase(
        configs=ase_configs,
        **kwargs)
# TODO def structures_from_ase in addition to structure_from_ase
def structures_from_ase(
        configs,
        return_all=False,
        **kwargs):
    """Convert a list of ASE configurations into soap structures.

    configs: iterable of ASE Atoms objects
    return_all: if True, return the full per-config tuples produced by
        structure_from_ase; otherwise return (configs_mod, structs)
    kwargs: forwarded to structure_from_ase
    """
    configs_mod = []
    structs = []
    return_tuples = []
    for config in configs:
        return_tuple = \
            structure_from_ase(
                config,
                **kwargs)
        configs_mod.append(return_tuple[0])
        structs.append(return_tuple[1])
        # BUGFIX: return_tuples was never populated, so return_all=True
        # always yielded an empty list.
        return_tuples.append(return_tuple)
    if return_all:
        return return_tuples
    else:
        return configs_mod, structs
def structure_from_ase(
config,
do_partition=True,
add_fragment_com=True,
do_partition=False,
add_fragment_com=False,
use_center_of_geom=False,
log=None):
# NOTE Center of mass is computed without considering PBC => Requires unwrapped coordinates
......@@ -142,6 +158,23 @@ def structure_from_ase(
if log: log << log.endl
return config, structure, top, frag_bond_matrix, atom_bond_matrix, frag_labels, atom_labels
def join_structures_as_segments(structs):
    """Merge several structures into one; each input becomes one segment.

    NOTE Segments of the individual structures will be lost
    NOTE Box will be adopted from first structure in list
    """
    joined = soap.Structure(':'.join([s.label for s in structs]))
    joined.box = structs[0].box
    for member in structs:
        segment = joined.addSegment()
        segment.name = member.label
        # Copy particles one by one, preserving their SOAP attributes.
        for source in member.particles:
            particle = joined.addParticle(segment)
            particle.pos = source.pos
            particle.weight = source.weight
            particle.sigma = source.sigma
            particle.type = source.type
    return joined
def write_xyz(xyz_file, structure):
ofs = open(xyz_file, 'w')
ofs.write('%d\n%s\n' % (len(list(structure.particles)), structure.label))
......
......@@ -37,6 +37,30 @@ ligand_size_lower = 4
def covalent_cutoff(rad1, rad2):
    """Return the bonding cutoff for two covalent radii (sum plus 15% margin)."""
    return (rad1 + rad2) * 1.15
def calculate_connectivity_mat(distance_mat, type_vec, cutoff=None):
    """Derive a boolean connectivity (adjacency) matrix from pairwise distances.

    distance_mat: (n,n) numpy array of pairwise distances
    type_vec: length-n sequence of element symbols (keys into COVRAD_TABLE);
        only consulted when cutoff is None
    cutoff: if given, particles i,j are connected when distance_mat[i,j] is
        strictly below this global cutoff; otherwise a type-dependent
        covalent cutoff (covalent_cutoff of the two radii) is used.
    Returns an (n,n) boolean matrix.
    """
    dim = distance_mat.shape[0]
    connectivity_mat = np.zeros((dim, dim), dtype=bool)
    if cutoff is not None:
        for i in range(dim):
            for j in range(dim):
                # BUGFIX: original compared against the undefined name
                # 'constant', raising a NameError whenever cutoff was set.
                connectivity_mat[i, j] = distance_mat[i, j] < cutoff
    else:
        for i in range(dim):
            for j in range(dim):
                r1 = COVRAD_TABLE[type_vec[i]]
                r2 = COVRAD_TABLE[type_vec[j]]
                connectivity_mat[i, j] = distance_mat[i, j] <= covalent_cutoff(r1, r2)
    return connectivity_mat
# COVALENCE RADII (from Cambridge Structural Database, table see http://en.wikipedia.org/wiki/Covalent_radius)
COVRAD_TABLE = {}
COVRAD_TABLE['H'] = 0.31
......@@ -50,6 +74,7 @@ COVRAD_TABLE['S'] = 1.05
COVRAD_TABLE['Br'] = 1.20
COVRAD_TABLE['Cl'] = 1.02
COVRAD_TABLE['I'] = 1.39
COVRAD_TABLE['P'] = 1.07
# FORCEFIELD TYPE TABLE
TYPE_TABLE = {\
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment