Commit 69ecbce4 authored by Sebastian Ohlmann

update python example, make it simpler

parent 6f1b2a85
 #!/usr/bin/env python
 import numpy as np
-from pyelpa import ProcessorLayout, DistributedMatrix, Elpa
-from mpi4py import MPI
+from pyelpa import ProcessorLayout, DistributedMatrix
 import sys
 # set some parameters for matrix layout
@@ -9,37 +8,37 @@ na = 1000
 nev = 200
 nblk = 16
-# initialize processor layout, needed for calling ELPA
-comm = MPI.COMM_WORLD
-layout_p = ProcessorLayout(comm)
+# create distributed matrix
+a = DistributedMatrix.from_comm_world(na, nev, nblk)
-# create arrays
-a = DistributedMatrix(layout_p, na, nev, nblk)
-eigenvectors = DistributedMatrix(layout_p, na, nev, nblk)
-eigenvalues = np.zeros(na, dtype=np.float64)
-# initialize elpa
-e = Elpa.from_distributed_matrix(a)
-# set input matrix (a.data) on this core (a is stored in a block-cyclic
-# distributed layout; local size: a.na_rows x a.na_cols)
-a.data[:, :] = np.random.rand(a.na_rows, a.na_cols).astype(np.float64)
+def set_matrix(a):
+    for global_row, global_col in a.global_indices():
+        a.set_data_for_global_index(global_row, global_col,
+                                    global_row*global_col)
 print("Call ELPA eigenvectors")
 sys.stdout.flush()
+set_matrix(a)
 # now compute nev of na eigenvectors and eigenvalues
-e.eigenvectors(a.data, eigenvalues, eigenvectors.data)
+data = a.compute_eigenvectors()
+eigenvalues = data['eigenvalues']
+eigenvectors = data['eigenvectors']
 print("Done")
 # now eigenvectors.data contains the local part of the eigenvector matrix
-# which is stored in a block-cyclic distributed layout
-# now eigenvalues contains all computed eigenvalues on all cores
+# which is stored in a block-cyclic distributed layout and eigenvalues contains
+# all computed eigenvalues on all cores
 print("Call ELPA eigenvalues")
 sys.stdout.flush()
+set_matrix(a)
 # now compute nev of na eigenvalues
-e.eigenvalues(a.data, eigenvalues)
+eigenvalues = a.compute_eigenvalues()
 print("Done")
 # now eigenvalues contains all computed eigenvalues on all cores
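For reference, this is how the simplified example reads after the commit, assembled from the context and added lines of the diff above. The pyelpa calls are exactly those shown in the hunks; only the blank-line placement and the wording of some comments are editorial guesses:

#!/usr/bin/env python
import numpy as np
from pyelpa import ProcessorLayout, DistributedMatrix
import sys

# set some parameters for matrix layout
na = 1000
nev = 200
nblk = 16

# create distributed matrix; the setup that was explicit before this
# commit (MPI.COMM_WORLD, ProcessorLayout, Elpa.from_distributed_matrix)
# now happens inside this convenience constructor
a = DistributedMatrix.from_comm_world(na, nev, nblk)

# fill the matrix by global index; each entry is the product of its indices
def set_matrix(a):
    for global_row, global_col in a.global_indices():
        a.set_data_for_global_index(global_row, global_col,
                                    global_row*global_col)

print("Call ELPA eigenvectors")
sys.stdout.flush()
set_matrix(a)

# now compute nev of na eigenvectors and eigenvalues
data = a.compute_eigenvectors()
eigenvalues = data['eigenvalues']
eigenvectors = data['eigenvectors']
print("Done")

# eigenvectors.data now contains the local part of the eigenvector matrix,
# which is stored in a block-cyclic distributed layout; eigenvalues contains
# all computed eigenvalues on all cores

print("Call ELPA eigenvalues")
sys.stdout.flush()
set_matrix(a)

# now compute nev of na eigenvalues only
eigenvalues = a.compute_eigenvalues()
print("Done")

Because the matrix is block-cyclically distributed, the script is meant to be launched under MPI, for example with something like mpiexec -n 4 python example.py (process count and file name are placeholders here). Each rank then fills and holds only its local block of the matrix and of the eigenvector matrix, while the computed eigenvalues end up replicated on every rank, as the comments in the diff state.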