! This file is part of ELPA.
!
! The ELPA library was originally created by the ELPA consortium,
! consisting of the following organizations:
!
! - Max Planck Computing and Data Facility (MPCDF), formerly known as
!   Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
! - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!   Informatik,
! - Technische Universität München, Lehrstuhl für Informatik mit
!   Schwerpunkt Wissenschaftliches Rechnen,
! - Fritz-Haber-Institut, Berlin, Abt. Theorie,
! - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!   Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!   and
! - IBM Deutschland GmbH
!
!
! More information can be found here:
! http://elpa.mpcdf.mpg.de/
!
! ELPA is free software: you can redistribute it and/or modify
! it under the terms of version 3 of the GNU Lesser General
! Public License as published by the Free Software Foundation.
!
! ELPA is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU Lesser General Public License for more details.
!
! You should have received a copy of the GNU Lesser General Public License
! along with ELPA. If not, see <http://www.gnu.org/licenses/>
!
! ELPA reflects a substantial effort on the part of the original
! ELPA consortium, and we ask you to respect the spirit of the
! license that we chose: i.e., please contribute any changes you
! may have back to the original ELPA library distribution, and keep
! any derivatives of ELPA under the same license that we chose for
! the original distribution, the GNU Lesser General Public License.
!
!
#include "config-f90.h"

program test_invert_trm

   use elpa1
   use elpa_utilities, only : error_unit
   use test_util

   use test_read_input_parameters
   use test_check_correctness
   use test_setup_mpi
   use test_blacs_infrastructure
   use test_prepare_matrix

#ifdef HAVE_REDIRECT
   use test_redirect
#endif
   use test_output_type

   implicit none

   !-------------------------------------------------------------------------------
   ! System size parameters (read from the command line via read_input_parameters):
   ! na:   System size
   ! nev:  Number of eigenvectors to be calculated
   ! nblk: Blocking factor in block cyclic distribution
   !-------------------------------------------------------------------------------
   integer(kind=ik) :: nblk
   integer(kind=ik) :: na, nev

   integer(kind=ik) :: np_rows, np_cols, na_rows, na_cols

   integer(kind=ik) :: myid, nprocs, my_prow, my_pcol, mpi_comm_rows, mpi_comm_cols
   integer(kind=ik) :: i, mpierr, my_blacs_ctxt, sc_desc(9), info, nprow, npcol

   integer, external :: numroc
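   ! numroc is the standard ScaLAPACK tool function: numroc(n, nb, iproc, isrcproc, nprocs)
   ! returns how many rows/columns of an n-wide, block-cyclically distributed
   ! dimension (block size nb) land on process coordinate iproc. The local array
   ! sizes na_rows x na_cols used below correspond, in the usual ScaLAPACK
   ! convention, to numroc(na, nblk, my_prow, 0, np_rows) and
   ! numroc(na, nblk, my_pcol, 0, np_cols).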

   real(kind=rk8), allocatable    :: ev(:)
   complex(kind=ck8), allocatable :: a(:,:), b(:,:), c(:,:), z(:,:), tmp1(:,:), tmp2(:,:), as(:,:)
   complex(kind=ck8), allocatable :: d(:), e(:), bs(:,:)
   complex(kind=ck8)              :: diagonalElement, subdiagonalElement
   integer(kind=ik)               :: loctmp, rowLocal, colLocal
   complex(kind=ck8), parameter   :: CZERO = (0._rk8, 0._rk8), CONE = (1._rk8, 0._rk8)
   real(kind=rk8)                 :: norm, normmax
#ifdef WITH_MPI
   real(kind=rk8)                 :: pzlange
#else
   real(kind=rk8)                 :: zlange
#endif

   complex(kind=ck8), parameter   :: pi = (3.141592653589793238462643383279_rk8, 0._rk8)

   integer(kind=ik)               :: STATUS
#ifdef WITH_OPENMP
   integer(kind=ik)               :: omp_get_max_threads, required_mpi_thread_level, &
                                     provided_mpi_thread_level
#endif
   type(output_t)                 :: write_to_file
   logical                        :: success
   character(len=8)               :: task_suffix
   integer(kind=ik)               :: j
   !-------------------------------------------------------------------------------

   success = .true.

   call read_input_parameters(na, nev, nblk, write_to_file)

   !-------------------------------------------------------------------------------
   ! MPI Initialization
   call setup_mpi(myid, nprocs)

   STATUS = 0

   do np_cols = NINT(SQRT(REAL(nprocs))), 2, -1
      if (mod(nprocs,np_cols) == 0) exit
   enddo

   ! At the end of the above loop, nprocs is always divisible by np_cols:
   ! if no divisor > 1 is found, the loop completes and leaves np_cols = 1.

   np_rows = nprocs/np_cols
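   ! Example: nprocs = 16 yields np_cols = 4, np_rows = 4; nprocs = 6 yields
   ! np_cols = 2, np_rows = 3. The grid is chosen as close to square as the
   ! factorization of nprocs allows.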

   if (myid==0) then
      print '(3(a,i0))', 'Matrix size=', na, ', Block size=', nblk
      print '(3(a,i0))', 'Number of processor rows=', np_rows, ', cols=', np_cols, ', total=', nprocs
      print *
   endif

   !-------------------------------------------------------------------------------
   ! Set up BLACS context and MPI communicators
   !
   ! The BLACS context is only necessary for using ScaLAPACK.
   !
   ! For ELPA, the MPI communicators along rows/cols are sufficient,
   ! and the grid setup may be done in an arbitrary way as long as it is
   ! consistent (i.e. 0<=my_prow<np_rows, 0<=my_pcol<np_cols and every
   ! process has a unique (my_prow,my_pcol) pair).

   call set_up_blacsgrid(mpi_comm_world, np_rows, np_cols, 'C', &
                         my_blacs_ctxt, my_prow, my_pcol)

   if (myid==0) then
      print '(a)', '| Past BLACS_Gridinfo.'
   end if

   ! All ELPA routines need MPI communicators for communicating within
   ! rows or columns of processes; these are set in elpa_get_communicators.

   mpierr = elpa_get_communicators(mpi_comm_world, my_prow, my_pcol, &
                                   mpi_comm_rows, mpi_comm_cols)

   if (myid==0) then
      print '(a)', '| Past split communicator setup for rows and columns.'
   end if

   call set_up_blacs_descriptor(na, nblk, my_prow, my_pcol, np_rows, np_cols, &
                                na_rows, na_cols, sc_desc, my_blacs_ctxt, info)
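   ! sc_desc is a standard ScaLAPACK array descriptor: DESC(1) = descriptor type,
   ! DESC(2) = BLACS context, DESC(3:4) = global rows/cols, DESC(5:6) = row/col
   ! block size, DESC(7:8) = source process row/col, DESC(9) = local leading
   ! dimension. The helper presumably fills it descinit-style and returns the
   ! local dimensions na_rows x na_cols.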

   if (myid==0) then
      print '(a)', '| Past ScaLAPACK descriptor setup.'
   end if

   !-------------------------------------------------------------------------------
   ! Allocate matrices and set up the test matrix for the Cholesky/inversion test
   allocate(a (na_rows,na_cols))
   allocate(b (na_rows,na_cols))
   allocate(bs(na_rows,na_cols))
   allocate(c (na_rows,na_cols))

   allocate(z (na_rows,na_cols))
   allocate(as(na_rows,na_cols))

   allocate(ev(na))
   call prepare_matrix_random(na, myid, sc_desc, b, z, bs)
   bs(:,:) = b(:,:)

   a(:,:) = CZERO
   diagonalElement = (2.546_rk8, 0.0_rk8)
   do i = 1, na
      if (map_global_array_index_to_local_index(i, i, rowLocal, colLocal, nblk, np_rows, np_cols, my_prow, my_pcol)) then
         a(rowLocal,colLocal) = diagonalElement * abs(cos( pi*real(i,kind=rk8) / real(na+1,kind=rk8) ))
      endif
   enddo
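   ! The test matrix a is diagonal with real entries 2.546 * |cos(pi*i/(na+1))|,
   ! i = 1..na, i.e. Hermitian with non-negative diagonal, so a Cholesky
   ! factorization a = U^H * U exists (and, barring an exactly vanishing cosine,
   ! the factor U is invertible).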
   as(:,:) = a(:,:)

   !-------------------------------------------------------------------------------
   ! Cholesky factorization

   if (myid==0) then
      print '(a)', '| Computing Cholesky factorization ... '
      print *
   end if
#ifdef WITH_MPI
   call mpi_barrier(mpi_comm_world, mpierr) ! for correct timings only
#endif

   success = elpa_cholesky_complex_double(na, a, na_rows, nblk, na_cols, mpi_comm_rows, mpi_comm_cols, .true.)

   if (.not.(success)) then
      write(error_unit,*) " elpa_cholesky_complex produced an error! Aborting..."
#ifdef WITH_MPI
      call MPI_ABORT(mpi_comm_world, 1, mpierr)
#else
      call exit(1)
#endif
   endif

   as(:,:) = a(:,:)
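   ! a now holds the (upper) triangular Cholesky factor U; it is saved in as so
   ! that the product U * U^-1 can be checked after the inversion below.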

   if (myid==0) then
      print '(a)', '| Cholesky factorization complete.'
      print *
   end if

   if (myid==0) then
      print '(a)', '| Inverting triangular matrix ... '
      print *
   end if
#ifdef WITH_MPI
   call mpi_barrier(mpi_comm_world, mpierr) ! for correct timings only
#endif

   success = elpa_invert_trm_complex_double(na, a, na_rows, nblk, na_cols, mpi_comm_rows, mpi_comm_cols, .true.)

   if (.not.(success)) then
      write(error_unit,*) " elpa_invert_trm_complex produced an error! Aborting..."
#ifdef WITH_MPI
      call MPI_ABORT(mpi_comm_world, 1, mpierr)
#else
      call exit(1)
#endif
   endif
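   ! On success, a has been overwritten in place with the inverse of the
   ! triangular factor, i.e. a = U^-1 (up to roundoff).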

   if (myid==0) then
      print '(a)', '| Inversion of triangular matrix complete.'
      print *
   end if

   !-------------------------------------------------------------------------------
   ! Test correctness of the result (using plain ScaLAPACK routines)
243 : ! Test correctness of result (using plain scalapack routines)
244 192 : allocate(tmp1(na_rows,na_cols))
245 192 : allocate(tmp2(na_rows,na_cols))
246 :
247 192 : tmp1(:,:) = 0.0_ck8
248 :
249 : ! tmp1 = a * a^-1 ! should be unity matrix
250 : #ifdef WITH_MPI
251 : call pzgemm("N","N", na, na, na, CONE, as, 1, 1, sc_desc, a, 1, 1, &
252 128 : sc_desc, CZERO, tmp1, 1, 1, sc_desc)
253 : #else
254 64 : call zgemm("N","N", na, na, na, CONE, as, na, a, na, CZERO, tmp1, na)
255 : #endif
256 :
257 : ! tmp2 = b * tmp1
258 192 : tmp2(:,:) = 0.0_ck8
259 : #ifdef WITH_MPI
260 : call pzgemm("N","N", na, na, na, CONE, b, 1, 1, sc_desc, tmp1, 1, 1, &
261 128 : sc_desc, CZERO, tmp2, 1, 1, sc_desc)
262 : #else
263 64 : call zgemm("N","N", na, na, na, CONE, b, na, tmp1, na, CZERO, tmp2, na)
264 : #endif
265 :
266 : ! compare tmp2 with c
267 192 : tmp2(:,:) = tmp2(:,:) - bs(:,:)
268 192 : tmp1(:,:) = 0.0_ck8
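   ! pzlange/zlange with norm type "M" return max(abs(tmp2(i,j))); for this
   ! norm the work array (tmp1 here) is not referenced.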
#ifdef WITH_MPI
   norm = pzlange("M", na, na, tmp2, 1, 1, sc_desc, tmp1)
#else
   norm = zlange("M", na, na, tmp2, na_rows, tmp1)
#endif
#ifdef WITH_MPI
   call mpi_allreduce(norm, normmax, 1, MPI_REAL8, MPI_MAX, MPI_COMM_WORLD, mpierr)
#else
   normmax = norm
#endif
   if (myid .eq. 0) then
      print *, " Maximum error of result: ", normmax
   endif

   if (normmax .gt. 5e-11_rk8) then
      status = 1
   endif

   deallocate(a)
   deallocate(b)
   deallocate(bs)
   deallocate(c)

   deallocate(as)

   deallocate(z)
   deallocate(tmp1)
   deallocate(tmp2)
   deallocate(ev)

#ifdef WITH_MPI
   call blacs_gridexit(my_blacs_ctxt)
   call mpi_finalize(mpierr)
#endif

   call EXIT(STATUS)

end program test_invert_trm

!-------------------------------------------------------------------------------