! This file is part of ELPA.
!
! The ELPA library was originally created by the ELPA consortium,
! consisting of the following organizations:
!
! - Max Planck Computing and Data Facility (MPCDF), formerly known as
!   Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
! - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!   Informatik,
! - Technische Universität München, Lehrstuhl für Informatik mit
!   Schwerpunkt Wissenschaftliches Rechnen,
! - Fritz-Haber-Institut, Berlin, Abt. Theorie,
! - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!   Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!   and
! - IBM Deutschland GmbH
!
!
! More information can be found here:
! http://elpa.mpcdf.mpg.de/
!
! ELPA is free software: you can redistribute it and/or modify
! it under the terms of the version 3 of the license of the
! GNU Lesser General Public License as published by the Free
! Software Foundation.
!
! ELPA is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU Lesser General Public License for more details.
!
! You should have received a copy of the GNU Lesser General Public License
! along with ELPA. If not, see <http://www.gnu.org/licenses/>
!
! ELPA reflects a substantial effort on the part of the original
! ELPA consortium, and we ask you to respect the spirit of the
! license that we chose: i.e., please contribute any changes you
! may have back to the original ELPA library distribution, and keep
! any derivatives of ELPA under the same license that we chose for
! the original distribution, the GNU Lesser General Public License.
!
!
#include "config-f90.h"
!>

program test_transpose_multiply

   use elpa1
   use elpa_utilities, only : error_unit
   use test_util

   use test_read_input_parameters
   use test_check_correctness
   use test_setup_mpi
   use test_blacs_infrastructure
   use test_prepare_matrix

#ifdef HAVE_REDIRECT
   use test_redirect
#endif
   use test_output_type

   implicit none

   !-------------------------------------------------------------------------------
   ! System size parameters (read from the input arguments below):
   ! na:   System size
   ! nev:  Number of eigenvectors to be calculated
   ! nblk: Blocking factor in block cyclic distribution
   !-------------------------------------------------------------------------------
   integer(kind=ik) :: nblk
   integer(kind=ik) :: na, nev

   integer(kind=ik) :: np_rows, np_cols, na_rows, na_cols

   integer(kind=ik) :: myid, nprocs, my_prow, my_pcol, mpi_comm_rows, mpi_comm_cols
   integer(kind=ik) :: i, mpierr, my_blacs_ctxt, sc_desc(9), info, nprow, npcol

   integer, external :: numroc

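   ! Note: rk4 is the single-precision real kind used by this single-precision
   ! variant of the test (matching slange/pslange and MPI_REAL4 below).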
   real(kind=rk4), allocatable :: a(:,:), b(:,:), c(:,:), z(:,:), tmp1(:,:), tmp2(:,:), as(:,:), ev(:)

   real(kind=rk4)   :: norm, normmax
#ifdef WITH_MPI
   real(kind=rk4)   :: pslange
#else
   real(kind=rk4)   :: slange
#endif

   integer(kind=ik) :: STATUS
#ifdef WITH_OPENMP
   integer(kind=ik) :: omp_get_max_threads, required_mpi_thread_level, &
                       provided_mpi_thread_level
#endif
   type(output_t)   :: write_to_file
   logical          :: success
   character(len=8) :: task_suffix
   integer(kind=ik) :: j
   !-------------------------------------------------------------------------------

   success = .true.

   call read_input_parameters_traditional(na, nev, nblk, write_to_file)

   !-------------------------------------------------------------------------------
   ! MPI Initialization
   call setup_mpi(myid, nprocs)

   STATUS = 0

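   ! Search downward from nint(sqrt(nprocs)) for a divisor of nprocs to use as
   ! np_cols, giving a process grid as close to square as possible
   ! (np_cols ends up as 1 if no divisor is found).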
   do np_cols = NINT(SQRT(REAL(nprocs))),2,-1
      if(mod(nprocs,np_cols) == 0 ) exit
   enddo

   ! at the end of the above loop, nprocs is always divisible by np_cols

   np_rows = nprocs/np_cols

   if(myid==0) then
      print '(3(a,i0))','Matrix size=',na,', Block size=',nblk
      print '(3(a,i0))','Number of processor rows=',np_rows,', cols=',np_cols,', total=',nprocs
      print *
   endif

   !-------------------------------------------------------------------------------
   ! Set up BLACS context and MPI communicators
   !
   ! The BLACS context is only necessary for using ScaLAPACK.
   !
   ! For ELPA, the MPI communicators along rows/cols are sufficient,
   ! and the grid setup may be done in an arbitrary way as long as it is
   ! consistent (i.e. 0<=my_prow<np_rows, 0<=my_pcol<np_cols and every
   ! process has a unique (my_prow,my_pcol) pair).

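   ! The 'C' layout argument requests column-major ordering of processes in the
   ! BLACS grid (presumably passed through to BLACS_Gridinit by the helper).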
   call set_up_blacsgrid(mpi_comm_world, np_rows, np_cols, 'C', &
                         my_blacs_ctxt, my_prow, my_pcol)

   if (myid==0) then
      print '(a)','| Past BLACS_Gridinfo.'
   end if

   ! All ELPA routines need MPI communicators for communicating within
   ! rows or columns of processes; these are set in elpa_get_communicators.

   mpierr = elpa_get_communicators(mpi_comm_world, my_prow, my_pcol, &
                                   mpi_comm_rows, mpi_comm_cols)

   if (myid==0) then
      print '(a)','| Past split communicator setup for rows and columns.'
   end if

   call set_up_blacs_descriptor(na, nblk, my_prow, my_pcol, np_rows, np_cols, &
                                na_rows, na_cols, sc_desc, my_blacs_ctxt, info)

   if (myid==0) then
      print '(a)','| Past ScaLAPACK descriptor setup.'
   end if
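
   ! The descriptor setup returned the local dimensions na_rows x na_cols of the
   ! block-cyclically distributed matrices and the ScaLAPACK descriptor sc_desc,
   ! which is reused by the correctness check below.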

   !-------------------------------------------------------------------------------
   ! Allocate matrices and set up a random test matrix for the multiplication
   allocate(a (na_rows,na_cols))
   allocate(b (na_rows,na_cols))
   allocate(c (na_rows,na_cols))

   allocate(z (na_rows,na_cols))
   allocate(as(na_rows,na_cols))

   allocate(ev(na))

   call prepare_matrix_random(na, myid, sc_desc, a, z, as)

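   ! prepare_matrix_random fills a with a random distributed test matrix; the
   ! arrays z and as are required by the helper's interface but are not used
   ! further in this test. b = 2*a provides the second operand, and the result
   ! matrix c is cleared before the multiplication.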
   b(:,:) = 2.0_rk4 * a(:,:)
   c(:,:) = 0.0_rk4

   !-------------------------------------------------------------------------------
   ! Compute the matrix product c = a**T * b

   if (myid==0) then
      print '(a)','| Compute c = a**T * b ... '
      print *
   end if
#ifdef WITH_MPI
   call mpi_barrier(mpi_comm_world, mpierr) ! for correct timings only
#endif

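   ! The first two arguments of the ELPA multiply select which triangle of a is
   ! referenced and which part of c is computed; any value other than 'U'/'L'
   ! (here "F") requests the full matrices.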
   success = elpa_mult_at_b_real_single("F","F", na, na, a, na_rows, na_cols, b, na_rows, &
                                        na_cols, nblk, mpi_comm_rows, mpi_comm_cols, c, &
                                        na_rows, na_cols)

   if (.not.(success)) then
      write(error_unit,*) "elpa_mult_at_b_real_single produced an error! Aborting..."
#ifdef WITH_MPI
      call MPI_ABORT(mpi_comm_world, 1, mpierr)
#else
      call exit(1)
#endif
   endif


   if (myid==0) then
      print '(a)','| Computation of c = a**T * b complete.'
      print *
   end if


   !-------------------------------------------------------------------------------
   ! Test correctness of result (using plain ScaLAPACK routines)
   allocate(tmp1(na_rows,na_cols))
   allocate(tmp2(na_rows,na_cols))

   tmp1(:,:) = 0.0_rk4

   ! tmp1 = a**T
#ifdef WITH_MPI
   call pstran(na, na, 1.0_rk4, a, 1, 1, sc_desc, 0.0_rk4, tmp1, 1, 1, sc_desc)
#else
   tmp1 = transpose(a)
#endif
   ! tmp2 = tmp1 * b
#ifdef WITH_MPI
   call psgemm("N","N", na, na, na, 1.0_rk4, tmp1, 1, 1, sc_desc, b, 1, 1, &
               sc_desc, 0.0_rk4, tmp2, 1, 1, sc_desc)
#else
   call sgemm("N","N", na, na, na, 1.0_rk4, tmp1, na, b, na, 0.0_rk4, tmp2, na)
#endif

   ! compare tmp2 with c
   tmp2(:,:) = tmp2(:,:) - c(:,:)

#ifdef WITH_MPI
   norm = pslange("M", na, na, tmp2, 1, 1, sc_desc, tmp1)
#else
   norm = slange("M", na, na, tmp2, na_rows, tmp1)
#endif

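   ! slange/pslange with norm 'M' return the largest absolute entry of the
   ! difference matrix; the MPI_MAX reduction below ensures every rank holds
   ! the global maximum error.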
#ifdef WITH_MPI
   call mpi_allreduce(norm,normmax,1,MPI_REAL4,MPI_MAX,MPI_COMM_WORLD,mpierr)
#else
   normmax = norm
#endif
   if (myid .eq. 0) then
      print *," Maximum error of result: ", normmax
   endif

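   ! Mark the test as failed if the maximum deviation exceeds the (fairly loose)
   ! single-precision tolerance; STATUS is returned to the calling environment
   ! via EXIT at the end of the program.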
   if (normmax .gt. 5e-2_rk4) then
      status = 1
   endif

   deallocate(a)
   deallocate(b)
   deallocate(c)

   deallocate(as)

   deallocate(z)
   deallocate(tmp1)
   deallocate(tmp2)
   deallocate(ev)

#ifdef WITH_MPI
   call blacs_gridexit(my_blacs_ctxt)
   call mpi_finalize(mpierr)
#endif

   call EXIT(STATUS)


end

!-------------------------------------------------------------------------------