LCOV - code coverage report
Current view:  top level - test/Fortran/elpa1/legacy_interface - legacy_complex_cholesky.F90 (source / functions)
Test:          coverage_50ab7a7628bba174fc62cee3ab72b26e81f87fe5.info
Date:          2018-01-10 09:29:53

               Hit    Total    Coverage
Lines:          70       75      93.3 %
Functions:       2        2     100.0 %

          Line data    Source code
       1             : !    This file is part of ELPA.
       2             : !
       3             : !    The ELPA library was originally created by the ELPA consortium,
       4             : !    consisting of the following organizations:
       5             : !
       6             : !    - Max Planck Computing and Data Facility (MPCDF), formerly known as
       7             : !      Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
       8             : !    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
       9             : !      Informatik,
      10             : !    - Technische Universität München, Lehrstuhl für Informatik mit
       11             : !      Schwerpunkt Wissenschaftliches Rechnen,
      12             : !    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
       13             : !    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
       14             : !      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
      15             : !      and
      16             : !    - IBM Deutschland GmbH
      17             : !
      18             : !
      19             : !    More information can be found here:
      20             : !    http://elpa.mpcdf.mpg.de/
      21             : !
      22             : !    ELPA is free software: you can redistribute it and/or modify
       23             : !    it under the terms of version 3 of the
      24             : !    GNU Lesser General Public License as published by the Free
      25             : !    Software Foundation.
      26             : !
      27             : !    ELPA is distributed in the hope that it will be useful,
      28             : !    but WITHOUT ANY WARRANTY; without even the implied warranty of
      29             : !    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
      30             : !    GNU Lesser General Public License for more details.
      31             : !
      32             : !    You should have received a copy of the GNU Lesser General Public License
      33             : !    along with ELPA.  If not, see <http://www.gnu.org/licenses/>
      34             : !
      35             : !    ELPA reflects a substantial effort on the part of the original
      36             : !    ELPA consortium, and we ask you to respect the spirit of the
      37             : !    license that we chose: i.e., please contribute any changes you
      38             : !    may have back to the original ELPA library distribution, and keep
      39             : !    any derivatives of ELPA under the same license that we chose for
      40             : !    the original distribution, the GNU Lesser General Public License.
      41             : !
      42             : !
      43             : #include "config-f90.h"
      44             : !>
      45             : 
      46         192 : program test_cholesky
      47             : 
      48         192 :    use elpa1
      49             :    use elpa_utilities, only : error_unit
      50             :    use test_util
      51             : 
      52             :    use test_read_input_parameters
      53             :    use test_check_correctness
      54             :    use test_setup_mpi
      55             :    use test_blacs_infrastructure
      56             :    use test_prepare_matrix
      57             : 
      58             : #ifdef HAVE_REDIRECT
      59             :    use test_redirect
      60             : #endif
       61             :    use test_output_type
      62             : 
      63             :    implicit none
      64             : 
      65             :    !-------------------------------------------------------------------------------
      66             :    ! Please set system size parameters below!
      67             :    ! na:   System size
      68             :    ! nev:  Number of eigenvectors to be calculated
      69             :    ! nblk: Blocking factor in block cyclic distribution
      70             :    !-------------------------------------------------------------------------------
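                      :    ! Note: in this Cholesky test only na and nblk influence the computation;
                      :    ! nev is read in below together with them but is not used any further here.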
      71             :    integer(kind=ik)           :: nblk
      72             :    integer(kind=ik)           :: na, nev
      73             : 
      74             :    integer(kind=ik)           :: np_rows, np_cols, na_rows, na_cols
      75             : 
      76             :    integer(kind=ik)           :: myid, nprocs, my_prow, my_pcol, mpi_comm_rows, mpi_comm_cols
      77             :    integer(kind=ik)           :: i, mpierr, my_blacs_ctxt, sc_desc(9), info, nprow, npcol
      78             : 
      79             :    integer, external          :: numroc
      80             : 
      81         192 :    real(kind=rk8), allocatable    :: ev(:)
      82         384 :    complex(kind=ck8), allocatable :: a(:,:), b(:,:), c(:,:), z(:,:), tmp1(:,:), tmp2(:,:), as(:,:)
      83             :    complex(kind=ck8), allocatable :: d(:), e(:)
       84             :    complex(kind=ck8)              :: diagonalElement, subdiagonalElement
       85             :    integer(kind=ik)           :: loctmp, rowLocal, colLocal
      86             :    complex(kind=ck8), parameter   :: CZERO = (0.0_rk8,0.0_rk8), CONE = (1.0_rk8,0.0_rk8)
      87             :    real(kind=rk8)              :: norm, normmax
      88             : #ifdef WITH_MPI
      89             :    real(kind=rk8)              :: pzlange
      90             : #else
      91             :    real(kind=rk8)              :: zlange
      92             : #endif
      93             : 
      94             :    complex(kind=ck8), parameter   :: pi = (3.141592653589793238462643383279_rk8, 0.0_rk8)
      95             : 
      96             :    integer(kind=ik)           :: STATUS
      97             : #ifdef WITH_OPENMP
      98             :    integer(kind=ik)           :: omp_get_max_threads,  required_mpi_thread_level, &
      99             :                                  provided_mpi_thread_level
     100             : #endif
     101             :    type(output_t)             :: write_to_file
     102             :    logical                    :: success
     103             :    character(len=8)           :: task_suffix
     104             :    integer(kind=ik)           :: j
     105             :    !-------------------------------------------------------------------------------
     106             : 
     107         192 :    success = .true.
     108             : 
     109         192 :    call read_input_parameters(na, nev, nblk, write_to_file)
     110             : 
     111             :    !-------------------------------------------------------------------------------
     112             :    !  MPI Initialization
     113         192 :    call setup_mpi(myid, nprocs)
     114             : 
     115         192 :    STATUS = 0
     116             : 
     117         192 :    do np_cols = NINT(SQRT(REAL(nprocs))),2,-1
     118           0 :       if(mod(nprocs,np_cols) == 0 ) exit
     119             :    enddo
     120             : 
      121             :    ! At the end of the above loop, nprocs is always divisible by np_cols: either the loop exits at a divisor, or it completes and leaves np_cols = 1.
     122             : 
     123         192 :    np_rows = nprocs/np_cols
     124             : 
     125         192 :    if(myid==0) then
      126         128 :       print '(2(a,i0))','Matrix size=',na,', Block size=',nblk
     127         128 :       print '(3(a,i0))','Number of processor rows=',np_rows,', cols=',np_cols,', total=',nprocs
     128         128 :       print *
     129             :    endif
     130             : 
     131             :    !-------------------------------------------------------------------------------
     132             :    ! Set up BLACS context and MPI communicators
     133             :    !
     134             :    ! The BLACS context is only necessary for using Scalapack.
     135             :    !
     136             :    ! For ELPA, the MPI communicators along rows/cols are sufficient,
     137             :    ! and the grid setup may be done in an arbitrary way as long as it is
     138             :    ! consistent (i.e. 0<=my_prow<np_rows, 0<=my_pcol<np_cols and every
     139             :    ! process has a unique (my_prow,my_pcol) pair).
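                      :    !
                      :    ! Purely as an illustration (not executed by this test), one such consistent
                      :    ! assignment, assuming a column-major ordering of the MPI ranks, would be
                      :    !    my_prow = mod(myid, np_rows)
                      :    !    my_pcol = myid / np_rows
                      :    ! The call below obtains my_prow and my_pcol from the BLACS grid instead.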
     140             : 
     141             :    call set_up_blacsgrid(mpi_comm_world, np_rows, np_cols, 'C', &
     142         192 :                          my_blacs_ctxt, my_prow, my_pcol)
     143             : 
     144         192 :    if (myid==0) then
     145         128 :      print '(a)','| Past BLACS_Gridinfo.'
     146             :    end if
     147             : 
     148             :    ! All ELPA routines need MPI communicators for communicating within
      149             :    ! rows or columns of processes; these are set in elpa_get_communicators.
     150             : 
     151             :    mpierr = elpa_get_communicators(mpi_comm_world, my_prow, my_pcol, &
     152         192 :                                    mpi_comm_rows, mpi_comm_cols)
     153             : 
     154         192 :    if (myid==0) then
     155         128 :      print '(a)','| Past split communicator setup for rows and columns.'
     156             :    end if
     157             : 
      158             :    call set_up_blacs_descriptor(na, nblk, my_prow, my_pcol, np_rows, np_cols, &
     159         192 :                                 na_rows, na_cols, sc_desc, my_blacs_ctxt, info)
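                      :    ! For reference, a sketch of how the local dimensions produced above relate
                      :    ! to the global ones via the standard ScaLAPACK tool function numroc
                      :    ! (declared earlier), assuming the distribution starts on row/column 0:
                      :    !    na_rows = numroc(na, nblk, my_prow, 0, np_rows)
                      :    !    na_cols = numroc(na, nblk, my_pcol, 0, np_cols)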
     160             : 
     161         192 :    if (myid==0) then
     162         128 :      print '(a)','| Past scalapack descriptor setup.'
     163             :    end if
     164             : 
     165             :    !-------------------------------------------------------------------------------
      166             :    ! Allocate matrices and set up the test matrix for the Cholesky factorization
     167         192 :    allocate(a (na_rows,na_cols))
     168         192 :    allocate(b (na_rows,na_cols))
     169         192 :    allocate(c (na_rows,na_cols))
     170             : 
     171         192 :    allocate(z (na_rows,na_cols))
     172         192 :    allocate(as(na_rows,na_cols))
     173             : 
     174         192 :    allocate(ev(na))
     175             : !   call prepare_matrix_random(na, myid, sc_desc, a, z, as)
     176             : !   b(:,:) = 2.0 * a(:,:)
     177             : !   c(:,:) = 0.0
     178             : 
     179         192 :     a(:,:) = CONE - CONE
     180         192 :     diagonalElement = (2.546_rk8, 0.0_rk8)
     181       28992 :     do i = 1, na
     182       28800 :       if (map_global_array_index_to_local_index(i, i, rowLocal, colLocal, nblk, np_rows, np_cols, my_prow, my_pcol)) then
     183       19200 :         a(rowLocal,colLocal) = diagonalElement * abs(cos( pi*real(i,kind=rk8)/ real(na+1,kind=rk8) ))
     184             :       endif
     185             :     enddo
     186         192 :     as(:,:) = a(:,:)
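                      :     ! The matrix set up above is diagonal with non-negative real values (stored
                      :     ! as complex numbers with zero imaginary part), hence Hermitian and positive
                      :     ! semidefinite; its Cholesky factor is simply the elementwise square root of
                      :     ! the diagonal, so the factorization below is well defined.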
     187             : 
     188             :    !-------------------------------------------------------------------------------
     189             :    ! Calculate eigenvalues/eigenvectors
     190             : 
     191         192 :    if (myid==0) then
     192         128 :      print '(a)','| Compute cholesky decomposition ... '
     193         128 :      print *
     194             :    end if
     195             : #ifdef WITH_MPI
     196         128 :    call mpi_barrier(mpi_comm_world, mpierr) ! for correct timings only
     197             : #endif
     198             : 
     199         192 :    success = elpa_cholesky_complex_double(na, a, na_rows, nblk, na_cols, mpi_comm_rows, mpi_comm_cols, .true.)
     200             : 
     201         192 :    if (.not.(success)) then
     202           0 :       write(error_unit,*) " elpa_cholesky_complex produced an error! Aborting..."
     203             : #ifdef WITH_MPI
     204           0 :       call MPI_ABORT(mpi_comm_world, 1, mpierr)
     205             : #else
     206           0 :       call exit(1)
     207             : #endif
     208             :    endif
     209             : 
     210             : 
     211         192 :    if (myid==0) then
     212         128 :      print '(a)','| Solve cholesky decomposition complete.'
     213         128 :      print *
     214             :    end if
     215             : 
     216             : 
     217             :    !-------------------------------------------------------------------------------
     218             :    ! Test correctness of result (using plain scalapack routines)
     219         192 :    allocate(tmp1(na_rows,na_cols))
     220         192 :    allocate(tmp2(na_rows,na_cols))
     221             : 
     222         192 :    tmp1(:,:) = 0.0_ck8
     223             : 
     224             :    ! tmp1 = a**H
     225             : #ifdef WITH_MPI
     226         128 :    call pztranc(na, na, CONE, a, 1, 1, sc_desc, CZERO, tmp1, 1, 1, sc_desc)
     227             : #else
     228          64 :    tmp1 = transpose(conjg(a))
     229             : #endif
     230             :    ! tmp2 = a * a**H
     231             : #ifdef WITH_MPI
     232             :    call pzgemm("N","N", na, na, na, CONE, a, 1, 1, sc_desc, tmp1, 1, 1, &
     233         128 :                sc_desc, CZERO, tmp2, 1, 1, sc_desc)
     234             : #else
     235          64 :    call zgemm("N","N", na, na, na, CONE, a, na, tmp1, na, CZERO, tmp2, na)
     236             : #endif
     237             : 
      238             :    ! compare tmp2 with the original matrix as
     239         192 :    tmp2(:,:) = tmp2(:,:) - as(:,:)
     240             : 
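                      :    ! "M" selects the largest absolute entry of the residual a*a**H - as; the
                      :    ! MPI_MAX allreduce below combines the per-rank results so that every rank
                      :    ! compares the same global maximum against the 5e-11 tolerance.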
     241             : #ifdef WITH_MPI
     242         128 :    norm = pzlange("M",na, na, tmp2, 1, 1, sc_desc, tmp1)
     243             : #else
     244          64 :    norm = zlange("M",na, na, tmp2, na_rows, tmp1)
     245             : #endif
     246             : #ifdef WITH_MPI
     247         128 :    call mpi_allreduce(norm,normmax,1,MPI_REAL8,MPI_MAX,MPI_COMM_WORLD,mpierr)
     248             : #else
     249          64 :    normmax = norm
     250             : #endif
     251         192 :    if (myid .eq. 0) then
     252         128 :      print *," Maximum error of result: ", normmax
     253             :    endif
     254             : 
     255         192 :    if (normmax .gt. 5e-11_rk8) then
     256           0 :         status = 1
     257             :    endif
     258             : 
     259         192 :    deallocate(a)
     260         192 :    deallocate(b)
     261         192 :    deallocate(c)
     262             : 
     263         192 :    deallocate(as)
     264             : 
     265         192 :    deallocate(z)
     266         192 :    deallocate(tmp1)
     267         192 :    deallocate(tmp2)
     268         192 :    deallocate(ev)
     269             : 
     270             : 
     271             : #ifdef WITH_MPI
     272         128 :    call blacs_gridexit(my_blacs_ctxt)
     273         128 :    call mpi_finalize(mpierr)
     274             : #endif
     275             : 
     276         192 :    call EXIT(STATUS)
     277             : 
     278             : 
     279             : end
     280             : 
     281             : !-------------------------------------------------------------------------------

Generated by: LCOV version 1.12