Commit f786135d authored by Stefano de Gironcoli's avatar Stefano de Gironcoli
Browse files

update of the CB_toy_code stuff

parent 5203ef2e
#!/bin/sh
# Run every solver executable listed in BIN_LIST over all *.in.no_shift
# input files in this directory, then diff each results_<exe> directory
# against the stored references_<exe> directory.
#
# Run from the directory where this script is located.
cd "$(dirname "$0")" || exit 1
EXAMPLE_DIR=$(pwd)
# check whether echo has the -e option (portability: BSD vs GNU echo)
if test "$(echo -e)" = "-e" ; then ECHO=echo ; else ECHO="echo -e" ; fi
$ECHO
$ECHO "run_all_examples: starting"
BIN_DIR="$EXAMPLE_DIR/../../bin"
BIN_LIST="cb_cg cb_cg_gamma cb_davidson cb_davidson_gamma cb_ppcg cb_ppcg_gamma cb_paro cb_paro_gamma"
# NOTE(review): the assignment below clobbers the full list above, so only
# cb_paro is actually exercised -- looks like a debugging leftover; delete
# this line to test every executable.
BIN_LIST="cb_paro"
for j in $BIN_LIST
do
  $ECHO " ************************************************************"
  $ECHO " ************************************************************"
  $ECHO " executable being tested is $j "
  cd "$EXAMPLE_DIR" || exit 1
  mkdir -p "results_$j"
  for i in ./*in.no_shift
  do
    $ECHO " running example: $i ... \c"
    "$BIN_DIR/$j.x" < "$i" > "results_$j/$i.out"
    $ECHO " done! "
  done
  $ECHO " Done with executable $j - here are differences from reference"
  diff "results_$j" "references_$j"
done
$ECHO "run_all_examples: done"
......@@ -56,7 +56,7 @@ set_cb_potential.o \
sort.o \
write_bands.o
QEMODS=../../FFTXlib/libqefft.a ../../KS_Solvers/Davidson_RCI/libdavid_rci.a ../../KS_Solvers/DENSE/libdense.a ../../KS_Solvers/Davidson/libdavid.a ../../KS_Solvers/CG/libcg.a ../../KS_Solvers/PPCG/libppcg.a ../../KS_Solvers/ParO/libparo.a ../../LAXlib/libqela.a ../../UtilXlib/libutil.a ../../clib/clib.a
QEMODS=../../FFTXlib/libqefft.a ../../KS_Solvers/Davidson_RCI/libdavid_rci.a ../../KS_Solvers/Davidson/libdavid.a ../../KS_Solvers/CG/libcg.a ../../KS_Solvers/PPCG/libppcg.a ../../KS_Solvers/ParO/libparo.a ../../KS_Solvers/DENSE/libdense.a ../../LAXlib/libqela.a ../../UtilXlib/libutil.a ../../clib/clib.a
TLDEPS=bindir liblapack libblas
......
......@@ -119,7 +119,6 @@ call set_mpi_comm_4_solvers( world_comm, intra_bgrp_comm, inter_bgrp_comm )
call print_clock('ffts')
#if defined(__MPI)
call unset_mpi_comm_4_solvers
call mp_global_end( )
#endif
......
......@@ -118,7 +118,6 @@ call set_mpi_comm_4_solvers( world_comm, intra_bgrp_comm, inter_bgrp_comm )
call print_clock('ffts')
#if defined(__MPI)
call unset_mpi_comm_4_solvers
call mp_global_end( )
#endif
......
......@@ -9,6 +9,9 @@
#endif
implicit none
!
include 'laxlib.fh'
!
! local variables (used in the call to cegterg )
logical, parameter :: gamma_only = .true. ! Gamma-only "real" version
complex(DP), allocatable :: evc(:,:)
......@@ -104,7 +107,6 @@
call print_clock('ffts')
#if defined(__MPI)
call unset_mpi_comm_4_solvers
call mp_global_end( )
#endif
......
......@@ -9,6 +9,9 @@
#endif
implicit none
!
include 'laxlib.fh'
!
! local variables (used in the call to cegterg )
logical, parameter :: gamma_only = .false. ! general k-point version
complex(DP), allocatable :: evc(:,:)
......@@ -39,7 +42,8 @@
! 2) the sub-communicator of the band group
! 3) the communicator used across band groups
! 4) whether the distributed diagonalization is performed inside the band group or at the top level
call set_mpi_comm_4_solvers( world_comm, intra_bgrp_comm, inter_bgrp_comm )
call set_mpi_comm_4_solvers( world_comm, intra_bgrp_comm, inter_bgrp_comm )
!--------------------------------------------------------------------------------------------------------------!
#endif
......@@ -103,7 +107,6 @@
call print_clock('ffts')
#if defined(__MPI)
call unset_mpi_comm_4_solvers
call mp_global_end( )
#endif
......
......@@ -10,6 +10,9 @@ program cb_davidson_rci_main
#endif
implicit none
!
include 'laxlib.fh'
!
! local variables (used in the call to cegterg )
logical,parameter :: gamma_only=.false. ! general k-point version.
complex(DP), allocatable :: evc(:,:)
......@@ -86,7 +89,6 @@ program cb_davidson_rci_main
end do
#if defined(__MPI)
call unset_mpi_comm_4_solvers
call mp_global_end( )
#endif
......
......@@ -162,6 +162,30 @@ SUBROUTINE cb_hs_1psi( lda, n, psi, hpsi, spsi )
END SUBROUTINE cb_hs_1psi
!----------------------------------------------------------------------------
SUBROUTINE cb_hs_psi( lda, n, m, psi, hpsi, spsi )
  !----------------------------------------------------------------------------
  !
  ! ... Apply both the Hamiltonian and the S matrix to the m bands stored
  ! ... in psi, returning H*psi in hpsi and S*psi in spsi.
  ! ... Thin wrapper around cb_h_psi_ / cb_s_psi_; the combined work is
  ! ... accounted for under the 'hs_psi' clock.
  !
  USE cb_module, only : DP
  implicit none
  ! input variables
  INTEGER, INTENT(IN) :: lda              ! leading dimension of psi/hpsi/spsi
  INTEGER, INTENT(IN) :: n                ! number of rows actually used
  INTEGER, INTENT(IN) :: m                ! number of bands (columns)
  complex(DP), INTENT(IN) :: psi(lda,m)   ! input wavefunctions
  complex(DP), INTENT(OUT):: hpsi(lda,m)  ! H applied to psi
  complex(DP), INTENT(OUT):: spsi(lda,m)  ! S applied to psi
  !
  call start_clock('hs_psi')
  call cb_h_psi_(lda,n,m,psi,hpsi)
  call cb_s_psi_(lda,n,m,psi,spsi)
  call stop_clock('hs_psi')
END SUBROUTINE cb_hs_psi
SUBROUTINE cb_h_1psi( lda, n, psi, hpsi )
......
......@@ -121,7 +121,6 @@ PROGRAM cb_paro_gamma_main
call print_clock('ffts')
#if defined(__MPI)
CALL unset_mpi_comm_4_solvers
CALL mp_global_end( )
#endif
......
......@@ -10,7 +10,7 @@ PROGRAM cb_paro_gamma_main
IMPLICIT NONE
! local variables (used in the call to paro )
LOGICAL, PARAMETER :: gamma_only=.TRUE. ! general k-point version
LOGICAL, PARAMETER :: gamma_only=.TRUE. ! Gamma-point version
COMPLEX(DP), ALLOCATABLE :: evc(:,:)
REAL(dp), ALLOCATABLE :: eig(:)
INTEGER, PARAMETER :: npol=1
......@@ -30,10 +30,11 @@ PROGRAM cb_paro_gamma_main
! band group or at the parallelization level above.
#endif
!------------------------------------------------------------------------
EXTERNAL cb_h_psi, cb_s_psi, cb_hs_1psi, cb_g_1psi
EXTERNAL cb_h_psi, cb_s_psi, cb_hs_psi, cb_g_1psi
! subroutine cb_h_psi(npwx,npw,nvec,evc,hpsi) computes H*evc using band parallelization
! subroutine cb_s_psi(npwx,npw,nvec,evc,spsi) computes S*evc using band parallelization
! subroutine cb_hs_psi(npwx,npw,evc,hpsi,spsi) computes H*evc and S*evc for a single band
! subroutine cb_hs_psi(npwx,npw,nvec,evc,hpsi,spsi) computes H*evc and S*evc using band parallelization
! subroutine cb_g_1psi(npwx,npw,psi,eig) computes G*psi -> psi for a single band
!
! ... local variables
!
......@@ -75,7 +76,7 @@ PROGRAM cb_paro_gamma_main
CALL start_clock('paro')
!--- THIS IS THE RELEVANT CALL TO THE ROUTINE IN KS_Solvers/ParO --------------------------------------!
CALL paro_gamma_new(cb_h_psi, cb_s_psi, cb_hs_1psi, cb_g_1psi, overlap, &
CALL paro_gamma_new(cb_h_psi, cb_s_psi, cb_hs_psi, cb_g_1psi, overlap, &
npwx, npw, nbnd, evc, eig, btype, ethr, notconv, nhpsi)
avg_iter = avg_iter + nhpsi/float(nbnd)
......@@ -105,7 +106,6 @@ PROGRAM cb_paro_gamma_main
call print_clock('ffts')
#if defined(__MPI)
CALL unset_mpi_comm_4_solvers
CALL mp_global_end( )
#endif
......
......@@ -114,7 +114,6 @@ PROGRAM cb_paro_main
call print_clock('ffts')
#if defined(__MPI)
CALL unset_mpi_comm_4_solvers
CALL mp_global_end( )
#endif
......
......@@ -30,10 +30,11 @@ PROGRAM cb_paro_main
! band group or at the parallelization level above.
#endif
!------------------------------------------------------------------------
EXTERNAL cb_h_psi, cb_s_psi, cb_hs_1psi, cb_g_1psi
EXTERNAL cb_h_psi, cb_s_psi, cb_hs_psi, cb_g_1psi
! subroutine cb_h_psi(npwx,npw,nvec,evc,hpsi) computes H*evc using band parallelization
! subroutine cb_s_psi(npwx,npw,nvec,evc,spsi) computes S*evc using band parallelization
! subroutine cb_hs_psi(npwx,npw,evc,hpsi,spsi) computes H*evc and S*evc for a single band
! subroutine cb_hs_psi(npwx,npw,nvec,evc,hpsi,spsi) computes H*evc and S*evc using band parallelization
! subroutine cb_g_1psi(npwx,npw,psi,eig) computes G*psi -> psi for a single band
!
! ... local variables
!
......@@ -75,7 +76,7 @@ PROGRAM cb_paro_main
CALL start_clock('paro')
!--- THIS IS THE RELEVANT CALL TO THE ROUTINE IN KS_Solvers/ParO --------------------------------------!
CALL paro_k_new(cb_h_psi, cb_s_psi, cb_hs_1psi, cb_g_1psi, overlap, &
CALL paro_k_new(cb_h_psi, cb_s_psi, cb_hs_psi, cb_g_1psi, overlap, &
npwx, npw, nbnd, npol, evc, eig, btype, ethr, notconv, nhpsi)
avg_iter = avg_iter + nhpsi/float(nbnd)
......@@ -106,7 +107,6 @@ PROGRAM cb_paro_main
call print_clock('ffts')
#if defined(__MPI)
CALL unset_mpi_comm_4_solvers
CALL mp_global_end( )
#endif
......
......@@ -127,7 +127,6 @@ call set_mpi_comm_4_solvers( world_comm, intra_bgrp_comm, inter_bgrp_comm )
call print_clock('ffts')
#if defined(__MPI)
call unset_mpi_comm_4_solvers
call mp_global_end( )
#endif
......
......@@ -128,7 +128,6 @@ call set_mpi_comm_4_solvers( world_comm, intra_bgrp_comm, inter_bgrp_comm )
#if defined(__MPI)
call unset_mpi_comm_4_solvers
call mp_global_end( )
#endif
......
......@@ -16,9 +16,11 @@ MODULE mp_global
!
USE mp_world, ONLY: mp_world_start, mp_world_end, nproc
USE mp_bands
USE mp_diag
!
IMPLICIT NONE
!
include 'laxlib.fh'
!
SAVE
!
CONTAINS
......@@ -71,7 +73,8 @@ CONTAINS
ortho_parent_comm = my_comm
END IF
CALL mp_start_diag ( ndiag_, my_comm, ortho_parent_comm, do_distr_diag_inside_bgrp )
CALL laxlib_start ( ndiag_, my_comm, ortho_parent_comm, &
do_distr_diag_inside_bgrp_ = do_distr_diag_inside_bgrp )
ndiag = ndiag_ ! copy input value to output value for latr use
!
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment