communication_planes_m.f90 Source File


Contents


Source Code

module communication_planes_m
    !! Communication among planes.
    !! Non-blocking communication allowing overlap of
    !! communication and computation.
    use MPI
    use precision_grillix_m, only : GP, MPI_GP
    implicit none

    public :: start_comm_planes
    public :: finalize_comm_planes

    integer, parameter :: plane_halo_tag = 335
    !! Message tag used for the plane halo exchange (was an inline
    !! magic number; all sends/receives of this pattern share it)

contains

    subroutine start_comm_planes(comm, ndim, ndim_halo, u, shift, &
                                      uhalo, request)
        !! Starts communication among two planes.
        !!
        !! Posts a non-blocking send of `u` to rank-shift and a
        !! non-blocking receive into `uhalo` from rank+shift, both
        !! taken modulo the communicator size (periodic topology).
        !! The halo data is only valid after a matching call to
        !! finalize_comm_planes with the returned `request` pair.
        integer, intent(in) :: comm
        !! MPI communicator
        integer, intent(in) :: ndim
        !! Dimension of data to be sent
        integer, intent(in) :: ndim_halo
        !! Dimension of data to be received
        real(GP), dimension(ndim), intent(in) :: u
        !! Data to be sent (must remain unmodified until the
        !! communication is finalized)
        integer, intent(in) :: shift
        !! Shift to current rank, where data is received from
        !! (rank+shift, assuming periodicity)
        real(GP), dimension(ndim_halo), intent(out) :: uhalo
        !! Halo values (available after call to finalize_comm_planes)
        integer, dimension(2), intent(out) :: request
        !! Request handles: (1) send, (2) receive

        integer :: rank, nprocs, ierr, source, dest

        ! Communication pattern: periodic shift along the plane index
        call MPI_comm_rank(comm, rank, ierr)
        call MPI_comm_size(comm, nprocs, ierr)

        ! modulo (not mod) guarantees a non-negative rank for shift < 0
        source = modulo(rank + shift, nprocs)
        dest   = modulo(rank - shift, nprocs)

        ! Non-blocking send and receive; completion is deferred to
        ! finalize_comm_planes so computation can overlap communication
        call MPI_Isend(u, ndim, MPI_GP, dest, plane_halo_tag, comm, &
                       request(1), ierr)
        call MPI_Irecv(uhalo, ndim_halo, MPI_GP, source, plane_halo_tag, &
                       comm, request(2), ierr)

    end subroutine start_comm_planes

    subroutine finalize_comm_planes(request)
        !! Finalises communication started by start_comm_planes.
        !!
        !! Blocks until both the send and the receive have completed.
        integer, intent(inout), dimension(2) :: request
        !! Request handles from start_comm_planes. intent(inout) (not
        !! intent(in)): the MPI standard requires the wait call to set
        !! each completed request to MPI_REQUEST_NULL. The previous
        !! version copied the handles to locals, which left the
        !! caller's handles stale after completion.

        integer :: ierr

        ! Wait on both requests at once instead of two MPI_wait calls
        call MPI_Waitall(2, request, MPI_STATUSES_IGNORE, ierr)

    end subroutine finalize_comm_planes

end module communication_planes_m