parallelisation_setup_m.f90 Source File


Contents


Source Code

 module parallelisation_setup_m
    !! Initialises parallelisation framework
    use MPI
    !$ use OMP_LIB   
#ifdef ENABLE_PETSC
    use petscksp
#endif
    use iso_fortran_env 
    use comm_handler_m, only : comm_handler_t
    use grillix_build_info_m, only : git_tag, git_hash, git_date
    use screen_io_m, only : get_stdout
    use error_handling_grillix_m, only: handle_error
    use status_codes_grillix_m, only : GRILLIX_ERR_NAMELIST
    implicit none
    
    logical, protected :: is_rank_info_writer = .false.
    !! Can be used to select rank, which rank performs output to screen.
    !! Only for GRILLIX code part
    !! Will be set to rank 0 in parallelisation_init

    public :: parallelisation_init
    public :: parallelisation_finalize
    public :: set_rank_info_writer
    
contains

     subroutine parallelisation_init(filename, comm_handler)
        !! Initialises the MPI environment (via PetscInitialize when built
        !! with PETSc, which initialises MPI internally), reads the
        !! MPI_topology namelist from `filename`, initialises the
        !! communicator handler and prints build/run information on the
        !! info-writer rank (rank 0).
        character(len=*), intent(in) :: filename
        !! Filename where parameters of MPI topology are read from
        type(comm_handler_t), intent(inout) :: comm_handler
        !! Communicators

        integer :: nplanes, nspecies
        integer :: funit, io_error
        character(len=256) :: io_errmsg

        integer :: ierr
        logical :: omp_on, omp_dynamic, omp_nested
        integer :: num_threads, omp_chunk_size
        ! `!$` sentinel: omp_sched_kind only exists when compiled with OpenMP
        !$ integer(kind=omp_sched_kind) :: itemp
        character(len=10) :: omp_schedule

        namelist / MPI_topology / nplanes, nspecies

#ifdef ENABLE_PETSC
        call PetscInitialize(PETSC_NULL_CHARACTER, ierr)
#else
        call MPI_init(ierr)
#endif
        call set_rank_info_writer(0)

        ! Defaults in case entries are missing from the namelist
        nplanes  = 0
        nspecies = 0

        ! newunit= (F2008) avoids clashes with hard-coded unit numbers
        open(newunit = funit, file = filename, status = 'old', action = 'read',&
                 iostat = io_error, iomsg = io_errmsg)
        if (io_error /= 0) then
            call handle_error(io_errmsg, GRILLIX_ERR_NAMELIST, __LINE__, __FILE__)
        endif

        read(funit, nml = MPI_topology, iostat = io_error, iomsg = io_errmsg)
        if (io_error /= 0) then
            call handle_error(io_errmsg, GRILLIX_ERR_NAMELIST, __LINE__, __FILE__)
        endif

        close(funit)

        call comm_handler%init(MPI_COMM_WORLD, nplanes, nspecies)

        ! Query OMP runtime settings. Every statement referencing omp_*
        ! symbols carries the `!$` conditional-compilation sentinel so the
        ! file still compiles (and omp_on stays .false.) without OpenMP.
        omp_on = .false.
        omp_schedule = 'unknown'
        !$OMP PARALLEL
        !$OMP SINGLE
        !$ omp_on = .true.
        !$ num_threads = omp_get_num_threads()
        !$ omp_dynamic = omp_get_dynamic()
        !$ omp_nested = omp_get_nested()
        !$ call omp_get_schedule(itemp, omp_chunk_size)
        !$ select case(itemp)
        !$     case(omp_sched_static)
        !$         omp_schedule = 'static'
        !$     case(omp_sched_dynamic)
        !$         omp_schedule = 'dynamic'
        !$     case(omp_sched_guided)
        !$         omp_schedule = 'guided'
        !$     case(omp_sched_auto)
        !$         omp_schedule = 'auto'
        !$     case default
        !$         omp_schedule = 'unknown'
        !$ end select
        !$OMP END SINGLE
        !$OMP END PARALLEL

        ! write our MPI and OMP information
        if (is_rank_info_writer) then
            write(get_stdout(),*)''
            write(get_stdout(),*)'Running GRILLIX'
            write(get_stdout(),*)'git tag: ',git_tag
            write(get_stdout(),*)'git hash: ',git_hash
            write(get_stdout(),*)'git date: ',git_date
            write(get_stdout(),*)''
            write(get_stdout(),*)''
            write(get_stdout(),*)'This file was compiled by ', &
                           compiler_version(), ' using the options ', &
                           compiler_options()
            write(get_stdout(),*)''
            write(get_stdout(),*)''
            write(get_stdout(),*)'Number of total MPI processes = ',comm_handler%get_ncart()
            write(get_stdout(),*)'Number of planes              = ',comm_handler%get_nplanes()
            write(get_stdout(),*)'Number of species             = ',comm_handler%get_nspecies()
            write(get_stdout(),*)''
            write(get_stdout(),*)''
            if (omp_on) then
                write(get_stdout(),*)'OMP switched on:'
                write(get_stdout(),*)'    Number of OMP-threads   = ',num_threads
                write(get_stdout(),*)'    omp_dynamic             = ',omp_dynamic
                write(get_stdout(),*)'    omp_nested              = ',omp_nested
                write(get_stdout(),*)'    omp_schedule            = ',omp_schedule
                write(get_stdout(),*)'    omp_chunk_size          = ',omp_chunk_size
            else
                write(get_stdout(),*)'OMP switched off'
            endif
            write(get_stdout(),*)''
        endif

        ! TODO: Information on binding of each processor (affinity)

    end subroutine parallelisation_init
     
    subroutine parallelisation_finalize()
        !! Shuts down the parallel environment: PetscFinalize (which also
        !! finalises MPI) when built with PETSc support, plain MPI_finalize
        !! otherwise.
        integer :: mpi_err

#ifdef ENABLE_PETSC
        call PetscFinalize(mpi_err)
#else
        call MPI_finalize(mpi_err)
#endif

    end subroutine parallelisation_finalize

    subroutine set_rank_info_writer(rank_info_writer)
        !! Marks the calling process as the info writer exactly when its
        !! rank in MPI_COMM_WORLD equals rank_info_writer; all other
        !! processes get is_rank_info_writer = .false.
        integer, intent(in) :: rank_info_writer

        integer :: my_rank, mpi_err

        call MPI_comm_rank(MPI_COMM_WORLD, my_rank, mpi_err)
        is_rank_info_writer = (my_rank == rank_info_writer)

    end subroutine set_rank_info_writer

end module