! timestep_diffusion_m.f90
!
! (Header reconstructed from documentation-page residue: "Contents" /
! "Source Code" navigation text removed so the file is valid Fortran.)

module timestep_diffusion_m
    !! Implementation of an anisotropic (parallel + perpendicular) diffusion
    !! model.  A single scalar variable is advanced one time-step per call to
    !! timestep_diffusion; parallel boundary conditions are imposed through a
    !! volume penalisation term, and the explicit/implicit splitting of the
    !! diffusion operators is selected by tstep_implicit_level
    !! (see the subroutine documentation below).
    use MPI
    use comm_handler_m, only : comm_handler_t
    use precision_grillix_m, only : GP, GP_LARGEST
    use parallel_map_m, only : parallel_map_t
    use equilibrium_storage_m, only : equilibrium_storage_t
    use penalisation_m, only : penalisation_t
    use polars_m, only : polars_t
    use variable_m, only: variable_t
    use multistep_m, only : karniadakis_t
    use solver_aligned3d_m, only : solve_aligned3d
    use mms_diffusion_m, only : mms_sol_diffusion, mms_source_diffusion
    
    ! From PARALLAX  
    use perf_m, only : perf_start, perf_stop
    use error_handling_grillix_m, only: handle_error, error_info_t
    use status_codes_grillix_m, only : GRILLIX_ERR_OTHER
    use screen_io_m, only :  get_stdout
    use descriptors_m, only : BND_TYPE_DIRICHLET_ZERO, BND_TYPE_NEUMANN
    use equilibrium_m, only : equilibrium_t
    use mesh_cart_m, only : mesh_cart_t 
    use boundaries_perp_m, only : set_perpbnds, extrapolate_ghost_points
    use helmholtz_solver_m, only : helmholtz_solver_t
    
    ! Parameters
    use params_diffusion_m, only : mms_on, &
        diffcoeff_perp, diffcoeff_par, &
        tstep_implicit_level, dtau, &
        bnd_descr_core, bnd_descr_wall, bnd_descr_dome, bnd_descr_out, &
        bnd_descr_par, bnd_val_par, bnd_par_odd, epsinv, &
        parsolver_select, parsolver_params
    
    implicit none

    public :: timestep_diffusion

    integer, private, save :: cnt = 0
    ! Call counter: the first call (cnt == 1) triggers one-time setup
    ! (halo creation, allocation of dvar_dpar and coperp)

    type(variable_t), private :: dvar_dpar
    ! Auxiliary variable holding the parallel gradient of var
    ! (defined on the staggered mesh); persists between calls
    
    real(GP), allocatable, dimension(:), private, save :: coperp
    ! Perpendicular diffusion coefficient times cylindrical Jacobian,
    ! precomputed on the canonical mesh at the first call

contains

    subroutine timestep_diffusion(comm_handler, equi, &
                                  mesh_cano, mesh_stag, &
                                  equi_on_cano, equi_on_stag, &
                                  hsolver_cano, hsolver_stag, &
                                  map, &
                                  penalisation_cano, penalisation_stag, &
                                  polars_cano, polars_stag, &
                                  tstepkrk, &
                                  bndperp_types, bndperp_vals, &
                                  tau, var, tinfo, rinfo)
        !! Evolves variable a single time-step according to the
        !! anisotropic diffusion model.
        !!
        !! Explicit/implicit splitting (from the branches on
        !! tstep_implicit_level below):
        !!   0 - parallel and perpendicular diffusion both explicit;
        !!       only the penalisation diagonal term is treated implicitly
        !!   1 - parallel diffusion explicit; perpendicular diffusion and
        !!       penalisation solved implicitly via hsolver_cano
        !!   2 - perpendicular diffusion explicit; parallel diffusion and
        !!       penalisation solved implicitly via solve_aligned3d
        !! Any other value is reported through handle_error.
        type(comm_handler_t), intent(in) :: comm_handler
        !! Communicators
        class(equilibrium_t), intent(inout) :: equi
        !! Equilibrium
        type(mesh_cart_t), intent(in) :: mesh_cano
        !! Mesh (canonical)
        type(mesh_cart_t), intent(in) :: mesh_stag
        !! Mesh (staggered)
        type(equilibrium_storage_t), intent(in):: equi_on_cano
        !! Equilibrium quantities on canonical mesh
        type(equilibrium_storage_t), intent(in) :: equi_on_stag
        !! Equilibrium quantities on staggered mesh
        class(helmholtz_solver_t), intent(inout) :: hsolver_cano
        !! Elliptic (2D) solver on canonical mesh
        class(helmholtz_solver_t), intent(inout) :: hsolver_stag
        !! Elliptic (2D) solver on staggered mesh
        type(parallel_map_t), intent(in) :: map
        !! Parallel map
        type(penalisation_t), intent(in) :: penalisation_cano
        !! Penalisation (canonical)
        type(penalisation_t), intent(in) :: penalisation_stag
        !! Penalisation (staggered)
        type(polars_t), intent(in) :: polars_cano
        !! Polar grid and operators (canonical)
        type(polars_t), intent(in) :: polars_stag
        !! Polar grid and operators (staggered)
        type(karniadakis_t), intent(inout) :: tstepkrk
        !! Time-step integrator (Karniadakis)
        integer, intent(in), &
                 dimension(mesh_cano%get_n_points_boundary()) :: bndperp_types
        !! Descriptor for perpendicular boundary conditions
        real(GP), intent(inout), &
                  dimension(mesh_cano%get_n_points_boundary()) :: bndperp_vals
        !! Values for perpendicular boundary conditions
        !! (overwritten with the MMS solution when mms_on)
        real(GP), intent(inout) :: tau
        !! Time, at output tau + dtau
        type(variable_t), intent(inout) :: var
        !! Variable evolved in time,
        !! defined on canonical mesh
        integer, intent(out), dimension(2) :: tinfo
        !! Status of solvers: (1) perpendicular Helmholtz solve,
        !! (2) aligned-3D parallel solve iteration count
        real(GP), intent(out), dimension(3) :: rinfo
        !! Residuals of solvers: (1) perpendicular Helmholtz residual,
        !! (2) aligned-3D residual, (3) aligned-3D true residual

        integer :: ki 
        ! Loop index running over numbers of inner grid points
        integer :: kb
        ! Loop index running over numbers of boundary points
        integer :: kg 
        ! Loop index running over ghost points 
        integer :: l
        ! Mesh index
        
        ! Work arrays over all mesh points (NOTE: automatic arrays —
        ! stack-allocated each call)
        real(GP), dimension(mesh_cano%get_n_points()) :: &
                        d2var_dpar2, dvar, varadv, &
                        rhs, &
                        mapfwd_vals, mapbwd_vals, &
                        lambda_par, xi_par
        ! Work arrays over inner points only (indexed by ki)
        real(GP), dimension(mesh_cano%get_n_points_inner()) :: &
                        pen_vals_eff, lambda, xi
        real(GP) :: spacing_sqrd, x, y, phi, pen_dirind, pen_charfun
        real(GP) :: co_center, co_south, co_east, co_west, co_north, &
                    v_center, v_south, v_east, v_west, v_north, dlaplperp        
        integer :: lsouth, lwest, least, lnorth, niter_par
        call perf_start('timestep')
                
        cnt = cnt + 1        
        tinfo = 0
        rinfo = 0.0_GP
        spacing_sqrd = mesh_cano%get_spacing_f()**2
        phi = map%get_phi_cano()

        ! One-time setup on first call: create halos, initialise the
        ! staggered-mesh gradient variable, and precompute
        ! coperp = diffcoeff_perp * Jacobian on the canonical mesh
        if (cnt == 1) then
            call var%create_halos(comm_handler, 1, 1)

            call dvar_dpar%init(comm_handler, mesh_stag%get_n_points(), &
                                staggered=.true.)     
            call dvar_dpar%create_halos(comm_handler, 0, 1)
            
            allocate(coperp(mesh_cano%get_n_points()))
            !$omp parallel default(none) &
            !$omp shared(equi, mesh_cano, coperp, phi, &
            !$omp        diffcoeff_perp) &
            !$omp private(l, x, y)
            !$omp do
            do l = 1, mesh_cano%get_n_points()
                x = mesh_cano%get_x(l)
                y = mesh_cano%get_y(l)
                coperp(l) = diffcoeff_perp * equi%jacobian(x, y, phi)
            enddo
            !$omp end do
            !$omp end parallel
        endif

        ! Compute parallel diffusion -------------------------------------------
        call perf_start('    pardif')
        
        ! Exchange halos of var before taking the parallel gradient
        call perf_start('        pardif_mpi')
        call var%start_comm_halos(comm_handler)
        call var%finalize_comm_halos(comm_handler)
        call perf_stop('        pardif_mpi')
       
        ! Parallel gradient — only needed when parallel diffusion is treated
        ! explicitly (levels 0 and 1); level 2 handles it implicitly below
        if ( (tstep_implicit_level == 0) &
            .or.(tstep_implicit_level == 1) ) then
            
            call perf_start('        pardif_comp')
#ifdef ENABLE_CUDA
            call map%par_grad_cuda(var%vals, var%hfwd, dvar_dpar%vals)  
#else
            ! NOTE(review): bare parallel region around the call — presumably
            ! map%par_grad contains orphaned worksharing; confirm
            !$omp parallel default(none) shared(map, var, dvar_dpar)
            call map%par_grad(var%vals, var%hfwd, dvar_dpar%vals)     
            !$omp end parallel 
#endif
            call perf_stop('        pardif_comp')

            ! Parallel divergence (halo exchange on the gradient first)
            call perf_start('        pardif_mpi')
            call dvar_dpar%start_comm_halos(comm_handler)
            call dvar_dpar%finalize_comm_halos(comm_handler)
            call perf_stop('        pardif_mpi')
            call perf_start('        pardif_comp')
#ifdef ENABLE_CUDA
            call map%par_divb_cuda(dvar_dpar%vals, dvar_dpar%hbwd, d2var_dpar2)
#else
            !$omp parallel default(none) shared(map, dvar_dpar, d2var_dpar2)
            call map%par_divb(dvar_dpar%vals, dvar_dpar%hbwd, d2var_dpar2)
            !$omp end parallel
#endif
            call perf_stop('        pardif_comp')
        endif

        call perf_stop('    pardif')

        ! Compute values for penalisation --------------------------------------
        ! pen_vals_eff holds the value toward which the penalised region is
        ! forced, depending on the parallel boundary condition type
        call perf_start('    penvals')
        if (bnd_descr_par == BND_TYPE_DIRICHLET_ZERO) then
            !$omp parallel default(none) &
            !$omp shared(mesh_cano, penalisation_cano, pen_vals_eff, &
            !$omp        bnd_val_par, bnd_par_odd) &
            !$omp private(ki, pen_dirind)
            !$omp do
            do ki = 1, mesh_cano%get_n_points_inner()  
                if (bnd_par_odd) then
                    ! Odd symmetry: sign of the target value follows the
                    ! direction indicator function
                    pen_dirind = penalisation_cano%get_dirindfun_val(ki) 
                    pen_vals_eff(ki) = pen_dirind * bnd_val_par
                else
                    pen_vals_eff(ki) = bnd_val_par
                endif
            enddo

            !$omp end do
            !$omp end parallel
        elseif (bnd_descr_par == BND_TYPE_NEUMANN) then
            !$omp parallel default(none) &
            !$omp shared(map, mesh_cano, penalisation_cano, var, &
            !$omp        mapfwd_vals, mapbwd_vals, pen_vals_eff) &
            !$omp private(ki, l, pen_dirind)
            ! Compute map values from forward plane
            call map%upstream_cano_from_cano_fwd(var%hfwd, mapfwd_vals)
            ! Compute map values from backward plane
            call map%upstream_cano_from_cano_bwd(var%hbwd, mapbwd_vals)

            !$omp do
            do ki = 1, mesh_cano%get_n_points_inner()  
                l = mesh_cano%inner_indices(ki)
                pen_dirind = penalisation_cano%get_dirindfun_val(ki) 
                if (pen_dirind > 0.0_GP) then
                    ! if magnetic field directed towards target use back map values
                    pen_vals_eff(ki) = pen_dirind * mapbwd_vals(l)
                else
                    ! if magnetic field directed away from target use forward map values
                    pen_vals_eff(ki) = -pen_dirind * mapfwd_vals(l)
                endif                
            enddo
            !$omp end do
            !$omp end parallel

        else
            ! NOTE(review): 'Penbnd_tpy' looks like a typo for 'Penbnd_type'
            ! in the runtime message — left unchanged here
            call handle_error('Penbnd_tpy not valid', &
                              GRILLIX_ERR_OTHER, __LINE__, __FILE__, &
                              error_info_t('Penbnd_tpy: ',[bnd_descr_par]))
        endif 
        call perf_stop('    penvals')

        ! Compute right hand side of PDE ---------------------------------------
        ! dvar accumulates all explicit contributions:
        ! parallel diffusion, perpendicular diffusion, penalisation source,
        ! and (optionally) the MMS source
        call perf_start('    explicit_terms')
        !$omp parallel default(none) &
        !$omp shared(equi, mesh_cano, penalisation_cano, &
        !$omp        pen_vals_eff, var, dvar, d2var_dpar2, bndperp_vals, &
        !$omp        lambda, coperp, spacing_sqrd, phi, tau, &
        !$omp        mms_on, diffcoeff_par, tstep_implicit_level, dtau, epsinv) &
        !$omp private(ki, kb, kg, l, x, y, pen_charfun, dlaplperp, &
        !$omp         lsouth, least, lwest, lnorth, &
        !$omp         co_center, co_south, co_east, co_west, co_north, &
        !$omp         v_center, v_south, v_east, v_west, v_north)
        !$omp do
        do ki = 1, mesh_cano%get_n_points_inner()   
            l = mesh_cano%inner_indices(ki)
           
            ! Parallel diffusion (explicit at levels 0 and 1)
            if ( (tstep_implicit_level == 0) &
                .or.(tstep_implicit_level == 1) ) then
                dvar(l) = diffcoeff_par * d2var_dpar2(l)
            else
                dvar(l) = 0.0_GP
            endif
            
            ! Perpendicular diffusion (explicit at levels 0 and 2):
            ! 5-point conservative stencil with face-averaged coefficients
            if ( (tstep_implicit_level == 0) &
                .or. (tstep_implicit_level == 2)) then
                x = mesh_cano%get_x(l)
                y = mesh_cano%get_y(l)
                ! Indices of neighbours
                lsouth = mesh_cano%get_index_neighbor(0, -1, l)
                least = mesh_cano%get_index_neighbor(-1, 0, l)
                lwest = mesh_cano%get_index_neighbor(1, 0, l)
                lnorth = mesh_cano%get_index_neighbor(0, 1, l)
                ! Diffusion coefficient on neighbours
                co_center = coperp(l)
                co_south = coperp(lsouth)
                co_east = coperp(least)
                co_west = coperp(lwest)
                co_north = coperp(lnorth)
                ! Variable values on neighbours
                v_south = var%vals(lsouth)
                v_east = var%vals(least)
                v_west = var%vals(lwest)
                v_north = var%vals(lnorth)
                v_center = var%vals(l)
                
                dlaplperp = (v_north - v_center) * (co_north + co_center) &
                          - (v_center - v_south) * (co_center + co_south) &
                          + (v_east - v_center) * (co_east + co_center) &
                          - (v_center - v_west) * (co_center + co_west) 
                ! NOTE(review): '2.0' is a default-real literal; consider
                ! '2.0_GP' for precision consistency with the rest of the file
                dlaplperp = dlaplperp / (2.0 * spacing_sqrd * equi%jacobian(x, y, phi))

                dvar(l) = dvar(l) + dlaplperp
            endif

            ! Apply penalisation source: blend physical RHS with the
            ! relaxation term epsinv * (target value) inside the masked region
            pen_charfun = penalisation_cano%get_charfun_val(ki)     
            dvar(l) = (1.0_GP - pen_charfun) * dvar(l) &
                      + epsinv * pen_charfun &
                      * pen_vals_eff(ki)
            
            ! MMS source
            if (mms_on) then
                x = mesh_cano%get_x(l)
                y = mesh_cano%get_y(l)      
                dvar(l) = dvar(l) + &
                          mms_source_diffusion(equi, x, y, phi, tau, &
                                               pen_charfun, pen_vals_eff(ki))
            endif
                    
            ! Required for coefficient for diagonal solve  
            lambda(ki) = epsinv * pen_charfun
            
        enddo
        !$omp end do
            
        ! Set ghost points to dummy values for time-step integrator
        !$omp do
        do kb = 1, mesh_cano%get_n_points_boundary()        
            l = mesh_cano%boundary_indices(kb)
            dvar(l) = 0.0_GP 
        enddo
        !$omp end do
        !$omp do
        do kg = 1, mesh_cano%get_n_points_ghost()
            l = mesh_cano%ghost_indices(kg)        
            dvar(l) = 0.0_GP 
        enddo
        !$omp end do

        ! Set boundary values at new timestep
        ! (MMS: overwrite bndperp_vals with the manufactured solution
        ! evaluated at tau + dtau)
        if (mms_on) then
            !$omp do
            do kb = 1, mesh_cano%get_n_points_boundary()
                l = mesh_cano%boundary_indices(kb)
                x = mesh_cano%get_x(l)
                y = mesh_cano%get_y(l)
                bndperp_vals(kb) = mms_sol_diffusion(equi, x, y, phi, &
                                                     tau+dtau)
            enddo
            !$omp end do
        endif
        !$omp end parallel 
        call perf_stop('    explicit_terms')       
        
        ! Advance explicit terms in time --> varadv ----------------------------
        call perf_start('    karniadakis')        
        call tstepkrk%advance(dvar, var%vals, varadv)
        ! Push current state and RHS into the multistep history
        call tstepkrk%shift_storage((/var%vals, dvar/))
        tau = tau + dtau
        call perf_stop('    karniadakis')        
       
        call perf_start('    implicit_terms')
        ! Treat implicit terms -------------------------------------------------
        if (tstep_implicit_level == 0) then
            ! Explicit treatment of perp and par diffusion, 
            ! penalisation (diagonal term) implicit:
            ! pointwise division, no elliptic solve needed
            !$omp parallel default(none) &
            !$omp shared(mesh_cano, lambda, var, tstepkrk, varadv, &
            !$omp        bndperp_types, bndperp_vals) &
            !$omp private(ki, l)
            !$omp do
            do ki = 1, mesh_cano%get_n_points_inner()   
                l = mesh_cano%inner_indices(ki)
                lambda(ki) = 1.0_GP + tstepkrk%get_dtau_bdf() * lambda(ki)
                var%vals(l) = varadv(l) / lambda(ki)            
            enddo
            !$omp end do
            
            ! Set boundary conditions explicitly
            call set_perpbnds(mesh_cano, bndperp_types, var%vals, bndperp_vals)
            call extrapolate_ghost_points(mesh_cano, var%vals)
            !$omp end parallel

        elseif (tstep_implicit_level == 1) then
            ! Implicit treatment of only perp diffusion and penalisation (diagonal term)
            ! Set up equation system (elliptic solver)
            !$omp parallel default(none) &
            !$omp shared(equi, mesh_cano, penalisation_cano, &
            !$omp        tstepkrk, var, varadv, rhs, phi, &
            !$omp        lambda, xi, bndperp_vals) &
            !$omp private(ki, kb, kg, l, pen_charfun, x, y)
            !$omp do
            do ki = 1, mesh_cano%get_n_points_inner()        
                l = mesh_cano%inner_indices(ki)
                x = mesh_cano%get_x(l)
                y = mesh_cano%get_y(l)
                pen_charfun = penalisation_cano%get_charfun_val(ki) 

                var%vals(l) = varadv(l) ! Initial guess 
                rhs(l) = varadv(l) ! Right hand side 
                lambda(ki) = 1.0_GP + tstepkrk%get_dtau_bdf() * lambda(ki) 
                xi(ki) = (1.0_GP - pen_charfun) / equi%jacobian(x, y, phi) &
                         * tstepkrk%get_dtau_bdf() 
            enddo
            !$omp end do
            !$omp do
            do kb = 1, mesh_cano%get_n_points_boundary() 
                l = mesh_cano%boundary_indices(kb)
                rhs(l) = bndperp_vals(kb) ! Boundary values/fluxes
            enddo
            !$omp end do
            !$omp do
            do kg = 1, mesh_cano%get_n_points_ghost()        
                l = mesh_cano%ghost_indices(kg)
                rhs(l) = 0.0_GP
            enddo
            !$omp end do
            !$omp end parallel
            
            !  Perform solve (implicit)
            call perf_start('        hsolver_update')
            call hsolver_cano%update(coperp, lambda, xi, &
                                     bnd_descr_core, bnd_descr_wall, bnd_descr_dome, bnd_descr_out)
            call perf_stop('        hsolver_update')

            call perf_start('        hsolver_solve')
            ! Status and residual are returned in tinfo(1)/rinfo(1)
            call hsolver_cano%solve(rhs, var%vals, rinfo(1), tinfo(1))
            call perf_stop('        hsolver_solve')
            
        elseif (tstep_implicit_level == 2) then
            ! Implicit treatment of only parallel diffusion and penalisation (diagonal term)

            ! Set up coefficients and rhs for parallel helmholtz problem
            ! (lambda_par, xi_par indexed over all mesh points here)
            !$omp parallel default(none) &
            !$omp shared(mesh_cano, penalisation_cano, tstepkrk, &
            !$omp        var, varadv, bndperp_vals, lambda_par, xi_par, rhs, &
            !$omp        diffcoeff_par, epsinv) &
            !$omp private(ki, kb, kg, l, pen_charfun)   
            !$omp do
            do ki = 1, mesh_cano%get_n_points_inner()        
                l = mesh_cano%inner_indices(ki)
                pen_charfun = penalisation_cano%get_charfun_val(ki)  

                lambda_par(l) = 1.0_GP + tstepkrk%get_dtau_bdf() &
                                * epsinv * pen_charfun
                xi_par(l) = diffcoeff_par * (1.0_GP - pen_charfun) * tstepkrk%get_dtau_bdf()
                rhs(l) = varadv(l)
            enddo
            !$omp end do
            !$omp do
            do kb = 1, mesh_cano%get_n_points_boundary() 
                l = mesh_cano%boundary_indices(kb)
                lambda_par(l) = 1.0_GP
                xi_par(l) = 0.0_GP
                rhs(l) = bndperp_vals(kb)   
            enddo
            !$omp end do
            !$omp do
            do kg = 1, mesh_cano%get_n_points_ghost()        
                l = mesh_cano%ghost_indices(kg)
                lambda_par(l) = 1.0_GP
                xi_par(l) = 0.0_GP
                rhs(l) = var%vals(l)
            enddo
            !$omp end do
            !$omp end parallel

            ! Solve parallel helmholtz problem
            ! (iterations/residuals returned in tinfo(2), rinfo(2), rinfo(3))
            call perf_start('    solve_aligned3d')
            call solve_aligned3d(comm_handler, &
                                 equi_on_cano, equi_on_stag, &
                                 mesh_cano, mesh_stag, &
                                 map, & 
                                 parsolver_params, parsolver_select, &
                                 rhs, var%vals, &
                                 niter=tinfo(2), &
                                 res=rinfo(2), &
                                 true_res=rinfo(3), &
                                 lambda=lambda_par, &
                                 xi=xi_par, &
                                 bnd_descrs=bndperp_types)
            call perf_stop('    solve_aligned3d')
        else
            call handle_error('Tstep%implicit_level not valid', &
                              GRILLIX_ERR_OTHER, __LINE__, __FILE__, &
                              error_info_t('Implicit_level: ',[tstep_implicit_level]))
        endif   
        call perf_stop('    implicit_terms')  
        
        ! set ghost points ------------------------------------------------------------------------
        ! MMS: ghost points get the exact manufactured solution at the new
        ! time; otherwise they are extrapolated from the interior
        if (mms_on) then
            !$omp parallel default(none) &
            !$omp shared(equi, mesh_cano, var, phi, tau) &
            !$omp private(kg, l, x, y)
            !$omp do
            do kg = 1, mesh_cano%get_n_points_ghost()
                l = mesh_cano%ghost_indices(kg)          
                x = mesh_cano%get_x(l)
                y = mesh_cano%get_y(l)
                var%vals(l) = mms_sol_diffusion(equi, x, y, phi, tau)
            enddo
            !$omp end do
            !$omp end parallel
        else
            !$omp parallel default(none) shared(mesh_cano, var)
            call extrapolate_ghost_points(mesh_cano, var%vals)
            !$omp end parallel
        endif

        call perf_stop('timestep')

    end subroutine

end module