recycling_m.f90 Source File


Contents

Source Code


Source Code

module recycling_m
    !! Module for computing plasma/neutrals source rates due to recycling
    use precision_grillix_m, only : GP, GP_EPS
    use status_codes_grillix_m, only : GRILLIX_ERR_OTHER
    use error_handling_grillix_m, only: handle_error, error_info_t
    use variable_m, only: variable_t
    use screen_io_m, only :  get_stdout
    use csrmat_m, only : csrmat_t
    use comm_handler_m, only : comm_handler_t
    use equilibrium_m, only : equilibrium_t
    use mesh_cart_m, only : mesh_cart_t
    use parallel_map_m, only : parallel_map_t
    use equilibrium_storage_m, only : equilibrium_storage_t
    use penalisation_m, only : penalisation_t
    use parallel_target_flux_m, only : parallel_target_flux_t
    use perp_bnd_flux_m, only : perp_bnd_flux_t, compute_drift_flux, &
                                get_drift_flux_local, get_drift_flux_total
    use polygon_in_mesh_m, only : polygon_in_mesh_t
    use diagnostics_helpers_m, only : inner_product

    use params_neut_model_m, only : &
        rcy_coeff
    use params_neut_switches_m, only : &
        swn_src_rcy_partarget, &
        swn_src_rcy_perpouter, &
        swn_src_rcy_perpcore, &
        lswn_src_rcy_perp_local
    use params_brag_model_m, only: &
        delta, beta

    implicit none

    public :: compute_recycling_source
    private :: distribute_driftflux_local

contains 
            
    subroutine compute_recycling_source(comm_handler, equi, mesh, map, equi_storage, penalisation, &
                                        parflux_utils, perp_bnd_flux, ne, pot, te, nvpar_cano, apar_fluct_cano, &
                                        neutrals_dens, neutrals_parmom_cano, src_rcy)
        !! Compute the neutrals source rate due to recycling of plasma fluxes.
        !!
        !! Contributions (each gated by its switch):
        !!  - parallel plasma + neutrals momentum fluxes onto the divertor targets
        !!  - perpendicular drift fluxes through the outer boundary
        !!  - perpendicular drift fluxes through the core boundary
        !! Fluxes are either deposited locally at the boundary-adjacent points or
        !! summed globally and redistributed by amplifying the local source profile.
        type(comm_handler_t), intent(in) :: comm_handler
        !! Communicator
        class(equilibrium_t), intent(in) :: equi
        !! Equilibrium
        type(mesh_cart_t), intent(in) :: mesh
        !! Mesh (canonical)
        type(parallel_map_t), intent(in) :: map
        !! Parallel map
        type(penalisation_t), intent(in) :: penalisation
        !! Penalisation (canonical)
        type(equilibrium_storage_t), intent(in) :: equi_storage
        !! Equilibrium storage (canonical)
        type(parallel_target_flux_t), intent(in) :: parflux_utils
        !! Parallel target flux utility (canonical)
        type(perp_bnd_flux_t), intent(in) :: perp_bnd_flux
        !! Perpendicular boundary flux utility (canonical)
        type(variable_t), intent(in) :: ne
        !! Electron density
        type(variable_t), intent(in) :: pot
        !! Electrostatic potential
        type(variable_t), intent(in) :: te
        !! Electron temperature
        real(GP), dimension(mesh%get_n_points()), intent(in) :: nvpar_cano
        !! Parallel electron flux on canonical grid
        real(GP), dimension(mesh%get_n_points()), intent(in) :: apar_fluct_cano
        !! Fluctuation of apar used for flutter operators on canonical grid, auxiliary field
        type(variable_t), intent(in) :: neutrals_dens
        !! Neutrals density
        real(GP), dimension(mesh%get_n_points()), intent(in) :: neutrals_parmom_cano
        !! Neutrals parallel momentum on canonical grid
        real(GP), dimension(mesh%get_n_points()), intent(inout) :: src_rcy
        !! Neutrals density source rate from recycling (overwritten on entry)

        integer :: i, ki, l
        real(GP) :: flux_fwd, flux_bwd

        real(GP), dimension(mesh%get_n_points()) :: src_rcy_local, aux_1, aux_2
        real(GP) :: src_rcy_nonlocal
        real(GP) :: src_rcy_int, src_ratio
        real(GP), dimension(:), allocatable :: driftflux, driftflux_wrk

        ! Properly reset recycling source first
        !$omp parallel do default(none) private(l) shared(mesh, src_rcy, src_rcy_local)
        do l = 1, mesh%get_n_points()
            src_rcy(l) = 0.0_GP
            src_rcy_local(l) = 0.0_GP
        enddo
        !$omp end parallel do
        src_rcy_nonlocal = 0.0_GP

        ! Local contributions from parallel fluxes onto the targets:
        ! plasma parallel flux plus neutrals parallel momentum, recycled with rcy_coeff
        if ( swn_src_rcy_partarget > GP_EPS ) then
            !$omp parallel default(none) private(i, ki, l, flux_fwd, flux_bwd) &
            !$omp          shared(mesh, map, penalisation, parflux_utils, equi_storage, swn_src_rcy_partarget, &
            !$omp                 nvpar_cano, neutrals_dens, neutrals_parmom_cano, rcy_coeff, src_rcy_local)
            !$omp do
            do i = 1, parflux_utils%n_inds_fwd
                ki = parflux_utils%inds_fwd(i)
                l = mesh%inner_indices(ki)
                flux_fwd =  parflux_utils%compute_single(mesh, penalisation, equi_storage, &
                                                i, nvpar_cano, dirind='fwd', mode='DEFAULT') &
                          + parflux_utils%compute_single(mesh, penalisation, equi_storage, &
                                                i, neutrals_parmom_cano, dirind='fwd', mode='DEFAULT')

                src_rcy_local(l) = src_rcy_local(l) + rcy_coeff * swn_src_rcy_partarget * flux_fwd / map%fluxbox_vol_cano(l)
            enddo
            !$omp end do
            !$omp do
            do i = 1, parflux_utils%n_inds_bwd
                ki = parflux_utils%inds_bwd(i)
                l = mesh%inner_indices(ki)
                flux_bwd = parflux_utils%compute_single(mesh, penalisation, equi_storage, &
                                                i, nvpar_cano, dirind='bwd', mode='DEFAULT') &
                           + parflux_utils%compute_single(mesh, penalisation, equi_storage, &
                                                i, neutrals_parmom_cano, dirind='bwd', mode='DEFAULT')

                src_rcy_local(l) = src_rcy_local(l) + rcy_coeff * swn_src_rcy_partarget * flux_bwd / map%fluxbox_vol_cano(l)
            enddo
            !$omp end do
            !$omp end parallel
        endif

        ! Drift fluxes through outer boundary
        if (swn_src_rcy_perpouter > GP_EPS) then
            allocate(driftflux_wrk(perp_bnd_flux%outer%get_np_total()), source=0.0_GP)
            allocate(driftflux(perp_bnd_flux%outer%get_np_total()), source=0.0_GP)

            call compute_drift_flux(equi, equi_storage, mesh, map%get_dphi(), &
                                    perp_bnd_flux%outer, pot%vals, driftflux, ne%vals)

            call compute_drift_flux(equi, equi_storage, mesh, map%get_dphi(), &
                                    perp_bnd_flux%outer, te%vals, driftflux_wrk, ne%vals)
            driftflux = driftflux - driftflux_wrk

            call compute_drift_flux(equi, equi_storage, mesh, map%get_dphi(), &
                                    perp_bnd_flux%outer, ne%vals, driftflux_wrk, te%vals)
            driftflux = driftflux - driftflux_wrk

            ! Auxiliary fields for the electromagnetic flutter contribution
            !$omp parallel do default(none) private(l) shared(mesh, aux_1, aux_2, nvpar_cano, apar_fluct_cano, equi_storage)
            do l = 1, mesh%get_n_points()
                aux_1(l) = - nvpar_cano(l) * equi_storage%btor(l)
                aux_2(l) = apar_fluct_cano(l) / equi_storage%btor(l)
            enddo
            !$omp end parallel do

            call compute_drift_flux(equi, equi_storage, mesh, map%get_dphi(), &
                                    perp_bnd_flux%outer, aux_2, driftflux_wrk, &
                                    aux_1)
            driftflux = driftflux + driftflux_wrk * beta

            driftflux = swn_src_rcy_perpouter * rcy_coeff * driftflux / delta

            deallocate(driftflux_wrk)

            if (lswn_src_rcy_perp_local) then
                call distribute_driftflux_local(mesh, map, perp_bnd_flux%outer, &
                                                perp_bnd_flux%conn_outer, driftflux, src_rcy_local)
            else
                 ! Distribute perpendicular drift fluxes evenly, compute summed flux here
                src_rcy_nonlocal = src_rcy_nonlocal + &
                                   get_drift_flux_total(comm_handler, perp_bnd_flux%outer, &
                                                        driftflux, sum_over_planes=.true.)
            endif
            deallocate(driftflux)
        endif

        ! Drift fluxes through core boundary (no flutter contribution here)
        if (swn_src_rcy_perpcore > GP_EPS) then
            allocate(driftflux_wrk(perp_bnd_flux%core%get_np_total()), source=0.0_GP)
            allocate(driftflux(perp_bnd_flux%core%get_np_total()), source=0.0_GP)

            call compute_drift_flux(equi, equi_storage, mesh, map%get_dphi(), &
                                    perp_bnd_flux%core, pot%vals, driftflux, ne%vals)

            call compute_drift_flux(equi, equi_storage, mesh, map%get_dphi(), &
                                    perp_bnd_flux%core, te%vals, driftflux_wrk, ne%vals)
            driftflux = driftflux - driftflux_wrk

            call compute_drift_flux(equi, equi_storage, mesh, map%get_dphi(), &
                                    perp_bnd_flux%core, ne%vals, driftflux_wrk, te%vals)
            driftflux = driftflux - driftflux_wrk

            driftflux = swn_src_rcy_perpcore * rcy_coeff * driftflux / delta

            deallocate(driftflux_wrk)

            if (lswn_src_rcy_perp_local) then
                call distribute_driftflux_local(mesh, map, perp_bnd_flux%core, &
                                                perp_bnd_flux%conn_core, driftflux, src_rcy_local)
            else
                src_rcy_nonlocal = src_rcy_nonlocal + &
                                   get_drift_flux_total(comm_handler, perp_bnd_flux%core, &
                                                        driftflux, sum_over_planes=.true.)
            endif
            deallocate(driftflux)
        endif

        ! Plasma drift flux may point into domain, causing "negative" recycling sources.
        ! To avoid local negative densities, absorb local negative contribution in nonlocal source
        !$omp parallel do default(none) private(l) &
        !$omp             shared(mesh, map, src_rcy_local) reduction(+:src_rcy_nonlocal)
        do l = 1, mesh%get_n_points()
            if (src_rcy_local(l) < 0.0_GP) then
                src_rcy_nonlocal = src_rcy_nonlocal + src_rcy_local(l) * map%fluxbox_vol_cano(l)
                src_rcy_local(l) = 0.0_GP
            endif
        enddo
        !$omp end parallel do

        if (.not. lswn_src_rcy_perp_local) then
            ! If sources were not already distributed locally, the nonlocal source is
            ! now applied by amplifying existing local sources by a constant factor.
            ! This only works if the local source itself is larger than zero

            ! Compute total contribution from local sources
            src_rcy_int = inner_product(comm_handler, mesh, map, penalisation, is_stag=.false., &
                                        u_in=src_rcy_local, use_abs=.false., use_vol_avg=.false., &
                                        use_full_domain=.false., sum_over_planes=.true.)

            ! Safety check if total source is (almost) zero.
            ! NOTE: the ratio must only be computed in the well-conditioned branch;
            ! an unconditional division here would overwrite the safe src_ratio = 0
            ! with a division by a (near-)zero src_rcy_int.
            if (src_rcy_int < 100.0_GP * GP_EPS) then
                if (abs(src_rcy_nonlocal) < 100.0_GP * GP_EPS) then
                    ! Both local and nonlocal sources are negligible,
                    ! can just proceed by doing nothing
                    src_ratio = 0.0_GP
                else
                    call handle_error('recycling_m: src_rcy_int too small to apply src_rcy_nonlocal', &
                                    GRILLIX_ERR_OTHER, __LINE__, __FILE__, &
                                    error_info_t('src_rcy_int, src_rcy_nonlocal: ', &
                                    [src_rcy_int, src_rcy_nonlocal]))
                endif
            else
                ! If ok, proceed with source amplification
                src_ratio = src_rcy_nonlocal / src_rcy_int
            endif
            ! A ratio below -1 would flip the sign of the local source profile
            if (src_ratio < -1.0_GP) then
                    call handle_error('recycling_m: src_ratio below threshold', &
                                        GRILLIX_ERR_OTHER, __LINE__, __FILE__, &
                                        error_info_t('src_ratio: ', [src_ratio]))
            endif
            !$omp parallel do default(none) private(l) shared(mesh, src_rcy, src_rcy_local, src_ratio)
            do l = 1, mesh%get_n_points()
                src_rcy(l) = src_rcy_local(l) * ( 1.0_GP + src_ratio )
            enddo
            !$omp end parallel do
        endif

    end subroutine

    subroutine distribute_driftflux_local(mesh, map, polygon, conn, drift_flux, src)
        !! Deposit the flux through each boundary polygon point onto the adjacent
        !! inner mesh points. The connectivity matrix conn (one row per polygon
        !! point, in CSR format) lists the receiving inner points; the flux of a
        !! polygon point is split evenly among them and converted to a source
        !! rate via the local flux-box volume.
        type(mesh_cart_t), intent(in) :: mesh
        !! Mesh
        type(parallel_map_t), intent(in) :: map
        !! Map
        type(polygon_in_mesh_t), intent(in) :: polygon
        !! Polygon defining the boundary
        type(csrmat_t), intent(in) :: conn
        !! Connectivity matrix associated with polygon
        real(GP), intent(in), dimension(polygon%get_np_total()) :: drift_flux
        !! Drift flux through polygon
        real(GP), dimension(mesh%get_n_points()), intent(inout) :: src
        !! Source

        integer :: iseg, ip, ip_glob, n_conn, inz, lpt
        real(GP) :: flux_here

        do iseg = 1, polygon%get_nsegs()
            do ip = 1, polygon%get_nps(iseg)
                ! Global polygon point index of point ip on segment iseg
                ip_glob = polygon%ki_seg(iseg) - 1 + ip
                flux_here = get_drift_flux_local(polygon, drift_flux, ip, iseg)
                ! Number of inner points connected to this boundary polygon point
                ! ( = number of entries within this row of connectivity matrix)
                n_conn = conn%i(ip_glob+1) - conn%i(ip_glob)
                ! Scatter the flux over the row entries: effectively a transposed
                ! matrix-vector multiplication in CSR format
                do inz = conn%i(ip_glob), conn%i(ip_glob+1) - 1
                    lpt = conn%j(inz)
                    src(lpt) = src(lpt) + flux_here / (n_conn * map%fluxbox_vol_cano(lpt))
                enddo
            enddo
        enddo

    end subroutine distribute_driftflux_local

end module recycling_m