! penalisation_wip3d_build_s.f90
submodule (penalisation_m) penalisation_wip3d_build_s
    !! Routines related with building penalisation for 3D stellarator geometries
    !! This is yet an experimental feature
    implicit none
    
    integer :: nt_chi_first = 100
    !! Number of time-steps for first diffusion on chi
    integer :: nt_chi_sec = 1000
    !! Number of time-steps for second diffusion on chi
    integer :: nt_xi = 100
    !! Number of time-steps for diffusion on xi
    real(GP) :: dtau = 1.0E-2_GP
    !! Timestep size for diffusions
    real(GP) :: dperp = 1.0E-5_GP
    !! Perpendicular diffusion coefficient
    real(GP) :: chi_thresh = 0.3_GP
    !! Threshold for chi function
    real(GP) :: xi_thresh = 1.0E-2_GP
    !! Threshold for xi function
    
    namelist / params_penalisation_wip3d / &
        nt_chi_first, nt_chi_sec, nt_xi, dtau, dperp, chi_thresh, xi_thresh 
    
contains

    module subroutine build_wip3d_penalisation(pen_cano, pen_stag, comm_handler, &
                                               equi, equi_on_cano, equi_on_stag, &
                                               mesh_cano, mesh_stag, multigrid_cano, multigrid_stag, &
                                               map, dbgout)
        !! Build penalisation data for 3D stellarator geometries (experimental).
        !!
        !! Constructs the characteristic function chi (1 inside wall/outside
        !! domain, 0 in plasma, smoothed by field-aligned + perpendicular
        !! diffusion) and the direction-indicator function xi (sign of the
        !! parallel gradient of chi, diffused, thresholded and extrapolated
        !! into the masked region via an in-plane Helmholtz solve), on both
        !! the canonical and staggered meshes. Results are stored in
        !! pen_cano/pen_stag and the penalisation index lists are built.
        !! Control parameters are read from the params_penalisation_wip3d
        !! namelist in 'params.in'.
        class(penalisation_t), intent(inout) :: pen_cano
        class(penalisation_t), intent(inout) :: pen_stag
        type(comm_handler_t), intent(in) :: comm_handler
        class(equilibrium_t) :: equi
        class(equilibrium_storage_t), intent(inout) :: equi_on_cano
        class(equilibrium_storage_t), intent(inout) :: equi_on_stag
        type(mesh_cart_t), intent(inout) :: mesh_cano
        type(mesh_cart_t), intent(inout) :: mesh_stag
        type(multigrid_t), intent(inout) :: multigrid_cano
        type(multigrid_t), intent(inout) :: multigrid_stag
        type(parallel_map_t), intent(in) :: map
        integer, intent(in), optional :: dbgout
        !! Debug output level (>= 3: every rank prints)

        integer :: outi, l, district, it, nrecv, ki, ir, nt, io_unit, io_error
        real(GP) :: x, y
        character(len=256) :: io_errmsg
        type(inplane_operators_t) :: opsinplane_cano, opsinplane_stag

        ! NOTE(review): large automatic arrays; for very fine meshes these may
        ! stress the stack — consider allocatable if that becomes an issue.
        real(GP), dimension(mesh_cano%get_n_points()) :: chi_cano, xi_cano, mask_cano, wrk_canosize
        real(GP), dimension(mesh_stag%get_n_points()) :: chi_stag, xi_stag, mask_stag, wrk_stagsize
        real(GP), allocatable, dimension(:) :: wrk_halo, wrk_alloc, solv_xi, solv_lambda

        type(parameters_helmholtz_solver_factory) :: parhelm
        class(helmholtz_solver_t), allocatable :: hsolver
        real(GP) :: res
        integer :: sinfo

        ! Without penalisation the standard build is sufficient
        if (pen_cano%pen_method == PEN_METHOD_NONE) then
            call pen_cano%build(comm_handler, equi, mesh_cano, multigrid_cano, dbgout)
            call pen_stag%build(comm_handler, equi, mesh_stag, multigrid_stag, dbgout)
            return
        endif

        ! Set debug output level
        outi = 0
        if ( present(dbgout) ) then
            if ( is_rank_info_writer ) then
                outi = dbgout
            endif
            if ( dbgout >= 3) then
                outi = dbgout ! Every rank prints debug info
            endif
        endif

        if ( outi > 0 ) then
            write(get_stdout(),*)''
            write(get_stdout(),*)'Building 3D penalisation (WIP)'
        endif

        ! Read parameters from the params_penalisation_wip3d namelist
        open(newunit=io_unit, file='params.in', status='old', action='read', &
            iostat=io_error, iomsg=io_errmsg)
        if (io_error /= 0) then
            call handle_error(io_errmsg, GRILLIX_ERR_NAMELIST, &
                              __LINE__, __FILE__)
        endif

        read(io_unit, nml=params_penalisation_wip3d, iostat=io_error, iomsg=io_errmsg)
        if (io_error /= 0) then
            call handle_error(io_errmsg, GRILLIX_ERR_NAMELIST, &
                              __LINE__, __FILE__)
        endif
        close(io_unit)

        if (outi > 0) then
            write(get_stdout(), nml=params_penalisation_wip3d, iostat=io_error, iomsg=io_errmsg)
        endif

        ! Initialise in-plane operators for smoothing
        call opsinplane_cano%init(equi, mesh_cano, equi_on_cano, norm_scale=1.0_GP, norm_flutter=0.0_GP)
        call opsinplane_stag%init(equi, mesh_stag, equi_on_stag, norm_scale=1.0_GP, norm_flutter=0.0_GP)

        if ( outi > 0 ) then
            write(get_stdout(),*)'building chi-function'
        endif

        ! Initialise characteristic function and mask, then diffuse twice:
        ! first pass diffuses everywhere, second pass freezes points that
        ! stayed below chi_thresh (mask = 0) and re-diffuses.
        do ir = 1, 2
            if (ir == 1) then
                nt = nt_chi_first
                if ( outi > 0 ) then
                    write(get_stdout(),*)'... first diffusion loop'
                endif
            else
                nt = nt_chi_sec
                if ( outi > 0 ) then
                    write(get_stdout(),*)'... second diffusion loop'
                endif
            endif
            !$omp parallel default(none) private(l, district, it) &
            !$omp shared(ir, chi_thresh, mesh_cano, chi_cano, mask_cano, mesh_stag, chi_stag, mask_stag)
            !$omp do
            do l = 1, mesh_cano%get_n_points()
                district = mesh_cano%get_district(l)
                select case(district)
                    case(DISTRICT_WALL, DISTRICT_OUT)
                        ! Wall/outside: chi fixed at 1, excluded from updates
                        mask_cano(l) = 0.0_GP
                        chi_cano(l) = 1.0_GP
                    case default
                        if (ir == 1) then
                            mask_cano(l) = 1.0_GP
                        else
                            ! Second round: only re-diffuse where first round exceeded threshold
                            if (chi_cano(l) > chi_thresh) then
                                mask_cano(l) = 1.0_GP
                            else
                                mask_cano(l) = 0.0_GP
                            endif
                        endif
                        chi_cano(l) = 0.0_GP
                end select
            enddo
            !$omp end do
            !$omp do
            do l = 1, mesh_stag%get_n_points()
                district = mesh_stag%get_district(l)
                select case(district)
                    case(DISTRICT_WALL, DISTRICT_OUT)
                        chi_stag(l) = 1.0_GP
                        mask_stag(l) = 0.0_GP
                    case default
                        if (ir == 1) then
                            mask_stag(l) = 1.0_GP
                        else
                            if (chi_stag(l) > chi_thresh) then
                                mask_stag(l) = 1.0_GP
                            else
                                mask_stag(l) = 0.0_GP
                            endif
                        endif
                        chi_stag(l) = 0.0_GP
                end select
            enddo
            !$omp end do
            !$omp end parallel

            ! Spread chi_cano along field lines:
            ! explicit Euler steps of d(chi)/dtau = dperp*nabla_pol^2(chi) + div_b(grad_par(chi))
            allocate(wrk_alloc(mesh_cano%get_n_points()), source=chi_cano)
            do it = 1, nt
                call getdata_fwdbwdplane(comm_handler%get_comm_planes(), +1,  &
                                         mesh_cano%get_n_points(), &
                                         chi_cano, nrecv, wrk_halo)
                !$omp parallel default(none) shared(map, chi_cano, wrk_halo, wrk_stagsize)
                call map%par_grad(chi_cano, wrk_halo, wrk_stagsize)
                !$omp end parallel
                deallocate(wrk_halo)
                call getdata_fwdbwdplane(comm_handler%get_comm_planes(), -1,  &
                                         mesh_stag%get_n_points(), &
                                         wrk_stagsize, nrecv, wrk_halo)
                !$omp parallel default(none) shared(map, wrk_stagsize, wrk_halo, wrk_canosize)
                call map%par_divb(wrk_stagsize, wrk_halo, wrk_canosize)
                !$omp end parallel
                deallocate(wrk_halo)

                !$omp parallel default(none) private(ki, l) &
                !$omp shared(dtau, dperp, mesh_cano, opsinplane_cano, &
                !$omp        chi_cano, mask_cano, wrk_canosize, wrk_alloc)
                !$omp do
                do ki = 1, mesh_cano%get_n_points_inner()
                    l = mesh_cano%inner_indices(ki)
                    wrk_alloc(l) = chi_cano(l) + mask_cano(l) * dtau * ( &
                                        dperp * opsinplane_cano%nabla_pol_sqrd(chi_cano, l) &
                                        + wrk_canosize(l) )
                enddo
                !$omp end do
                !$omp do
                do l = 1, mesh_cano%get_n_points()
                    chi_cano(l) = wrk_alloc(l)
                enddo
                !$omp end do
                !$omp end parallel
            enddo
            deallocate(wrk_alloc)

            ! Spread chi_stag along field lines (divergence form, weighted by |B|)
            allocate(wrk_alloc(mesh_stag%get_n_points()), source=chi_stag)
            do it = 1, nt

                !$omp parallel default(none) private(l) &
                !$omp shared(mesh_stag, equi_on_stag, chi_stag, wrk_stagsize)
                !$omp do
                do l = 1, mesh_stag%get_n_points()
                    wrk_stagsize(l) = equi_on_stag%absb(l) * chi_stag(l)
                enddo
                !$omp end do
                !$omp end parallel

                call getdata_fwdbwdplane(comm_handler%get_comm_planes(), -1,  &
                                         mesh_stag%get_n_points(), &
                                         wrk_stagsize, nrecv, wrk_halo)
                !$omp parallel default(none) shared(map, wrk_stagsize, wrk_halo, wrk_canosize)
                call map%par_divb(wrk_stagsize, wrk_halo, wrk_canosize)
                !$omp end parallel
                deallocate(wrk_halo)

                !$omp parallel default(none) private(l) &
                !$omp shared(mesh_cano, equi_on_cano, wrk_canosize)
                !$omp do
                do l = 1, mesh_cano%get_n_points()
                    wrk_canosize(l) = wrk_canosize(l) / equi_on_cano%absb(l)**2
                enddo
                !$omp end do
                !$omp end parallel

                call getdata_fwdbwdplane(comm_handler%get_comm_planes(), +1,  &
                                         mesh_cano%get_n_points(), &
                                         wrk_canosize, nrecv, wrk_halo)
                !$omp parallel default(none) shared(map, wrk_canosize, wrk_halo, wrk_stagsize)
                call map%par_grad(wrk_canosize, wrk_halo, wrk_stagsize)
                !$omp end parallel
                deallocate(wrk_halo)

                !$omp parallel default(none) private(ki, l) &
                !$omp shared(dtau, dperp, mesh_stag, equi_on_stag, opsinplane_stag, &
                !$omp        chi_stag, mask_stag, wrk_stagsize, wrk_alloc)
                !$omp do
                do ki = 1, mesh_stag%get_n_points_inner()
                    l = mesh_stag%inner_indices(ki)
                    wrk_alloc(l) = chi_stag(l) + mask_stag(l) * dtau * ( &
                                        dperp * opsinplane_stag%nabla_pol_sqrd(chi_stag, l) &
                                        + wrk_stagsize(l) * equi_on_stag%absb(l))
                enddo
                !$omp end do
                !$omp do
                do l = 1, mesh_stag%get_n_points()
                    chi_stag(l) = wrk_alloc(l)
                enddo
                !$omp end do
                !$omp end parallel
            enddo
            deallocate(wrk_alloc)

        enddo

        if ( outi > 0 ) then
            write(get_stdout(),*)'building xi-function'
        endif

        ! Initialise xi as grad_parallel(chi), masked to the penalised region
        call getdata_fwdbwdplane(comm_handler%get_comm_planes(), +1,  &
                                 mesh_cano%get_n_points(), &
                                 chi_cano, nrecv, wrk_halo)
        !$omp parallel default(none) private(l) &
        !$omp shared(mesh_stag, map, chi_cano, wrk_halo, xi_stag, mask_stag)
        call map%par_grad(chi_cano, wrk_halo, xi_stag)
        !$omp do
        do l = 1, mesh_stag%get_n_points()
            xi_stag(l) = xi_stag(l) * mask_stag(l)
        enddo
        !$omp end do
        !$omp end parallel
        deallocate(wrk_halo)

        !$omp parallel default(none) private(l) &
        !$omp shared(mesh_stag, equi_on_stag, chi_stag, wrk_stagsize)
        !$omp do
        do l = 1, mesh_stag%get_n_points()
            wrk_stagsize(l) = chi_stag(l) * equi_on_stag%absb(l)
        enddo
        !$omp end do
        !$omp end parallel
        call getdata_fwdbwdplane(comm_handler%get_comm_planes(), -1,  &
                                 mesh_stag%get_n_points(), &
                                 wrk_stagsize, nrecv, wrk_halo)
        !$omp parallel default(none) private(l) &
        !$omp shared(mesh_cano, map, wrk_stagsize, wrk_halo, xi_cano, mask_cano)
        call map%par_divb(wrk_stagsize, wrk_halo, xi_cano)
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            xi_cano(l) = xi_cano(l) * mask_cano(l)
        enddo
        !$omp end do
        !$omp end parallel
        deallocate(wrk_halo)

        ! Spread xi_stag along field line (unmasked diffusion)
        allocate(wrk_alloc(mesh_stag%get_n_points()), source = 0.0_GP)
        do it = 1, nt_xi
            call getdata_fwdbwdplane(comm_handler%get_comm_planes(), -1,  &
                                     mesh_stag%get_n_points(), &
                                     xi_stag, nrecv, wrk_halo)
            !$omp parallel default(none) shared(map, xi_stag, wrk_halo, wrk_canosize)
            call map%par_divb(xi_stag, wrk_halo, wrk_canosize)
            !$omp end parallel
            deallocate(wrk_halo)
            call getdata_fwdbwdplane(comm_handler%get_comm_planes(), +1,  &
                                     mesh_cano%get_n_points(), &
                                     wrk_canosize, nrecv, wrk_halo)
            !$omp parallel default(none) shared(map, wrk_canosize, wrk_halo, wrk_stagsize)
            call map%par_grad(wrk_canosize, wrk_halo, wrk_stagsize)
            !$omp end parallel
            deallocate(wrk_halo)

            !$omp parallel default(none) private(ki, l) &
            !$omp shared(dtau, dperp, mesh_stag, opsinplane_stag, &
            !$omp        xi_stag, wrk_stagsize, wrk_alloc)
            !$omp do
            do ki = 1, mesh_stag%get_n_points_inner()
                l = mesh_stag%inner_indices(ki)
                wrk_alloc(l) = xi_stag(l) + dtau * (&
                                    dperp * opsinplane_stag%nabla_pol_sqrd(xi_stag, l) &
                                    + wrk_stagsize(l) )
            enddo
            !$omp end do
            !$omp do
            do l = 1, mesh_stag%get_n_points()
                xi_stag(l) = wrk_alloc(l)
            enddo
            !$omp end do
            !$omp end parallel
        enddo
        deallocate(wrk_alloc)

        ! Spread xi_cano along field line
        allocate(wrk_alloc(mesh_cano%get_n_points()), source = 0.0_GP)
        do it = 1, nt_xi
            call getdata_fwdbwdplane(comm_handler%get_comm_planes(), +1,  &
                                     mesh_cano%get_n_points(), &
                                     xi_cano, nrecv, wrk_halo)
            !$omp parallel default(none) shared(map, xi_cano, wrk_halo, wrk_stagsize)
            call map%par_grad(xi_cano, wrk_halo, wrk_stagsize)
            !$omp end parallel
            deallocate(wrk_halo)
            call getdata_fwdbwdplane(comm_handler%get_comm_planes(), -1,  &
                                     mesh_stag%get_n_points(), &
                                     wrk_stagsize, nrecv, wrk_halo)
            !$omp parallel default(none) shared(map, wrk_stagsize, wrk_halo, wrk_canosize)
            call map%par_divb(wrk_stagsize, wrk_halo, wrk_canosize)
            !$omp end parallel
            deallocate(wrk_halo)

            !$omp parallel default(none) private(ki, l) &
            !$omp shared(dtau, dperp, mesh_cano, opsinplane_cano, &
            !$omp        xi_cano, wrk_canosize, wrk_alloc)
            !$omp do
            do ki = 1, mesh_cano%get_n_points_inner()
                l = mesh_cano%inner_indices(ki)
                wrk_alloc(l) = xi_cano(l) + dtau * (&
                                    dperp * opsinplane_cano%nabla_pol_sqrd(xi_cano, l) &
                                    + wrk_canosize(l) )
            enddo
            !$omp end do
            !$omp do
            do l = 1, mesh_cano%get_n_points()
                xi_cano(l) = wrk_alloc(l)
            enddo
            !$omp end do
            !$omp end parallel
        enddo
        deallocate(wrk_alloc)

        ! Set xi thresholds: saturate xi to +-1 outside [-xi_thresh, xi_thresh];
        ! the mask now marks the undetermined region to be extrapolated below.
        !$omp parallel default(none) private(l) &
        !$omp shared(xi_thresh, mesh_cano, xi_cano, mask_cano, mesh_stag, xi_stag, mask_stag)
        !$omp do
        do l = 1, mesh_stag%get_n_points()
            if (xi_stag(l) > xi_thresh) then
                xi_stag(l) = 1.0_GP
                mask_stag(l) = 0.0_GP
            elseif (xi_stag(l) < -xi_thresh) then
                xi_stag(l)= -1.0_GP
                mask_stag(l) = 0.0_GP
            else
                xi_stag(l) = 0.0_GP
                mask_stag(l) = 1.0_GP
            endif
        enddo
        !$omp end do
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            if (xi_cano(l) > xi_thresh) then
                xi_cano(l) = 1.0_GP
                mask_cano(l) = 0.0_GP
            elseif (xi_cano(l) < -xi_thresh) then
                xi_cano(l)= -1.0_GP
                mask_cano(l) = 0.0_GP
            else
                xi_cano(l) = 0.0_GP
                mask_cano(l) = 1.0_GP
            endif
        enddo
        !$omp end do
        !$omp end parallel

        ! Extrapolate xi_stag (fill up region) via in-plane elliptic solver
        allocate(wrk_alloc(mesh_stag%get_n_points()), source=xi_stag)
        allocate(solv_xi(mesh_stag%get_n_points_inner()))
        allocate(solv_lambda(mesh_stag%get_n_points_inner()))
        !$omp parallel default(none) private(l, ki, x, y) &
        !$omp shared(equi, mesh_stag, mask_stag, solv_xi, solv_lambda, wrk_stagsize)
        !$omp do
        do ki = 1, mesh_stag%get_n_points_inner()
            l = mesh_stag%inner_indices(ki)
            x = mesh_stag%get_x(l)
            y = mesh_stag%get_y(l)
            solv_xi(ki) = mask_stag(l) / equi%jacobian(x, y, mesh_stag%get_phi())
            solv_lambda(ki) = 1.0_GP - mask_stag(l)
        enddo
        !$omp end do
        !$omp do
        do l = 1, mesh_stag%get_n_points()
            x = mesh_stag%get_x(l)
            y = mesh_stag%get_y(l)
            wrk_stagsize(l) = equi%jacobian(x, y, mesh_stag%get_phi())
        enddo
        !$omp end do
        !$omp end parallel

        call helmholtz_solver_factory(multigrid_stag, &
                                      BND_TYPE_NEUMANN, &
                                      BND_TYPE_NEUMANN, &
                                      BND_TYPE_NEUMANN, &
                                      BND_TYPE_NEUMANN, &
                                      wrk_stagsize, solv_lambda, solv_xi, &
                                      parhelm, 'DIRECT', hsolver)

        call hsolver%solve(wrk_alloc, xi_stag, res, sinfo)
        deallocate(hsolver)
        deallocate(solv_lambda)
        deallocate(solv_xi)
        deallocate(wrk_alloc)
        if (sinfo < 0) then
             call handle_error('Solver for penalisation failed', &
                               GRILLIX_ERR_SOLVER2D, __LINE__, __FILE__, &
                               error_info_t('sinfo, res: ',[sinfo],[res]))
        endif

        ! Extrapolate xi_cano
        allocate(wrk_alloc(mesh_cano%get_n_points()), source=xi_cano)
        allocate(solv_xi(mesh_cano%get_n_points_inner()))
        allocate(solv_lambda(mesh_cano%get_n_points_inner()))
        !$omp parallel default(none) private(l, ki, x, y) &
        !$omp shared(equi, mesh_cano, mask_cano, solv_xi, solv_lambda, wrk_canosize)
        !$omp do
        do ki = 1, mesh_cano%get_n_points_inner()
            l = mesh_cano%inner_indices(ki)
            x = mesh_cano%get_x(l)
            y = mesh_cano%get_y(l)
            solv_xi(ki) = mask_cano(l) / equi%jacobian(x, y, mesh_cano%get_phi())
            solv_lambda(ki) = 1.0_GP - mask_cano(l)
        enddo
        !$omp end do
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            x = mesh_cano%get_x(l)
            y = mesh_cano%get_y(l)
            wrk_canosize(l) = equi%jacobian(x, y, mesh_cano%get_phi())
        enddo
        !$omp end do
        !$omp end parallel

        call helmholtz_solver_factory(multigrid_cano, &
                                      BND_TYPE_NEUMANN, &
                                      BND_TYPE_NEUMANN, &
                                      BND_TYPE_NEUMANN, &
                                      BND_TYPE_NEUMANN, &
                                      wrk_canosize, solv_lambda, solv_xi, &
                                      parhelm, 'DIRECT', hsolver)

        call hsolver%solve(wrk_alloc, xi_cano, res, sinfo)
        deallocate(hsolver)
        deallocate(solv_lambda)
        deallocate(solv_xi)
        deallocate(wrk_alloc)
        if (sinfo < 0) then
             call handle_error('Solver for penalisation failed', &
                               GRILLIX_ERR_SOLVER2D, __LINE__, __FILE__, &
                               error_info_t('sinfo, res: ',[sinfo],[res]))
        endif

        ! Set to xi to \pm 1
        !$omp parallel default(none) private(l) &
        !$omp shared(mesh_stag, xi_stag, mesh_cano, xi_cano)
        !$omp do
        do l = 1, mesh_stag%get_n_points()
            if (xi_stag(l) > 0.0_GP) then
                xi_stag(l) = 1.0_GP
            else
                xi_stag(l) = -1.0_GP
            endif
        enddo
        !$omp end do
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            if (xi_cano(l) > 0.0_GP) then
                xi_cano(l) = 1.0_GP
            else
                xi_cano(l) = -1.0_GP
            endif
        enddo
        !$omp end do
        !$omp end parallel

        ! Put results into penalisation types (inner-point numbering)
        allocate(pen_cano%charfun(mesh_cano%get_n_points_inner()))
        allocate(pen_cano%dirindfun(mesh_cano%get_n_points_inner()), source=0.0_GP)

        allocate(pen_stag%charfun(mesh_stag%get_n_points_inner()), source=0.0_GP)
        allocate(pen_stag%dirindfun(mesh_stag%get_n_points_inner()))

        !$omp parallel default(none) private(ki,l) &
        !$omp shared(pen_cano, mesh_cano, chi_cano, xi_cano, &
        !$omp        pen_stag, mesh_stag, chi_stag, xi_stag)
        !$omp do
        do ki = 1, mesh_cano%get_n_points_inner()
            l  = mesh_cano%inner_indices(ki)
            pen_cano%charfun(ki) = chi_cano(l)
            pen_cano%dirindfun(ki) = xi_cano(l)
        enddo
        !$omp end do
        !$omp do
        do ki = 1, mesh_stag%get_n_points_inner()
            l  = mesh_stag%inner_indices(ki)
            pen_stag%charfun(ki) = chi_stag(l)
            pen_stag%dirindfun(ki) = xi_stag(l)
        enddo
        !$omp end do
        !$omp end parallel

        ! Build penalisation indices
        call pen_cano%build_pen_inds(mesh_cano, dbgout)
        call pen_stag%build_pen_inds(mesh_stag, dbgout)

        if ( outi > 0 ) then
            write(get_stdout(),*)'Building 3D penalisation (WIP) complete'
            write(get_stdout(),*)''
        endif

    end subroutine

end submodule