iol_source_eval_s.f90 Source File


Contents

Source Code


Source Code

submodule(iol_source_m) iol_source_eval_s
    !! Routines related with evaluation of IOL sources
    implicit none
    
contains
        
    module subroutine driver_iol(self, comm_handler, equi, mesh, polars, &
        tau, dtau, ne, pot, ti, src_vort, update_performed, force_no_update)
        !! Driver for the ion-orbit-loss (IOL) source. Folds the current
        !! fields into the internally stored running averages, decides whether
        !! the IOL source must be recomputed at this time step, and finally
        !! adds the stored IOL contribution to the vorticity source.
        class(iol_source_t), intent(inout) :: self
        type(comm_handler_t), intent(in) :: comm_handler
        class(equilibrium_t), intent(inout) :: equi
        type(mesh_cart_t), intent(inout) :: mesh
        type(polars_t), intent(in) :: polars
        real(GP), intent(in) :: tau
            !! Current simulation time
        real(GP), intent(in) :: dtau
            !! Time step size
        type(variable_t), intent(in) :: ne
            !! Electron density field
        type(variable_t), intent(in) :: pot
            !! Electrostatic potential field
        type(variable_t), intent(in) :: ti
            !! Ion temperature field
        real(GP), dimension(mesh%get_n_points_inner()), intent(inout) :: &
            src_vort
            !! Vorticity source; IOL contribution is added on exit
        logical, intent(out) :: update_performed
            !! .true. if the IOL source was recomputed during this call
        logical, intent(in), optional :: force_no_update
            !! If present and .true., suppress the update and instead
            !! re-synchronize the stored source across planes

        integer :: kp, ierr
        real(GP) :: tau_in_frame

        call perf_start('source_iol')

        ! Accumulate the current fields into the stored time averages
        call add_fields_to_averaged_internal_fields(self, comm_handler, &
            equi,  mesh, polars, ne, pot, ti)

        ! An update is due whenever tau lies within half a time step of a
        ! multiple of the averaging frame length tau_av_frame
        tau_in_frame = mod(tau, self%tau_av_frame)
        update_performed = (tau_in_frame < dtau/2.0_GP) .or. &
            (tau_in_frame > self%tau_av_frame - dtau/2.0_GP)

        ! Caller veto: skip the update but broadcast the stored source from
        ! plane 0 so that all planes stay consistent
        if (present(force_no_update)) then
            if (force_no_update) then
                update_performed = .false.
                call MPI_Bcast(self%src_iol, mesh%get_n_points_inner(), MPI_FP, 0, &
                    comm_handler%get_comm_planes(), ierr )
            endif
        endif

        if (update_performed) then

            if (is_rank_info_writer) then
                write(get_stdout(),*)''
                write(get_stdout(),*)'Updating ion-orbit loss source at tau = ', tau
            endif

            call perf_start('../update_iol_source')
            call self%update_iol_source(comm_handler, equi, mesh, polars)
            call perf_stop('../update_iol_source')

            ! Restart the running averages for the next averaging frame
            self%nt_av = 0
            self%dens = 0.0_GP
            self%temp_ion = 0.0_GP
            self%pot = 0.0_GP
            self%pot_sp = 0.0_GP

            if (is_rank_info_writer) then
                write(get_stdout(),*)''
            endif

        endif

        ! Add the (possibly freshly updated) IOL source to the vorticity source
        !$OMP PARALLEL DO PRIVATE(kp)
        do kp = 1, mesh%get_n_points_inner()
            src_vort(kp) = src_vort(kp) + self%src_iol(kp)
        enddo
        !$OMP END PARALLEL DO

        call perf_stop('source_iol')

    end subroutine driver_iol
                   
    module subroutine add_fields_to_averaged_internal_fields(self, comm_handler, &
        equi,  mesh, polars, ne, pot, ti)
        !! Fold the current density, ion temperature and electrostatic
        !! potential fields into the running time averages stored in `self`
        !! (dens, temp_ion, pot, pot_sp). Fields are first mapped from the
        !! Cartesian mesh to the polar/IOL grid, summed over planes via
        !! MPI_Allreduce, divided by the number of planes (i.e. plane-averaged),
        !! and combined with the existing averages using the incremental mean
        !! avg_new = (avg_old * nt_av + sample) / (nt_av + 1).
        !! Finally nt_av is incremented by one.
        class(iol_source_t), intent(inout) :: self
        type(comm_handler_t), intent(in) :: comm_handler
        class(equilibrium_t), intent(inout) :: equi
        type(mesh_cart_t), intent(inout) :: mesh
        type(polars_t), intent(in) :: polars
        type(variable_t), intent(in) :: ne 
        type(variable_t), intent(in) :: pot
        type(variable_t), intent(in) :: ti
    
        integer :: ip, jp, k, kc, ierr
        real(GP) :: rho, df, dft
        real(GP), dimension(self%jmax, self%imax) :: ne_pol, ti_pol
        real(GP), dimension(self%nrho_sp) :: tmp_sp
    
        ! Map fields to polar grid (each rank fills its local contribution)
        call map_cart_to_polar(mesh, polars, ne%vals,  ne_pol)
        call map_cart_to_polar(mesh, polars, ti%vals,  ti_pol)
                
        ! Sum the per-plane contributions across the planes communicator;
        ! the division by get_nplanes() below turns the sum into an average
        call MPI_allreduce(MPI_IN_PLACE, ne_pol,  self%imax*self%jmax, &
            MPI_FP, MPI_SUM, comm_handler%get_comm_planes(), ierr)
        call MPI_allreduce(MPI_IN_PLACE, ti_pol,  self%imax*self%jmax, &
            MPI_FP, MPI_SUM, comm_handler%get_comm_planes(), ierr)
            
        ! Flip indices of ne_pol and ti_pol for flipped equilibra
        ! (only applies to numerical equilibria with flip_z set)
        select type(equi)
            type is(numerical_t)
                if (equi%flip_z) then
                    do ip = 1, self%imax
                        call flip_array_direction(self%jmax, ne_pol(:, ip))
                        call flip_array_direction(self%jmax, ti_pol(:, ip))
                    enddo
                endif
        end select            
        
        ! Incremental averaging of density and ion temperature on the IOL
        ! grid; ne_nrm / ti_nrm convert from code units to physical units
        ! (temperature additionally scaled by the elementary charge, i.e.
        ! eV -> J -- TODO confirm units against iol_eval's expectations)
        !$OMP PARALLEL PRIVATE(ip, jp)
        !$OMP DO    
        do ip = 1, self%imax
        
            do jp = 1, self%jmax
                self%dens(jp, ip) = self%dens(jp, ip) * self%nt_av + &
                    self%ne_nrm * ne_pol(jp, ip) / comm_handler%get_nplanes()
                self%dens(jp, ip) =  self%dens(jp, ip) / (self%nt_av + 1)
                
                
                self%temp_ion(jp, ip)  = self%temp_ion(jp, ip) * self%nt_av + &
                        self%ti_nrm * elementary_charge_SI * ti_pol(jp, ip) &
                        / comm_handler%get_nplanes() 
                self%temp_ion(jp, ip) =  self%temp_ion(jp, ip) / (self%nt_av + 1) 
            enddo
        enddo
        !$OMP END DO
        !$OMP END PARALLEL
        
        ! Gather the electrostatic potential at the spline-grid sample points
        ! (indmesh_sp maps spline-grid index -> Cartesian mesh index)
        !$OMP PARALLEL DO PRIVATE(k)
        do k = 1, self%nrho_sp
            tmp_sp(k) = pot%vals(self%indmesh_sp(k))
        enddo
        !$OMP END PARALLEL DO
        
        ! Sum over planes; averaged via the get_nplanes() division below
        call MPI_allreduce(MPI_IN_PLACE, tmp_sp, self%nrho_sp, &
            MPI_FP, MPI_SUM, comm_handler%get_comm_planes(), ierr)
        
        ! Incremental averaging of the potential on the spline grid
        !$OMP PARALLEL PRIVATE(k, ip, rho, kc, df, dft)
        !$OMP DO
        do k = 1, self%nrho_sp
            self%pot_sp(k) = self%pot_sp(k) * self%nt_av + &
                    self%ti_nrm * tmp_sp(k) / comm_handler%get_nplanes()
            self%pot_sp(k) = self%pot_sp(k) / (self%nt_av + 1) 
        enddo
        !$OMP END DO
        
        ! Electrostatic potential on iol grid constant along theta
        ! For consistency with spline grid use nearest neighbour interpolation 
        ! from pot_sp
        ! NOTE(review): assumes nrho_sp >= 1, otherwise kc stays 0 and
        ! self%pot_sp(kc) would be out of bounds -- confirm upstream guarantee
        !$OMP DO
        do ip = 1, self%imax
            rho = polars%grid%get_rho(ip)
            df = GP_LARGEST
            kc = 0
            do k = 1, self%nrho_sp
                dft = abs(rho-self%rhopts_sp(k))
                if (dft < df) then
                    df = dft
                    kc = k
                endif
            enddo
            self%pot(:,ip) = self%pot_sp(kc)
        enddo
        !$OMP END DO
        !$OMP END PARALLEL
        
        ! Advance averaging number
        self%nt_av = self%nt_av + 1

    end subroutine
    
    module subroutine update_iol_source(self, comm_handler, equi, mesh, polars)
        !! Recompute the IOL vorticity source from the accumulated averaged
        !! fields. The expensive kernel `iol_eval` runs only on plane 0 (the
        !! result is identical on every rank); the resulting polar-grid field
        !! Npers is then broadcast to all planes, mapped to the Cartesian
        !! mesh, and stored (renormalized) in self%src_iol.
        class(iol_source_t), intent(inout) :: self
        type(comm_handler_t), intent(in) :: comm_handler
        class(equilibrium_t), intent(inout) :: equi
        type(mesh_cart_t), intent(inout) :: mesh
        type(polars_t), intent(in) :: polars
        
        ! IOL particle loss rate on the polar (IOL) grid
        real(GP), dimension(self%jmax, self%imax) :: Npers
        ! Same quantity mapped onto the full Cartesian mesh
        real(GP), dimension(mesh%get_n_points()) :: npers_cart
        
        integer :: iplane, ierr, ki, l, ip
                                             
        Npers       = 0.0_GP        
        iplane =  comm_handler%get_plane()
        if (iplane == 0) then
            ! For memory reasons only execute on iplane=0 
            ! (computation is identical for all ranks)
        
            ! Evaluate the IOL loss rate from the averaged density, ion
            ! temperature and potential (all stored in self); all arguments
            ! are passed positionally -- keep order in sync with iol_eval
            call iol_eval(self%imax, self%jmax, self%signBt, self%signIp, &
                          self%nrho_sp, self%xplane_n, &
                          self%RX, self%zX, self%BphiX, self%psirange_conf_red, &
                          self%idx_imp, self%idx_X, &
                          self%invs_aspc, self%R_magaxis, &
                          self%Rmin_m, self%Rmin_b, &
                          self%mass, self%charge, &
                          self%kmax, self%lmax, self%mmax, &
                          self%er_mult, self%ti_mult, &
                          self%rhopts_sp, &
                          self%Rmincontour_sp, &
                          self%pot_sp, &
                          self%sep_Rcoord, &            
                          self%sep_cw, &
                          self%sep_cw_mod, &
                          self%sep_ccw, &
                          self%sep_ccw_mod, &
                          self%Rcoord, &
                          self%zcoord, &
                          self%Babs, &
                          self%Bphi, &
                          self%psi, &
                          self%temp_ion, &
                          self%pot, &
                          self%safetyf, &
                          self%dens, &
                          self%xplane_R, &
                          self%xplane_psi, &
                          self%xplane_Babs, &
                          self%xplane_Bphi, &
                          Npers, &
                          dbgout = .false. )

            ! Flip indices of Npers back for flipped equilibra
            ! (mirrors the forward flip applied to the input fields)
            select type(equi)
            type is(numerical_t)
                if (equi%flip_z) then
                    do ip = 1, self%imax
                        call flip_array_direction(self%jmax, Npers(:,ip))
                    enddo
                endif
            end select

        endif      
                           
        ! Distribute the plane-0 result to all planes
        call MPI_Bcast(Npers, self%imax*self%jmax, MPI_FP, 0, &
            comm_handler%get_comm_planes(), ierr )
        
        ! Map source from IOL=polar grid to Cartesian grid (inner points)
        call map_polar_to_cart(equi, mesh, polars, Npers, npers_cart, 1)
        ! Store with code-unit renormalization (tau_nrm/ne_nrm); indexing via
        ! inner_indices restricts to inner mesh points
        !$OMP PARALLEL DO PRIVATE(ki, l)
        do ki = 1, mesh%get_n_points_inner()
            l = mesh%inner_indices(ki)
            self%src_iol(ki) = npers_cart(l) * self%tau_nrm / self%ne_nrm
        enddo
        !$OMP END PARALLEL DO
        
    end subroutine
    
end submodule