submodule(init_braginskii_m) init_braginskii_profile_s
!! Routines for initialisation of variables with profile.
!!
!! Builds an initial state from radial (rho-dependent) pedestal-like profiles
!! for density and temperatures, then derives consistent initial values for
!! the electrostatic potential, parallel current, parallel electromagnetic
!! potential and parallel ion velocity by solving the corresponding
!! elliptic / field-aligned problems. Small random fluctuations are added to
!! ne, te, ti at the end to seed turbulence.

    use communication_planes_m, only : start_comm_planes, &
                                       finalize_comm_planes
    use mpi_mapping_auxiliaries_m, only : getdata_fwdbwdplane
    use solver_aligned3d_m, only : solve_aligned3d_adjoint, params_solver_aligned3d_t
    use descriptors_braginskii_m, only : BND_TYPE_NEUMANN, BND_TYPE_DIRICHLET_ZERO, &
                                         BND_BRAGTYPE_FLOATING_POTENTIAL_LOCAL, &
                                         BND_BRAGTYPE_ZONAL_NEUMANN
    use descriptors_m, only : DISTRICT_CLOSED, DISTRICT_CORE
    use boundaries_perp_m, only : extrapolate_ghost_points
    use helmholtz_operator_m, only : helmholtz_single_inner
    use helmholtz_solver_m, only : helmholtz_solver_t
    ! NOTE(review): start_comm_planes, finalize_comm_planes and
    ! helmholtz_solver_factory appear unused in this submodule -- confirm
    ! whether these imports can be dropped.
    use helmholtz_solver_factory_m, only : helmholtz_solver_factory
    use solver3d_m, only : solver3d_t
    use solver3d_factory_m, only : solver3d_factory
    use interpolation_m, only : interpol1d
    use polar_operators_m, only : surface_average

    ! Parameters
    use params_brag_model_m, only : &
        rhos, tratio
    use params_brag_boundaries_perp_m, only : &
        bnddescr_pot_core
    use params_brag_boundaries_parpen_m, only : &
        pen_epsinv, sheath_potential_lambda_sh

    implicit none

contains

    module subroutine init_state_profile(comm_handler, &
                                         equi, equi_on_cano, equi_on_stag, &
                                         mesh_cano, mesh_stag, &
                                         hsolver_cano, hsolver_stag, &
                                         map, &
                                         penalisation_cano, penalisation_stag, &
                                         polars_cano, polars_stag, &
                                         opsinplane_cano, opsinplane_stag, &
                                         filename, &
                                         boundaries, &
                                         ne, pot, vort, upar, jpar, apar, te, ti)
        !! Creates the full initial state from the `init_profile` namelist in
        !! `filename`: initialises all variables, sets sine-shaped radial
        !! profiles for ne/te/ti, then computes pot, jpar, apar and upar
        !! consistently, and finally adds random fluctuations to ne/te/ti.
        type(comm_handler_t), intent(in) :: comm_handler
            !! Communicators
        class(equilibrium_t), intent(inout) :: equi
            !! Equilibrium
        class(equilibrium_storage_t), intent(in) :: equi_on_cano
            !! Equilibrium quantities on canonical mesh
        class(equilibrium_storage_t), intent(in) :: equi_on_stag
            !! Equilibrium quantities on staggered mesh
        type(mesh_cart_t), intent(inout) :: mesh_cano
            !! Mesh (canonical)
        type(mesh_cart_t), intent(inout) :: mesh_stag
            !! Mesh (staggered)
        class(helmholtz_solver_t), intent(inout) :: hsolver_cano
            !! Elliptic (2D) solver on canonical mesh
        class(helmholtz_solver_t), intent(inout) :: hsolver_stag
            !! Elliptic (2D) solver on staggered mesh
        type(parallel_map_t), intent(in) :: map
            !! Parallel map
        type(penalisation_t), intent(in) :: penalisation_cano
            !! Penalisation (canonical)
        type(penalisation_t), intent(in) :: penalisation_stag
            !! Penalisation (staggered)
        type(polars_t), intent(in) :: polars_cano
            !! Polar grid and operators (canonical)
        type(polars_t), intent(in) :: polars_stag
            !! Polar grid and operators (staggered)
        type(inplane_operators_t), intent(in) :: opsinplane_cano
            !! In-plane operators (canonical)
        type(inplane_operators_t), intent(in) :: opsinplane_stag
            !! In-plane operators (staggered)
        character(len=*), intent(in) :: filename
            !! Namelist file containing the init_profile group
        type(boundaries_braginskii_t), intent(inout) :: boundaries
            !! Boundary information
        type(variable_t), intent(inout) :: ne
            !! Density
        type(variable_t), intent(inout) :: pot
            !! Electrostatic potential
        type(variable_t), intent(inout) :: vort
            !! Vorticity (initialised here but not otherwise set)
        type(variable_t), intent(inout) :: upar
            !! Parallel ion velocity (staggered)
        type(variable_t), intent(inout) :: jpar
            !! Parallel current (staggered)
        type(variable_t), intent(inout) :: apar
            !! Parallel electromagnetic potential (staggered)
        type(variable_t), intent(inout) :: te
            !! Electron temperature
        type(variable_t), intent(inout) :: ti
            !! Ion temperature

        integer :: io_error
        character(len=256) :: io_errmsg
        ! Profile parameters: pedestal top/bottom positions, values and
        ! fluctuation amplitudes for ne, te and ti
        real(GP) :: rho_ped_n, rho_sol_n, val_ped_n, val_sol_n, flucamp_n, &
                    rho_ped_te, rho_sol_te, val_ped_te, val_sol_te, flucamp_te, &
                    rho_ped_ti, rho_sol_ti, val_ped_ti, val_sol_ti, flucamp_ti

        ! For parallel ion velocity (time stepping)
        integer :: par_initupar_nt
        real(GP) :: par_initupar_dtau

        ! Solver controls for the jpar and upar initialisation solves
        real(GP) :: jpar_resmax, upar_resmax
        integer :: jpar_maxiter, jpar_nrestart, jpar_dbgout, &
                   upar_maxiter, upar_nrestart, upar_dbgout
        character(len=16) :: jpar_solver, jpar_krylov_method, upar_solver, upar_krylov_method

        integer :: ki, l, nseed, plane, district
        real(GP) :: phi_cano, x, y, rho, rnd_val, cs_sol
        integer, allocatable, dimension(:) :: seed

        namelist / init_profile / rho_ped_n, rho_sol_n, val_ped_n, val_sol_n, flucamp_n, &
                                  rho_ped_te, rho_sol_te, val_ped_te, val_sol_te, flucamp_te, &
                                  rho_ped_ti, rho_sol_ti, val_ped_ti, val_sol_ti, flucamp_ti, &
                                  par_initupar_nt, par_initupar_dtau, &
                                  jpar_solver, jpar_krylov_method, &
                                  jpar_resmax, jpar_maxiter, jpar_nrestart, jpar_dbgout, &
                                  upar_solver, upar_krylov_method, &
                                  upar_resmax, upar_maxiter, upar_nrestart, upar_dbgout

        ! Read parameters of init_profile -------------------------------------
        ! NOTE(review): hard-coded unit number 20; newunit= would be the
        ! modern idiom -- confirm no clash with other open units.
        open(unit = 20, file = filename, status = 'old', action = 'read',&
             iostat = io_error, iomsg = io_errmsg)
        if (io_error /= 0) then
            call handle_error(io_errmsg, GRILLIX_ERR_NAMELIST, __LINE__, __FILE__)
        endif

        ! default values (used for any entry absent from the namelist)
        rho_ped_n = equi%rhomin
        rho_sol_n = 1.0_GP
        val_ped_n = 1.0_GP
        val_sol_n = 1.0E-1_GP
        flucamp_n = 1.0E-5_GP
        rho_ped_te = equi%rhomin
        rho_sol_te = 1.0_GP
        val_ped_te = 1.0_GP
        val_sol_te = 1.0E-1_GP
        flucamp_te = 1.0E-5_GP
        rho_ped_ti = equi%rhomin
        rho_sol_ti = 1.0_GP
        val_ped_ti = 1.0_GP
        val_sol_ti = 1.0E-1_GP
        flucamp_ti = 1.0E-5_GP
        par_initupar_nt = 50
        par_initupar_dtau = 1.0_GP
        jpar_solver = 'PIM'
        jpar_krylov_method = 'RGMRES'
        jpar_resmax = 1.0E-8_GP
        jpar_maxiter = 50
        jpar_nrestart = 50
        jpar_dbgout = 0
        upar_solver = 'PIM'
        upar_krylov_method = 'RGMRES'
        upar_resmax = 1.0E-8_GP
        upar_maxiter = 10
        upar_nrestart = 10
        upar_dbgout = 0

        read(20, nml = init_profile, iostat = io_error, iomsg = io_errmsg)
        if (io_error /= 0) then
            call handle_error(io_errmsg, GRILLIX_ERR_NAMELIST, __LINE__, __FILE__)
        endif

        ! Echo the effective parameters (info-writer rank only)
        if (is_rank_info_writer) then
            write(get_stdout(),*)''
            write(get_stdout(),*)'Creating profile as initial state --------------------------------------'
            write(get_stdout(),202)rho_ped_n, rho_sol_n, val_ped_n, val_sol_n, flucamp_n, &
                                   rho_ped_te, rho_sol_te, val_ped_te, val_sol_te, flucamp_te, &
                                   rho_ped_ti, rho_sol_ti, val_ped_ti, val_sol_ti, flucamp_ti, &
                                   par_initupar_nt, par_initupar_dtau, &
                                   jpar_solver, jpar_krylov_method, jpar_resmax, jpar_maxiter, jpar_nrestart, jpar_dbgout, &
                                   upar_solver, upar_krylov_method, upar_resmax, upar_maxiter, upar_nrestart, upar_dbgout
202         FORMAT(1X,'rho_ped_n = ',ES14.6E3 /, &
                   1X,'rho_sol_n = ',ES14.6E3 /, &
                   1X,'val_ped_n = ',ES14.6E3 /, &
                   1X,'val_sol_n = ',ES14.6E3 /, &
                   1X,'flucamp_n = ',ES14.6E3 /, &
                   1X,'rho_ped_te = ',ES14.6E3 /, &
                   1X,'rho_sol_te = ',ES14.6E3 /, &
                   1X,'val_ped_te = ',ES14.6E3 /, &
                   1X,'val_sol_te = ',ES14.6E3 /, &
                   1X,'flucamp_te = ',ES14.6E3 /, &
                   1X,'rho_ped_ti = ',ES14.6E3 /, &
                   1X,'rho_sol_ti = ',ES14.6E3 /, &
                   1X,'val_ped_ti = ',ES14.6E3 /, &
                   1X,'val_sol_ti = ',ES14.6E3 /, &
                   1X,'flucamp_ti = ',ES14.6E3 /, &
                   1X,'par_initupar_nt = ',I8 /, &
                   1X,'par_initupar_dtau= ',ES14.6E3 /, &
                   1X,'jpar_solver = ',A16 /, &
                   1X,'jpar_krylov_method = ',A16 /, &
                   1X,'jpar_resmax = ',ES14.6E3 /, &
                   1X,'jpar_maxiter = ',I8 /, &
                   1X,'jpar_nrestart = ',I8 /, &
                   1X,'jpar_dbgout = ',I8 /, &
                   1X,'upar_solver = ',A16 /, &
                   1X,'upar_krylov_method = ',A16 /, &
                   1X,'upar_resmax = ',ES14.6E3 /, &
                   1X,'upar_maxiter = ',I8 /, &
                   1X,'upar_nrestart = ',I8 /, &
                   1X,'upar_dbgout = ',I8 ,X )
        endif
        close(20)

        phi_cano = mesh_cano%get_phi()

        ! Initialise variables (canonical: ne, pot, vort, te, ti;
        ! staggered: upar, jpar, apar)
        call ne%init(comm_handler, mesh_cano%get_n_points(), staggered = .false.)
        call pot%init(comm_handler, mesh_cano%get_n_points(), staggered = .false.)
        call vort%init(comm_handler, mesh_cano%get_n_points(), staggered = .false.)
        call upar%init(comm_handler, mesh_stag%get_n_points(), staggered = .true.)
        call jpar%init(comm_handler, mesh_stag%get_n_points(), staggered = .true.)
        call apar%init(comm_handler, mesh_stag%get_n_points(), staggered = .true.)
        call te%init(comm_handler, mesh_cano%get_n_points(), staggered = .false.)
        call ti%init(comm_handler, mesh_cano%get_n_points(), staggered = .false.)

        if (is_rank_info_writer) then
            write(get_stdout(),*)'  ...setting up profiles for ne, te, ti'
        endif

        ! Set profiles for ne, te, ti --------------------------------------------
        ! Inside the confined region the sine-shaped pedestal profile is used;
        ! everywhere else the flat SOL value applies.
        !$omp parallel default(none) private(l, x, y, rho, district) &
        !$omp shared(equi, mesh_cano, phi_cano, ne, te, ti, &
        !$omp rho_ped_n, rho_sol_n, val_ped_n, val_sol_n, &
        !$omp rho_ped_te, rho_sol_te, val_ped_te, val_sol_te, &
        !$omp rho_ped_ti, rho_sol_ti, val_ped_ti, val_sol_ti)
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            x = mesh_cano%get_x(l)
            y = mesh_cano%get_y(l)
            rho = equi%rho(x, y, phi_cano)
            district = equi%district(x, y, phi_cano)
            if ((district == DISTRICT_CLOSED) .or. (district == DISTRICT_CORE)) then
                ne%vals(l) = profile_sin(rho_ped_n, rho_sol_n, val_ped_n, val_sol_n, rho)
                te%vals(l) = profile_sin(rho_ped_te, rho_sol_te, val_ped_te, val_sol_te, rho)
                ti%vals(l) = profile_sin(rho_ped_ti, rho_sol_ti, val_ped_ti, val_sol_ti, rho)
            else
                ne%vals(l) = val_sol_n
                te%vals(l) = val_sol_te
                ti%vals(l) = val_sol_ti
            endif
        enddo
        !$omp end do
        !$omp end parallel

        ! Compute potential ------------------------------------------------------
        if (is_rank_info_writer) then
            write(get_stdout(),*)'  ...initialising electrostatic potential'
        endif
        call compute_pot_init_profile(comm_handler, equi, equi_on_cano, mesh_cano, hsolver_cano, polars_cano, &
                                      boundaries, &
                                      ne%vals, te%vals, ti%vals, pot%vals)

        ! Compute parallel current------------------------------------------------
        if (is_rank_info_writer) then
            write(get_stdout(),*)'  ...initialising parallel current'
        endif
        call compute_jpar_init_profile(comm_handler, equi, equi_on_cano, equi_on_stag, &
                                       mesh_cano, mesh_stag, map, &
                                       opsinplane_cano, opsinplane_stag, boundaries, &
                                       ne%vals, te%vals, ti%vals, jpar%vals, &
                                       jpar_solver, jpar_krylov_method, &
                                       jpar_resmax, jpar_maxiter, jpar_nrestart, jpar_dbgout)

        ! Compute parallel electromagnetic potential -----------------------------
        if (is_rank_info_writer) then
            write(get_stdout(),*)'  ...initialising parallel electromagnetic potential'
        endif
        call compute_apar_init_profile(equi, mesh_stag, hsolver_stag, boundaries, jpar%vals, apar%vals)

        ! Compute parallel ion velocity ------------------------------------------
        if (is_rank_info_writer) then
            write(get_stdout(),*)'  ...initialising parallel ion velocity'
        endif
        ! Sound speed evaluated at the SOL values of te/ti
        cs_sol = sqrt(val_sol_te + tratio * val_sol_ti)
        call compute_upar_init_profile(comm_handler, &
                                       equi, equi_on_cano, equi_on_stag, &
                                       mesh_cano, mesh_stag, &
                                       map, &
                                       penalisation_cano, penalisation_stag, &
                                       par_initupar_nt, par_initupar_dtau, boundaries, &
                                       cs_sol, upar%vals, &
                                       upar_solver, upar_krylov_method, &
                                       upar_resmax, upar_maxiter, upar_nrestart, upar_dbgout)

        ! Add fluctuations to ne, te, ti -----------------------------------------
        ! create seed for random number generator (per-plane, so each poloidal
        ! plane gets a different fluctuation pattern)
        plane = comm_handler%get_plane()
        call random_seed(size = nseed)
        allocate(seed(nseed))
        seed(1:nseed) = plane
        call random_seed(put = seed)

        ! NOTE(review): random_number is called inside an OpenMP region; the
        ! per-thread RNG state makes results dependent on the thread schedule
        ! -- confirm reproducibility is not required here.
        !$omp parallel default(none) private(ki, l, rnd_val) &
        !$omp shared(mesh_cano, ne, te, ti, flucamp_n, flucamp_te, flucamp_ti)
        !$omp do
        do ki = 1, mesh_cano%get_n_points_inner()
            l = mesh_cano%inner_indices(ki)
            call random_number(rnd_val)
            ne%vals(l) = ne%vals(l) + flucamp_n*sin(TWO_PI*rnd_val)
            call random_number(rnd_val)
            te%vals(l) = te%vals(l) + flucamp_te*sin(TWO_PI*rnd_val)
            call random_number(rnd_val)
            ti%vals(l) = ti%vals(l) + flucamp_ti*sin(TWO_PI*rnd_val)
        enddo
        !$omp end do
        !$omp end parallel

        if (is_rank_info_writer) then
            write(get_stdout(),*)'Profile (initial state) set up -----------------------------------------'
            write(get_stdout(),*)''
        endif

    end subroutine

    pure real(GP) function profile_sin(rho_ped, rho_sol, val_ped, val_sol, rho)
        !! Returns profile based on sin function, i.e. constant val_ped for
        !! rho < rho_ped, constant val_sol for rho > rho_sol, and a smooth
        !! half-period sine transition in between (val_ped at rho_ped,
        !! val_sol at rho_sol).
        real(GP), intent(in) :: rho_ped
            !! Radial position of pedestal top
        real(GP), intent(in) :: rho_sol
            !! Radial position of pedestal bottom
        real(GP), intent(in) :: val_ped
            !! Value at pedestal top
        real(GP), intent(in) :: val_sol
            !! Value at pedestal bottom
        real(GP), intent(in) :: rho
            !! Radial position

        real(GP) :: amp, offset, omega, phase

        if (rho < rho_ped) then
            profile_sin = val_ped
        elseif (rho > rho_sol) then
            profile_sin = val_sol
        else
            ! Half sine wave between (rho_ped, val_ped) and (rho_sol, val_sol):
            ! the phase is chosen so the argument runs from -pi/2 to +pi/2.
            amp = (val_ped - val_sol) / 2.0_GP
            offset = val_ped - amp
            omega = PI / ( rho_sol - rho_ped )
            phase = - 0.5_GP*PI * ( rho_sol + rho_ped ) / ( rho_sol - rho_ped )
            profile_sin = -amp * sin(omega*rho + phase) + offset
        endif
    end function

    subroutine compute_pot_init_profile(comm_handler, equi, equi_on_cano, mesh_cano, hsolver_cano, polars_cano, &
                                        boundaries, &
                                        nev, tev, tiv, potv)
        !! Computes initial state for potential based on generalised vorticity = 0
        type(comm_handler_t), intent(in) :: comm_handler
            !! Communicators
        class(equilibrium_t), intent(inout) :: equi
            !! Equilibrium
        class(equilibrium_storage_t), intent(in) :: equi_on_cano
            !! Equilibrium quantities on canonical mesh
        type(mesh_cart_t), intent(inout) :: mesh_cano
            !! Mesh (canonical)
        class(helmholtz_solver_t), intent(inout) :: hsolver_cano
            !! Elliptic (2D) solver on canonical mesh
        type(polars_t), intent(in) :: polars_cano
            !! Polar grid and operators (canonical)
        type(boundaries_braginskii_t), intent(in) :: boundaries
            !! Boundary information
        real(GP), dimension(mesh_cano%get_n_points()), intent(in) :: nev
            !! Values of density
        real(GP), dimension(mesh_cano%get_n_points()), intent(in) :: tev
            !! Values of electron temperature
        real(GP), dimension(mesh_cano%get_n_points()), intent(in) :: tiv
            !! Values of ion temperature
        real(GP), dimension(mesh_cano%get_n_points()), intent(inout) :: potv
            !! Values of electrostatic potential (on input initial guess)

        integer, parameter :: intorder=3
        integer :: l, ki, kb, kg, nrho, district
        real(GP) :: fac, phi_cano, x, y, drho, rho_core_rel, pot_core
        real(GP), dimension(mesh_cano%get_n_points()) :: co, codia, piv, rhs
        real(GP), dimension(mesh_cano%get_n_points_inner()) :: xi, lambda
        integer :: sinfo
        real(GP) :: res
        real(GP), dimension(polars_cano%grid%get_nrho()) :: pot_zon

        fac = rhos**2
        phi_cano = mesh_cano%get_phi()

        ! Set up coefficients and right hand side
        !$omp parallel default(none) private(ki, kb, kg, l, x, y) &
        !$omp shared(equi, equi_on_cano, mesh_cano, phi_cano, &
        !$omp fac, tratio, sheath_potential_lambda_sh, boundaries, &
        !$omp nev, tiv, tev, co, codia, piv, lambda, xi, rhs)
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            x = mesh_cano%get_x(l)
            y = mesh_cano%get_y(l)
            ! Elliptic coefficient ~ n/B^2 * Jacobian; codia is the same
            ! coefficient without the density factor (for the diamagnetic part)
            co(l) = nev(l) / equi_on_cano%btor(l)**2 * equi%jacobian(x, y, phi_cano)
            codia(l) = co(l) / nev(l)
            piv(l) = nev(l) * tiv(l)
        enddo
        !$omp end do
        !$omp do
        do ki = 1, mesh_cano%get_n_points_inner()
            l = mesh_cano%inner_indices(ki)
            x = mesh_cano%get_x(l)
            y = mesh_cano%get_y(l)
            lambda(ki) = 0.0_GP
            xi(ki) = fac / equi%jacobian(x, y, phi_cano)
            ! rhs from the ion-pressure (diamagnetic) contribution
            rhs(l) = - tratio * helmholtz_single_inner(mesh_cano, piv, l, codia, xi(ki))
        enddo
        !$omp end do
        !$omp do
        do kb = 1, mesh_cano%get_n_points_boundary()
            l = mesh_cano%boundary_indices(kb)
            select case(boundaries%pot%types(kb))
            case(BND_BRAGTYPE_FLOATING_POTENTIAL_LOCAL)
                ! Floating (sheath) potential boundary value
                rhs(l) = sheath_potential_lambda_sh * tev(l)
            case default
                rhs(l) = 0.0_GP
            end select
        enddo
        !$omp end do
        !$omp do
        do kg = 1, mesh_cano%get_n_points_ghost()
            l = mesh_cano%ghost_indices(kg)
            rhs(l) = 0.0_GP
        enddo
        !$omp end do
        !$omp end parallel

        call hsolver_cano%update(co, lambda, xi, &
                                 bnd_type_core=BND_TYPE_NEUMANN, &
                                 bnd_type_wall=BND_TYPE_DIRICHLET_ZERO, &
                                 bnd_type_dome=BND_TYPE_DIRICHLET_ZERO, &
                                 bnd_type_out=BND_TYPE_DIRICHLET_ZERO)
        call hsolver_cano%solve(rhs, potv, res, sinfo)
        if (sinfo < 0) then
            call handle_error('Solve for potential at initial state initialisation failed', &
                              GRILLIX_ERR_SOLVER2D, __LINE__, __FILE__, &
                              error_info_t('Sinfo: ',[sinfo]))
        endif

        ! re-solve for zonal Neumann boundary condition with obtained zonal average value at core as Dirichlet condition at core
        if (bnddescr_pot_core == BND_BRAGTYPE_ZONAL_NEUMANN) then

            ! Compute zonal value of potential at core: flux-surface average of
            ! the first solve, interpolated to the core radius
            nrho = polars_cano%grid%get_nrho()
            drho = polars_cano%grid%get_drho()
            call surface_average(comm_handler%get_comm_planes(), mesh_cano, polars_cano, potv, pot_zon)
            rho_core_rel = equi%rhomin - polars_cano%grid%get_rhopol_min()
            pot_core = interpol1d(intorder, nrho, drho, pot_zon, rho_core_rel)

            ! Overwrite rhs at the core boundary with the zonal core value
            ! (acts as the Dirichlet value there)
            !$omp parallel default(none) private(kb, l, district) &
            !$omp shared(mesh_cano, rhs, pot_core)
            !$omp do
            do kb = 1, mesh_cano%get_n_points_boundary()
                l = mesh_cano%boundary_indices(kb)
                district = mesh_cano%get_district(l)
                if (district == DISTRICT_CORE) then
                    rhs(l) = pot_core
                endif
            enddo
            !$omp end do
            !$omp end parallel

            call hsolver_cano%update(co, lambda, xi, &
                                     bnd_type_core=BND_TYPE_DIRICHLET_ZERO, &
                                     bnd_type_wall=BND_TYPE_DIRICHLET_ZERO, &
                                     bnd_type_dome=BND_TYPE_DIRICHLET_ZERO, &
                                     bnd_type_out=BND_TYPE_DIRICHLET_ZERO)
            call hsolver_cano%solve(rhs, potv, res, sinfo)
            if (sinfo < 0) then
                call handle_error('Re-solve for potential at initial state initialisation failed', &
                                  GRILLIX_ERR_SOLVER2D, __LINE__, __FILE__, &
                                  error_info_t('Sinfo: ',[sinfo]))
            endif
        endif

        ! Fill ghost points (extrapolate_ghost_points is called from inside a
        ! parallel region; presumably it contains its own worksharing)
        !$omp parallel default(none) shared(mesh_cano, potv)
        call extrapolate_ghost_points(mesh_cano, potv)
        !$omp end parallel

    end subroutine

    subroutine compute_jpar_init_profile(comm_handler, equi, equi_on_cano, equi_on_stag, &
                                         mesh_cano, mesh_stag, map, &
                                         opsinplane_cano, opsinplane_stag, boundaries, &
                                         nev, tev, tiv, jparv, &
                                         jpar_solver, jpar_krylov_method, jpar_resmax, &
                                         jpar_maxiter, jpar_nrestart, jpar_dbgout)
        !! Computes initial state for parallel current according to Pfirsch-Schlueter
        type(comm_handler_t), intent(in) :: comm_handler
            !! Communicators
        class(equilibrium_t), intent(inout) :: equi
            !! Equilibrium
        class(equilibrium_storage_t), intent(in) :: equi_on_cano
            !! Equilibrium quantities on canonical mesh
        class(equilibrium_storage_t), intent(in) :: equi_on_stag
            !! Equilibrium quantities on staggered mesh
        type(mesh_cart_t), intent(inout) :: mesh_cano
            !! Mesh (canonical)
        type(mesh_cart_t), intent(inout) :: mesh_stag
            !! Mesh (staggered)
        type(parallel_map_t), intent(in) :: map
            !! Map
        type(inplane_operators_t), intent(in) :: opsinplane_cano
            !! In-plane operators (canonical)
        type(inplane_operators_t), intent(in) :: opsinplane_stag
            !! In-plane operators (staggered)
        type(boundaries_braginskii_t), intent(in) :: boundaries
            !! Boundary information
        real(GP), dimension(mesh_cano%get_n_points()), intent(in) :: nev
            !! Values of density
        real(GP), dimension(mesh_cano%get_n_points()), intent(in) :: tev
            !! Values of electron temperature
        real(GP), dimension(mesh_cano%get_n_points()), intent(in) :: tiv
            !! Values of ion temperature
        real(GP), dimension(mesh_stag%get_n_points()), intent(inout) :: jparv
            !! Values of parallel current (on input initial guess)
        character(len=*), intent(in) :: jpar_solver
            !! Type of parallel solver
        character(len=*), intent(in) :: jpar_krylov_method
            !! Krylov method to be used as solver
        real(GP), intent(in) :: jpar_resmax
            !! Maximum allowed residual (stopping criterion), default 1E-8
        integer, intent(in) :: jpar_maxiter
            !! Maximum number of (outer) iterations, default = 10
        integer, intent(in) :: jpar_nrestart
            !! Maximum number of iterations before restart (~inner iterations)
        integer, intent(in) :: jpar_dbgout
            !! Debug output level

        integer :: ki, kb, kg, l, district, niter, np_cano_fwd
        real(GP) :: res, true_res
        real(GP), allocatable, dimension(:) :: rhs, ptot, ptot_halo, ptot_stag
        class(solver3d_t), allocatable :: solver3d

        ! Compute total pressure ptot = n*(te + tau*ti) on the canonical mesh
        allocate(ptot(mesh_cano%get_n_points()))
        !$omp parallel default(none) private(l) &
        !$omp shared(mesh_cano, tratio, nev, tev, tiv, ptot)
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            ptot(l) = nev(l) * (tev(l) + tratio * tiv(l))
        enddo
        !$omp end do
        !$omp end parallel

        if (equi%is_axisymmetric()) then

            ! Compute divergence of diamagnetic curvature (=rhs) on canonical mesh
            ! (axisymmetric case: mesh_cano and mesh_stag have equal point counts,
            ! see matvec_divpar below)
            allocate(rhs(mesh_cano%get_n_points()))
            !$omp parallel default(none) private(ki, l, district) &
            !$omp shared(mesh_cano, opsinplane_cano, ptot, rhs, jparv)
            !$omp do
            do ki = 1, mesh_cano%get_n_points_inner()
                l = mesh_cano%inner_indices(ki)
                district = mesh_cano%get_district(l)
                if (district == DISTRICT_CLOSED) then
                    rhs(l) = opsinplane_cano%curvature(ptot, l)
                else
                    rhs(l) = 0.0_GP
                endif
                jparv(l) = 0.0_GP
            enddo
            !$omp end do
            !$omp do
            do kb = 1, mesh_cano%get_n_points_boundary()
                l = mesh_cano%boundary_indices(kb)
                rhs(l) = 0.0_GP
                jparv(l) = 0.0_GP
            enddo
            !$omp end do
            !$omp do
            do kg = 1, mesh_cano%get_n_points_ghost()
                l = mesh_cano%ghost_indices(kg)
                rhs(l) = 0.0_GP
                jparv(l) = 0.0_GP
            enddo
            !$omp end do
            !$omp end parallel
            deallocate(ptot)

            ! Initialise 3d solver (staggered-mesh sized; one-sided parallel
            ! divergence operator)
            call solver3d_factory(jpar_solver, solver3d)
            call solver3d%create(comm_handler%get_comm_planes(), &
                                 krylov_method=jpar_krylov_method, &
                                 ndim_loc=mesh_stag%get_n_points(), &
                                 resmax=jpar_resmax, &
                                 matvec=matvec_divpar, &
                                 maxiter=jpar_maxiter, &
                                 nrestart=jpar_nrestart, &
                                 dbgout=jpar_dbgout)

        else

            ! Compute divergence of diamagnetic current (=rhs) on staggered mesh:
            ! lift ptot from canonical to staggered mesh via forward-plane halo
            call getdata_fwdbwdplane(comm_handler%get_comm_planes(), &
                                     1, mesh_cano%get_n_points(), &
                                     ptot, np_cano_fwd, ptot_halo)
            allocate(ptot_stag(mesh_stag%get_n_points()))
            !$omp parallel default(none) shared(map, ptot, ptot_halo, ptot_stag)
            call map%lift_cano_to_stag(ptot, ptot_halo, ptot_stag)
            !$omp end parallel
            deallocate(ptot_halo)
            deallocate(ptot)

            allocate(rhs(mesh_stag%get_n_points()))
            !$omp parallel default(none) private(ki, l, district) &
            !$omp shared(mesh_stag, opsinplane_stag, ptot_stag, rhs, jparv)
            !$omp do
            do ki = 1, mesh_stag%get_n_points_inner()
                l = mesh_stag%inner_indices(ki)
                district = mesh_stag%get_district(l)
                if (district == DISTRICT_CLOSED) then
                    rhs(l) = opsinplane_stag%curvature(ptot_stag, l)
                else
                    rhs(l) = 0.0_GP
                endif
                jparv(l) = 0.0_GP
            enddo
            !$omp end do
            !$omp do
            do kb = 1, mesh_stag%get_n_points_boundary()
                l = mesh_stag%boundary_indices(kb)
                rhs(l) = 0.0_GP
                jparv(l) = 0.0_GP
            enddo
            !$omp end do
            !$omp do
            do kg = 1, mesh_stag%get_n_points_ghost()
                l = mesh_stag%ghost_indices(kg)
                rhs(l) = 0.0_GP
                jparv(l) = 0.0_GP
            enddo
            !$omp end do
            !$omp end parallel
            deallocate(ptot_stag)

            ! Initialise 3d solver (centred parallel divergence operator)
            call solver3d_factory(jpar_solver, solver3d)
            call solver3d%create(comm_handler%get_comm_planes(), &
                                 krylov_method=jpar_krylov_method, &
                                 ndim_loc=mesh_stag%get_n_points(), &
                                 resmax=jpar_resmax, &
                                 matvec=matvec_divpar_centered, &
                                 maxiter=jpar_maxiter, &
                                 nrestart=jpar_nrestart, &
                                 dbgout=jpar_dbgout)

        endif

        ! Perform solve: div_par(jpar) = rhs
        call solver3d%solve(comm_handler%get_comm_planes(), &
                            rhs, jparv, res, niter, true_res)
        deallocate(solver3d)
        deallocate(rhs)

        ! niter < 0 signals non-convergence; treated as a warning, not fatal
        if (niter < 0) then
            call handle_error('Solver for jpar at initial state initialisation did not converge', &
                              GRILLIX_WRN_INITIALISATION, __LINE__, __FILE__, &
                              error_info_t('niter, res, true_res: ',[niter],[res, true_res]))
        endif

    contains

        subroutine matvec_divpar(u, v, ipar)
            !! Applies parallel divergence v = div_par(u)
            !! u is located on canonical mesh and v on staggered mesh.
            !! Used for axisymmetric case,
            !! where number of points for mesh_cano and mesh_stag are equal
            real(GP), dimension(*) :: u
                !! Input vector (canonical mesh)
            real(GP), dimension(*) :: v
                !! Output vector (staggered mesh layout, same size here)
            integer :: ipar(*)
                !! Solver integer parameters (unused here; required signature)

            integer :: np_stag_bwd, ki, kb, kg, l
            real(GP), allocatable, dimension(:) :: uhalo

            ! MPI communication with the backward staggered plane
            call getdata_fwdbwdplane(comm_handler%get_comm_planes(), &
                                     -1, mesh_stag%get_n_points(), &
                                     u, np_stag_bwd, uhalo)

            !$omp parallel default(none) private(ki, kb, kg, l, district) &
            !$omp shared(mesh_cano, map, u, v, uhalo)
            !$omp do
            do ki = 1, mesh_cano%get_n_points_inner()
                l = mesh_cano%inner_indices(ki)
                district = mesh_cano%get_district(l)
                if (district == DISTRICT_CLOSED) then
                    v(l) = map%par_divb_single(u, uhalo, l)
                else
                    ! Outside closed field lines the operator acts as identity
                    v(l) = u(l)
                endif
            enddo
            !$omp end do
            !$omp do
            do kb = 1, mesh_cano%get_n_points_boundary()
                l = mesh_cano%boundary_indices(kb)
                v(l) = u(l)
            enddo
            !$omp end do
            !$omp do
            do kg = 1, mesh_cano%get_n_points_ghost()
                l = mesh_cano%ghost_indices(kg)
                v(l) = u(l)
            enddo
            !$omp end do
            !$omp end parallel
            deallocate(uhalo)
        end subroutine

        subroutine matvec_divpar_centered(u, v, ipar)
            !! Applies parallel divergence v = div_par(u)
            !! Both u and v are located on staggered mesh.
            !! v = B*grad_par(u/B) via central finite difference along magnetic field lines
            !! Used for non-axisymmetric case,
            !! where number of points for mesh_cano and mesh_stag are different
            real(GP), dimension(*) :: u
                !! Input vector (staggered mesh)
            real(GP), dimension(*) :: v
                !! Output vector (staggered mesh)
            integer :: ipar(*)
                !! Solver integer parameters (unused here; required signature)

            integer :: np_stag_fwd, np_stag_bwd, ki, kb, kg, l
            real(GP), dimension(mesh_stag%get_n_points()) :: ub
            real(GP), allocatable, dimension(:) :: uhalo_bwd, uhalo_fwd

            ! ub = u/B, so the centred difference below yields B*grad_par(u/B)
            !$omp parallel default(none) private(l) shared(equi_on_stag, mesh_stag, u, ub)
            !$omp do
            do l = 1, mesh_stag%get_n_points()
                ub(l) = u(l) / equi_on_stag%absb(l)
            enddo
            !$omp end do
            !$omp end parallel

            ! MPI communication with the backward and forward staggered plane
            call getdata_fwdbwdplane(comm_handler%get_comm_planes(), &
                                     -1, mesh_stag%get_n_points(), &
                                     ub, np_stag_bwd, uhalo_bwd)
            call getdata_fwdbwdplane(comm_handler%get_comm_planes(), &
                                     1, mesh_stag%get_n_points(), &
                                     ub, np_stag_fwd, uhalo_fwd)

            !$omp parallel default(none) private(ki, kb, kg, l, district) &
            !$omp shared(equi_on_stag, mesh_stag, map, u, v, uhalo_bwd, uhalo_fwd)
            !$omp do
            do ki = 1, mesh_stag%get_n_points_inner()
                l = mesh_stag%inner_indices(ki)
                district = mesh_stag%get_district(l)
                if (district == DISTRICT_CLOSED) then
                    ! Central difference between forward and backward field-line
                    ! footpoints, normalised by the total parallel arc length
                    v(l) = ( map%upstream_stag_from_stag_fwd_single(uhalo_fwd, l) &
                           - map%upstream_stag_from_stag_bwd_single(uhalo_bwd, l) ) &
                           / (map%dpar_stag_stag_fwd(l) + map%dpar_stag_stag_bwd(l))
                    v(l) = v(l) * equi_on_stag%absb(l)
                else
                    v(l) = u(l)
                endif
            enddo
            !$omp end do
            !$omp do
            do kb = 1, mesh_stag%get_n_points_boundary()
                l = mesh_stag%boundary_indices(kb)
                v(l) = u(l)
            enddo
            !$omp end do
            !$omp do
            do kg = 1, mesh_stag%get_n_points_ghost()
                l = mesh_stag%ghost_indices(kg)
                v(l) = u(l)
            enddo
            !$omp end do
            !$omp end parallel
            deallocate(uhalo_bwd)
            deallocate(uhalo_fwd)
        end subroutine

    end subroutine

    subroutine compute_apar_init_profile(equi, mesh_stag, hsolver_stag, boundaries, jparv, aparv)
        !! Computes initial state for the parallel electromagnetic potential by
        !! solving a 2D Helmholtz problem with jpar as right hand side.
        class(equilibrium_t), intent(inout) :: equi
            !! Equilibrium
        type(mesh_cart_t), intent(inout) :: mesh_stag
            !! Mesh (staggered)
        class(helmholtz_solver_t), intent(inout) :: hsolver_stag
            !! Elliptic (2D) solver on staggered mesh
        type(boundaries_braginskii_t), intent(in) :: boundaries
            !! Boundary information
        real(GP), dimension(mesh_stag%get_n_points()), intent(in) :: jparv
            !! Values of parallel current
        real(GP), dimension(mesh_stag%get_n_points()), intent(inout) :: aparv
            !! Values of parallel electromagnetic potential (on input initial guess)

        integer :: ki, kb, kg, l, sinfo
        real(GP) :: fac, phi_stag, x, y, res
        real(GP), dimension(mesh_stag%get_n_points()) :: rhs, co
        real(GP), dimension(mesh_stag%get_n_points_inner()) :: xi, lambda

        fac = rhos**2
        phi_stag = mesh_stag%get_phi()

        ! Set up coefficients and right hand side
        !$omp parallel default(none) private(ki, kb, kg, l, x, y) &
        !$omp shared(equi, mesh_stag, phi_stag, fac, &
        !$omp lambda, xi, co, rhs, jparv)
        !$omp do
        do ki = 1, mesh_stag%get_n_points_inner()
            l = mesh_stag%inner_indices(ki)
            x = mesh_stag%get_x(l)
            y = mesh_stag%get_y(l)
            lambda(ki) = 0.0_GP
            xi(ki) = fac / equi%jacobian(x, y, phi_stag)
            co(l) = equi%jacobian(x, y, phi_stag)
            rhs(l) = jparv(l)
        enddo
        !$omp end do
        !$omp do
        do kb = 1, mesh_stag%get_n_points_boundary()
            l = mesh_stag%boundary_indices(kb)
            x = mesh_stag%get_x(l)
            y = mesh_stag%get_y(l)
            co(l) = equi%jacobian(x, y, phi_stag)
            rhs(l) = 0.0_GP
        enddo
        !$omp end do
        !$omp do
        do kg = 1, mesh_stag%get_n_points_ghost()
            l = mesh_stag%ghost_indices(kg)
            x = mesh_stag%get_x(l)
            y = mesh_stag%get_y(l)
            co(l) = equi%jacobian(x, y, phi_stag)
            rhs(l) = 0.0_GP
        enddo
        !$omp end do
        !$omp end parallel

        ! Homogeneous Dirichlet conditions on all boundaries
        call hsolver_stag%update(co, lambda, xi, &
                                 bnd_type_core=BND_TYPE_DIRICHLET_ZERO, &
                                 bnd_type_wall=BND_TYPE_DIRICHLET_ZERO, &
                                 bnd_type_dome=BND_TYPE_DIRICHLET_ZERO, &
                                 bnd_type_out=BND_TYPE_DIRICHLET_ZERO)
        call hsolver_stag%solve(rhs, aparv, res, sinfo)
        if (sinfo < 0) then
            call handle_error('Solver for apar at initial state initialisation did not converge', &
                              GRILLIX_ERR_SOLVER2D, __LINE__, __FILE__, &
                              error_info_t('Sinfo: ',[sinfo]))
        endif

    end subroutine

    subroutine compute_upar_init_profile(comm_handler, &
                                         equi, equi_on_cano, equi_on_stag, &
                                         mesh_cano, mesh_stag, &
                                         map, &
                                         penalisation_cano, penalisation_stag, &
                                         par_initupar_nt, par_initupar_dtau, boundaries, &
                                         cs_sol, uparv, &
                                         upar_solver, upar_krylov_method, &
                                         upar_resmax, upar_maxiter, upar_nrestart, upar_dbgout)
        !! Computes initial state for parallel ion velocity
        !! A diffusion of the parallel velocity along magnetic field lines is performed
        type(comm_handler_t), intent(in) :: comm_handler
            !! Communicators
        class(equilibrium_t), intent(inout) :: equi
            !! Equilibrium
        class(equilibrium_storage_t), intent(in) :: equi_on_cano
            !! Equilibrium quantities on canonical mesh
        class(equilibrium_storage_t), intent(in) :: equi_on_stag
            !! Equilibrium quantities on staggered mesh
        type(mesh_cart_t), intent(inout) :: mesh_cano
            !! Mesh (canonical)
        type(mesh_cart_t), intent(inout) :: mesh_stag
            !! Mesh (staggered)
        type(parallel_map_t), intent(in) :: map
            !! Parallel map
        type(penalisation_t), intent(in) :: penalisation_cano
            !! Penalisation (canonical)
        type(penalisation_t), intent(in) :: penalisation_stag
            !! Penalisation (staggered)
        integer, intent(in) :: par_initupar_nt
            !! Number of timesteps to be performed for diffusion on parallel velocity
        real(GP), intent(in) :: par_initupar_dtau
            !! Size of timestep for diffusion on parallel velocity
        type(boundaries_braginskii_t), intent(in) :: boundaries
            !! Boundary information
        real(GP), intent(in) :: cs_sol
            !! Sound speed at boundaries
        real(GP), dimension(mesh_stag%get_n_points()), intent(inout) :: uparv
            !! Values of parallel ion velocity (on input initial guess)
        character(len=*), intent(in) :: upar_solver
            !! Type of solver for upar
        character(len=*), intent(in) :: upar_krylov_method
            !! Krylov method to be used
        real(GP), intent(in) :: upar_resmax
            !! Maximum allowed residual (stopping criterion), default 1E-8
        integer, intent(in) :: upar_maxiter
            !! Maximum number of (outer) iterations, default = 10
        integer, intent(in) :: upar_nrestart
            !! Maximum number of iterations before restart (~inner iterations)
        integer, intent(in) :: upar_dbgout
            !! Debug output level

        integer :: ki, kb, kg, l, it
        real(GP) :: penchar, pendir, bohm_cs
        real(GP), dimension(mesh_stag%get_n_points()) :: rhs, lambda_par, xi_par
        integer, dimension(mesh_stag%get_n_points_inner()) :: bnd_descrs
        type(params_solver_aligned3d_t) :: params_parsolver_uparinit
        integer :: niter
        real(GP) :: res, true_res

        ! Start from upar = 0 everywhere
        !$omp parallel default(none) private(l) shared(mesh_stag, uparv)
        !$omp do
        do l = 1, mesh_stag%get_n_points()
            uparv(l) = 0.0_GP
        enddo
        !$omp end do
        !$omp end parallel

        ! Scheme below relies on axisymmetry; otherwise keep upar = 0 and warn
        if (.not.equi%is_axisymmetric()) then
            call handle_error('Skipping computation of upar &
                              & on non-axisymmetric equilibrium', &
                              GRILLIX_WRN_INITIALISATION, __LINE__, __FILE__)
            return
        endif

        params_parsolver_uparinit%krylov_method = upar_krylov_method
        params_parsolver_uparinit%resmax = upar_resmax
        params_parsolver_uparinit%maxiter = upar_maxiter
        params_parsolver_uparinit%nrestart = upar_nrestart
        params_parsolver_uparinit%precond_type = 'JAC'
        params_parsolver_uparinit%dbgout = upar_dbgout

        ! Time-loop: implicit pseudo-timestepping of parallel diffusion with
        ! penalised Bohm condition (upar -> +-cs_sol in the penalisation region)
        do it = 1, par_initupar_nt

            !$omp parallel default(none) &
            !$omp private(ki, kb, kg, l, penchar, pendir, bohm_cs) &
            !$omp shared(mesh_stag, penalisation_stag, &
            !$omp pen_epsinv, par_initupar_dtau, cs_sol, bnd_descrs, &
            !$omp uparv, lambda_par, xi_par, rhs)
            ! NOTE(review): this call runs inside the parallel region before
            ! the worksharing loops -- presumably extrapolate_ghost_points has
            ! internal !$omp do constructs; confirm.
            call extrapolate_ghost_points(mesh_stag, uparv)
            !$omp do
            do ki = 1, mesh_stag%get_n_points_inner()
                l = mesh_stag%inner_indices(ki)
                penchar = penalisation_stag%get_charfun_val(ki)
                pendir = penalisation_stag%get_dirindfun_val(ki)
                lambda_par(l) = (1.0_GP + par_initupar_dtau * penchar * pen_epsinv)
                xi_par(l) = (1.0_GP - par_initupar_dtau * penchar)
                ! Target Bohm velocity with sign from the direction indicator
                bohm_cs = pendir * cs_sol
                rhs(l) = uparv(l) + par_initupar_dtau * penchar * pen_epsinv * bohm_cs
            enddo
            !$omp end do
            !$omp do
            do kb = 1, mesh_stag%get_n_points_boundary()
                l = mesh_stag%boundary_indices(kb)
                rhs(l) = 0.0_GP
                bnd_descrs(kb) = BND_TYPE_NEUMANN
                lambda_par(l) = 1.0_GP
                xi_par(l) = 0.0_GP
            enddo
            !$omp end do
            !$omp do
            do kg = 1, mesh_stag%get_n_points_ghost()
                l = mesh_stag%ghost_indices(kg)
                rhs(l) = uparv(l)
                lambda_par(l) = 1.0_GP
                xi_par(l) = 0.0_GP
            enddo
            !$omp end do
            !$omp end parallel

            call solve_aligned3d_adjoint(comm_handler, &
                                         equi_on_cano, equi_on_stag, &
                                         mesh_cano, mesh_stag, &
                                         map, &
                                         params_parsolver_uparinit, upar_solver, &
                                         rhs, uparv, &
                                         niter, res, true_res, &
                                         lambda=lambda_par, xi=xi_par, &
                                         bnd_descrs=bnd_descrs)

            if (is_rank_info_writer) then
                if (params_parsolver_uparinit%dbgout > 0) then
                    write(*,*)'it = ', it, '; res = ', res,'; niter = ',niter
                endif
            endif

            if (niter < 0) then
                call handle_error('Solver for upar at initial state initialisation did not converge', &
                                  GRILLIX_ERR_SOLVER3D, __LINE__, __FILE__, &
                                  error_info_t('niter, it, res, true_res: ',[niter,it],[res, true_res]))
            endif

        enddo

    end subroutine

end submodule