submodule(solver_aligned3d_m) solve_aligned3d_adjoint_s
!! Contains routine that performs a 3D aligned solve for adjoint elliptic problem

implicit none

contains

module subroutine solve_aligned3d_adjoint(comm_handler, &
                                          equi_on_cano, equi_on_stag, &
                                          mesh_cano, mesh_stag, &
                                          map, &
                                          params_solver, solver_type, &
                                          rhs, sol, &
                                          niter, res, true_res, cnt_matvec, &
                                          lambda, xi, co_cano, fcx, &
                                          bnd_descrs, &
                                          flutter_fac_cano, flutter_fac_stag, &
                                          opsinplane_cano, opsinplane_stag, &
                                          apar_stag, apar_cano, &
                                          bnd_descrs_flux)
    !! Solves the adjoint parallel Helmholtz problem on the staggered mesh with a
    !! matrix-free Krylov solver (built via solver3d_factory).  The operator is
    !! applied by the internal routine matvec_adjoint: a field-aligned divergence
    !! towards the canonical mesh, optional scaling by co_cano, then the negative
    !! field-aligned gradient back to the staggered mesh, with optional magnetic
    !! flutter corrections and optional lambda/xi/fcx scalings.
    !!
    !! All flutter-related optional arguments (flutter_fac_cano/stag,
    !! opsinplane_cano/stag, apar_stag/cano) must be given together or not at all;
    !! an inconsistent subset is a fatal error.  Flutter is silently disabled when
    !! both flutter factors are (numerically) zero.

    type(comm_handler_t), intent(in) :: comm_handler                                            !! Handler for inter-plane MPI communication
    type(mesh_cart_t), intent(in) :: mesh_cano                                                  !! Mesh on the canonical (adjacent) plane
    type(mesh_cart_t), intent(in) :: mesh_stag                                                  !! Mesh on the staggered plane (solution lives here)
    type(equilibrium_storage_t), intent(in):: equi_on_cano
    type(equilibrium_storage_t), intent(in) :: equi_on_stag
    type(parallel_map_t), intent(in) :: map                                                     !! Field-aligned map providing par_divb/par_grad and plane-to-plane lifts
    type(params_solver_aligned3d_t), intent(in) :: params_solver                                !! Krylov method, tolerances, preconditioner type ('NONE' or 'JAC')
    character(len=*), intent(in) :: solver_type                                                 !! Identifier passed to solver3d_factory
    real(GP), dimension(mesh_stag%get_n_points()), intent(in) :: rhs                            !! Right-hand side on the staggered mesh
    real(GP), dimension(mesh_stag%get_n_points()), intent(inout) :: sol                         !! On entry: initial guess; on exit: solution
    integer, intent(out) :: niter                                                               !! Number of Krylov iterations performed
    real(GP), intent(out) :: res                                                                !! Final (preconditioned) residual reported by the solver
    real(GP), intent(out) :: true_res                                                           !! Final true residual reported by the solver
    integer, intent(out), optional :: cnt_matvec                                                !! Number of operator applications (matvec_adjoint calls)
    real(GP), dimension(mesh_stag%get_n_points()), intent(in), optional :: lambda               !! Helmholtz/shift term added as lambda*u
    real(GP), dimension(mesh_stag%get_n_points()), intent(in), optional :: xi                   !! Pointwise scaling applied after the parallel gradient
    real(GP), dimension(mesh_cano%get_n_points()), intent(in), optional :: co_cano              !! Pointwise coefficient applied on the canonical plane
    real(GP), dimension(mesh_stag%get_n_points()), intent(in), optional :: fcx                  !! Pointwise scaling applied to u before the parallel divergence
    integer, dimension(mesh_stag%get_n_points_boundary()), intent(in), optional :: bnd_descrs   !! Boundary descriptors on the staggered mesh (Dirichlet-zero/Neumann)
    real(GP), intent(in), optional :: flutter_fac_cano                                          !! Flutter amplitude on the canonical plane (part of flutter set)
    real(GP), intent(in), optional :: flutter_fac_stag                                          !! Flutter amplitude on the staggered plane (part of flutter set)
    type(inplane_operators_t), intent(in), optional :: opsinplane_cano                          !! In-plane flutter operators, canonical plane (part of flutter set)
    type(inplane_operators_t), intent(in), optional :: opsinplane_stag                          !! In-plane flutter operators, staggered plane (part of flutter set)
    real(GP), dimension(mesh_stag%get_n_points()), intent(in), optional :: apar_stag            !! Parallel vector potential on staggered mesh (part of flutter set)
    real(GP), dimension(mesh_cano%get_n_points()), intent(in), optional :: apar_cano            !! Parallel vector potential on canonical mesh (part of flutter set)
    integer, dimension(mesh_cano%get_n_points_boundary()), intent(in), optional :: bnd_descrs_flux !! Perp. boundary descriptors for the flutter divergence on the canonical mesh

    logical :: with_flutter, any_flutter_input
    integer :: np, np_cano, np_stag_bwd, np_cano_fwd, nrecv
    integer, allocatable, dimension(:) :: np_recv
    ! Work buffers for halo data received from adjacent planes; allocated once here
    ! and reused in every matvec_adjoint call (host association).
    real(GP), allocatable, dimension(:) :: ufc_hbwd, pdivb_hfwd, diag_inv
    integer :: cnt_matvec_eff
    class(solver3d_t), allocatable :: solver3d

    ! Check consistency of input: flutter quantities must be supplied all-or-nothing
    with_flutter = present(flutter_fac_cano) &
                   .and. present(flutter_fac_stag) &
                   .and. present(opsinplane_cano) &
                   .and. present(opsinplane_stag) &
                   .and. present(apar_stag) &
                   .and. present(apar_cano)
    any_flutter_input = present(flutter_fac_cano) &
                        .or. present(flutter_fac_stag) &
                        .or. present(opsinplane_cano) &
                        .or. present(opsinplane_stag) &
                        .or. present(apar_stag) &
                        .or. present(apar_cano)
    if (.not.with_flutter .and. any_flutter_input) then
        call handle_error('Input of flutter quantities not consistent', &
                          GRILLIX_ERR_SOLVER3D, __LINE__, __FILE__)
    endif

    ! Switch off flutter computation if flutter_facs=0 (within machine epsilon)
    if (with_flutter) then
        if ( (abs(flutter_fac_cano) < GP_EPS) .and. (abs(flutter_fac_stag) < GP_EPS) ) then
            with_flutter = .false.
        endif
    endif

    ! Number of points of meshes on adjacent planes: exchange local sizes with the
    ! backward plane (staggered mesh) and forward plane (canonical mesh) so the
    ! halo receive buffers below can be sized correctly.
    np = mesh_stag%get_n_points()
    np_cano = mesh_cano%get_n_points()
    call getdata_fwdbwdplane(comm_handler%get_comm_planes(), -1, 1, [np], nrecv, np_recv)
    np_stag_bwd = np_recv(1)
    deallocate(np_recv)
    call getdata_fwdbwdplane(comm_handler%get_comm_planes(), 1, 1, [np_cano], nrecv, np_recv)
    np_cano_fwd = np_recv(1)
    deallocate(np_recv)

    ! Allocate work variables used within matvec operation
    allocate(ufc_hbwd(np_stag_bwd))
    allocate(pdivb_hfwd(np_cano_fwd))

    ! Compute inverse diagonal for Jacobi preconditioning (allocates diag_inv)
    if (params_solver%precond_type == 'JAC') then
        call compute_adjoint_diag_inv_approx()
    endif

    ! Initialise 3D solver; the two cases differ only in whether the left
    ! preconditioner callback is attached.
    call solver3d_factory(solver_type, solver3d)
    select case(params_solver%precond_type)
    case('NONE')
        call solver3d%create(comm_handler%get_comm_planes(), &
                             krylov_method=params_solver%krylov_method, &
                             ndim_loc=mesh_stag%get_n_points(), &
                             resmax=params_solver%resmax, &
                             matvec=matvec_adjoint, &
                             maxiter=params_solver%maxiter, &
                             nrestart=params_solver%nrestart, &
                             dbgout=params_solver%dbgout)
    case('JAC')
        call solver3d%create(comm_handler%get_comm_planes(), &
                             krylov_method=params_solver%krylov_method, &
                             ndim_loc=mesh_stag%get_n_points(), &
                             resmax=params_solver%resmax, &
                             matvec=matvec_adjoint, &
                             precondl=precond_adjoint_jacobi, &
                             maxiter=params_solver%maxiter, &
                             nrestart=params_solver%nrestart, &
                             dbgout=params_solver%dbgout)
    case default
        call handle_error('Preconditioner not valid', &
                          GRILLIX_ERR_SOLVER3D, __LINE__, __FILE__, &
                          error_info_t(params_solver%precond_type))
    end select

    ! Solve system; cnt_matvec_eff is incremented inside matvec_adjoint
    cnt_matvec_eff = 0
    call solver3d%solve(comm_handler%get_comm_planes(), &
                        rhs, sol, res, niter, true_res)
    if (present(cnt_matvec)) then
        cnt_matvec = cnt_matvec_eff
    endif

    ! Release solver and work buffers (diag_inv only exists for 'JAC')
    deallocate(solver3d)
    deallocate(ufc_hbwd)
    deallocate(pdivb_hfwd)
    if (allocated(diag_inv)) deallocate(diag_inv)

contains

    subroutine matvec_adjoint(u, v, ipar)
        !! Matrix Vector Multiplication for adjoint parallel helmholtz operator
        !!
        !! Computes v = A^T u in four stages:
        !!   1. ufc = fcx*u (or u if fcx absent) on the staggered mesh
        !!   2. pdivb = parallel divergence of ufc onto the canonical mesh
        !!      (+ optional flutter divergence, optional co_cano scaling)
        !!   3. v = -parallel gradient of pdivb back onto the staggered mesh
        !!      (+ optional flutter gradient), then optional xi/lambda terms
        !!   4. boundary rows are replaced according to bnd_descrs; ghost rows
        !!      return the identity
        !! Uses host-associated halo buffers ufc_hbwd/pdivb_hfwd and the plane
        !! communicator for the backward/forward halo exchanges.
        real(GP), dimension(*) :: u   !! Input vector on the staggered mesh (assumed-size: callback interface)
        real(GP), dimension(*) :: v   !! Output vector, operator applied to u
        integer :: ipar(*)            !! Solver-specific integer parameters (unused here)

        integer :: l, ki, kb, kg, jb, bndnm_nnz, request(2)
        integer, dimension(3) :: bndnmn_cols
        real(GP), dimension(3) :: bndnmn_vals
        real(GP) :: smb
        real(GP), dimension(mesh_stag%get_n_points()) :: ufc, pdivb_stag
        real(GP), dimension(mesh_cano%get_n_points()) :: pdivb, ufc_cano

        ! Book-keeping for the optional cnt_matvec output of the host routine
        cnt_matvec_eff = cnt_matvec_eff + 1

        ! Stage 1: ufc = fcx*u (or plain copy when fcx is absent)
        if (.not.present(fcx)) then
            !$omp parallel default(none) &
            !$omp shared(mesh_stag, u, ufc) &
            !$omp private(l)
            !$omp do
            do l = 1, mesh_stag%get_n_points()
                ufc(l) = u(l)
            enddo
            !$omp end do
            !$omp end parallel
        else
            !$omp parallel default(none) &
            !$omp shared(mesh_stag, u, ufc, fcx) &
            !$omp private(l)
            !$omp do
            do l = 1, mesh_stag%get_n_points()
                ufc(l) = fcx(l) * u(l)
            enddo
            !$omp end do
            !$omp end parallel
        endif

        ! MPI communication with the backward plane for the parallel divergence
        call start_comm_planes(comm_handler%get_comm_planes(), &
                               mesh_stag%get_n_points(), np_stag_bwd, ufc, -1, ufc_hbwd, request)
        call finalize_comm_planes(request)

        ! Stage 2: parallel divergence onto the canonical mesh.
        ! NOTE(review): flat_stag_to_cano, set_perpbnds and extrapolate_ghost_points
        ! are called by every thread of this parallel region — presumably they
        ! contain orphaned worksharing constructs internally; confirm before
        ! restructuring.
        !$omp parallel default(none) &
        !$omp shared(mesh_cano, map, equi_on_cano, opsinplane_cano, &
        !$omp        ufc, ufc_hbwd, pdivb, ufc_cano, co_cano, &
        !$omp        with_flutter, flutter_fac_cano, apar_cano, bnd_descrs_flux) &
        !$omp private(l, ki, kb)
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            pdivb(l) = map%par_divb_single(ufc, ufc_hbwd, l)
        enddo
        !$omp end do
        if (with_flutter) then
            call map%flat_stag_to_cano(ufc, ufc_hbwd, ufc_cano)
            !$omp do
            do ki = 1, mesh_cano%get_n_points_inner()
                l = mesh_cano%inner_indices(ki)
                pdivb(l) = pdivb(l) + flutter_fac_cano &
                           * opsinplane_cano%flutter_div(apar_cano, ufc_cano, l)
            enddo
            !$omp end do
            if (present(bnd_descrs_flux)) then
                ! Calculate boundary points using boundary condition
                ! Linear operation acting as a matrix-vector multiplication
                call set_perpbnds(mesh_cano, bnd_descrs_flux, pdivb)
            else
                ! Calculate boundary points using ghost points
                !$omp do
                do kb = 1, mesh_cano%get_n_points_boundary()
                    l = mesh_cano%boundary_indices(kb)
                    pdivb(l) = pdivb(l) + flutter_fac_cano &
                               * opsinplane_cano%flutter_div(apar_cano, ufc_cano, l)
                enddo
                !$omp end do
            end if
            ! Extrapolation on ghost points
            ! Linear operation acting as a matrix-vector multiplication
            call extrapolate_ghost_points(mesh_cano, pdivb)
        endif
        if (present(co_cano)) then
            !$omp do
            do l = 1, mesh_cano%get_n_points()
                pdivb(l) = pdivb(l) * co_cano(l)
            enddo
            !$omp end do
        endif
        !$omp end parallel

        ! MPI communication with the forward plane for the parallel gradient
        call start_comm_planes(comm_handler%get_comm_planes(), &
                               mesh_cano%get_n_points(), np_cano_fwd, pdivb, +1, pdivb_hfwd, request)
        call finalize_comm_planes(request)

        ! Stage 3: v = -parallel gradient, plus optional flutter/xi/lambda terms.
        ! (u is an assumed-size dummy and therefore predetermined shared, so it
        ! need not appear in the shared clause despite default(none).)
        !$omp parallel default(none) &
        !$omp shared(mesh_stag, map, equi_on_stag, opsinplane_stag, &
        !$omp        v, ufc, xi, lambda, pdivb, pdivb_hfwd, pdivb_stag, &
        !$omp        with_flutter, flutter_fac_stag, apar_stag, bnd_descrs) &
        !$omp private(l, ki, kb, kg, &
        !$omp         bndnmn_cols, bndnmn_vals, bndnm_nnz, jb, smb)
        !$omp do
        do l = 1, mesh_stag%get_n_points()
            v(l) = (-1.0_GP) * map%par_grad_single(pdivb, pdivb_hfwd, l)
        enddo
        !$omp end do
        if (with_flutter) then
            call map%lift_cano_to_stag(pdivb, pdivb_hfwd, pdivb_stag)
            ! Calculate flutter gradient on inner points
            !$omp do
            do ki = 1, mesh_stag%get_n_points_inner()
                l = mesh_stag%inner_indices(ki)
                v(l) = v(l) - flutter_fac_stag &
                       * opsinplane_stag%flutter_grad(apar_stag, pdivb_stag, l)
            enddo
            !$omp end do
        endif
        ! Optional xi scaling and lambda (Helmholtz) shift; present() is loop-
        ! invariant, kept inside the loop as in the original formulation.
        !$omp do
        do l = 1, mesh_stag%get_n_points()
            if (present(xi)) then
                v(l) = v(l) * xi(l)
            endif
            if (present(lambda)) then
                v(l) = lambda(l) * u(l) + v(l)
            endif
        enddo
        !$omp end do

        ! Stage 4: Adaption for boundary conditions — boundary rows of the matrix
        ! are replaced by the discrete boundary-condition rows.
        if (present(bnd_descrs)) then
            !$omp do
            do kb = 1, mesh_stag%get_n_points_boundary()
                l = mesh_stag%boundary_indices(kb)
                if ( bnd_descrs(kb) == BND_TYPE_DIRICHLET_ZERO ) then
                    ! Homogeneous Dirichlet: identity row
                    v(l) = u(l)
                elseif ( bnd_descrs(kb) == BND_TYPE_NEUMANN ) then
                    ! Neumann: sparse row (<=3 entries) applied to u
                    call compute_bndnmn_matrix_row(mesh_stag, l, bndnmn_cols, bndnmn_vals, bndnm_nnz)
                    smb = 0.0_GP
                    do jb = 1, bndnm_nnz
                        smb = smb + bndnmn_vals(jb)*u(bndnmn_cols(jb))
                    enddo
                    v(l) = smb
                else
                    call handle_error('Matvec_adjoint, boundary_descriptor not valid', &
                                      GRILLIX_ERR_SOLVER3D, __LINE__, __FILE__, &
                                      error_info_t('bnd_descr: ',[kb, bnd_descrs(kb)]))
                endif
            enddo
            !$omp end do
            !$omp do
            do kg = 1, mesh_stag%get_n_points_ghost()
                ! On ghost points identity is returned
                l = mesh_stag%ghost_indices(kg)
                v(l) = u(l)
            enddo
            !$omp end do
        endif
        !$omp end parallel
    end subroutine

    subroutine precond_adjoint_jacobi(u, v, ipar)
        !! Jacobi preconditioner, i.e. diagonal part of matrix
        !! with inversion of perpendicular neumann boundary conditions
        !! and ghost points
        !!
        !! Applies v = diag_inv * u (diag_inv precomputed by
        !! compute_adjoint_diag_inv_approx), then overrides boundary rows via
        !! set_perpbnds and ghost rows with the identity when bnd_descrs is given.
        real(GP), dimension(*) :: u   !! Input vector on the staggered mesh
        real(GP), dimension(*) :: v   !! Preconditioned output vector
        integer :: ipar(*)            !! Solver-specific integer parameters (unused here)

        integer :: l, kb, kg
        real(GP), dimension(mesh_stag%get_n_points_boundary()) :: bnd_vals

        ! NOTE(review): set_perpbnds is called by all threads of this region —
        ! presumably it contains orphaned worksharing internally; confirm.
        !$omp parallel default(none) &
        !$omp shared(mesh_stag, u, v, diag_inv, bnd_descrs, bnd_vals) &
        !$omp private(l, kb, kg)
        !$omp do
        do l = 1, mesh_stag%get_n_points()
            v(l) = diag_inv(l) * u(l)
        enddo
        !$omp end do

        ! Adaption for boundary conditions: gather u on boundary points, then let
        ! set_perpbnds write the boundary-condition inverse into v
        if (present(bnd_descrs)) then
            !$omp do
            do kb = 1, mesh_stag%get_n_points_boundary()
                l = mesh_stag%boundary_indices(kb)
                bnd_vals(kb) = u(l)
            enddo
            !$omp end do
            call set_perpbnds(mesh_stag, bnd_descrs, v , bnd_vals)
            ! Ghost points: identity (matches matvec_adjoint)
            !$omp do
            do kg = 1, mesh_stag%get_n_points_ghost()
                l = mesh_stag%ghost_indices(kg)
                v(l) = u(l)
            enddo
            !$omp end do
        endif
        !$omp end parallel
    end subroutine

    subroutine compute_adjoint_diag_inv_approx()
        !! Computes an approximate for the inverse diagonal of the adjoint parallel helmholtz matrix
        !! \f[
        !! D = \lambda + 2 * \xi *fcx * co_cano / (dpar_stag_stag_fwd * dpar_stag_stag_bwd)
        !! \f]
        !!
        !! Flutter terms are not taken into account
        !!
        !! Allocates the host-associated diag_inv and stores 1/D in it.  Absent
        !! optional coefficients default to 1 (lambda defaults to 0).  co_cano
        !! lives on the canonical mesh, so its forward-plane halo is fetched and
        !! it is lifted to the staggered mesh pointwise.
        integer :: l, request(2)
        real(GP), allocatable, dimension(:) :: co_cano_hfwd

        allocate(diag_inv(mesh_stag%get_n_points()))

        if (present(co_cano)) then
            allocate(co_cano_hfwd(np_cano_fwd))
            ! MPI communication for co_cano with the forward plane
            call start_comm_planes(comm_handler%get_comm_planes(), &
                                   mesh_cano%get_n_points(), np_cano_fwd, &
                                   co_cano, 1, co_cano_hfwd, request)
            call finalize_comm_planes(request)
        endif

        !$omp parallel default(none) &
        !$omp shared(mesh_stag, map, diag_inv, &
        !$omp        fcx, xi, lambda, co_cano, co_cano_hfwd) &
        !$omp private(l)
        !$omp do
        do l = 1, mesh_stag%get_n_points()
            ! Build D factor by factor, then invert in place
            if (present(fcx)) then
                diag_inv(l) = fcx(l)
            else
                diag_inv(l) = 1.0_GP
            endif
            if (present(xi)) then
                diag_inv(l) = diag_inv(l) * xi(l)
            endif
            if (present(co_cano)) then
                diag_inv(l) = diag_inv(l) * map%lift_cano_to_stag_single(co_cano, co_cano_hfwd, l)
            endif
            diag_inv(l) = diag_inv(l) * 2.0_GP / (map%dpar_stag_stag_fwd(l) * map%dpar_stag_stag_bwd(l))
            if (present(lambda)) then
                diag_inv(l) = diag_inv(l) + lambda(l)
            endif
            diag_inv(l) = 1.0_GP / diag_inv(l)
        enddo
        !$omp end do
        !$omp end parallel

        if (allocated(co_cano_hfwd)) deallocate(co_cano_hfwd)
    end subroutine

end subroutine

end submodule