timestep_braginskii_m.f90 Source File


Contents


Source Code

module timestep_braginskii_m
    !! Implementation of BRAGINSKII model
    use MPI
    use error_handling_grillix_m, only: handle_error, error_info_t
    use status_codes_grillix_m, only : GRILLIX_ERR_OTHER, GRILLIX_WRN_GENERAL
    use perf_m, only : perf_start, perf_stop
    use precision_grillix_m, only : GP, GP_EPS
    use comm_handler_m, only : comm_handler_t
    use mpi_mapping_auxiliaries_m, only : getdata_fwdbwdplane
    use equilibrium_m, only : equilibrium_t
    use equilibrium_storage_m, only : equilibrium_storage_t
    use mesh_cart_m, only : mesh_cart_t
    use helmholtz_solver_m, only : helmholtz_solver_t
    use polars_m, only : polars_t
    use parallel_map_m, only : parallel_map_t
    use penalisation_m, only : penalisation_t
    use variable_m, only: variable_t
    use multistep_m, only : karniadakis_t, multistep_storage_t
    use inplane_operators_m, only : inplane_operators_t
    use solver_aligned3d_m, only : solve_aligned3d, solve_aligned3d_adjoint
    use boundaries_perp_m, only : set_perpbnds, extrapolate_ghost_points
    use boundaries_braginskii_m, only : boundaries_braginskii_t, zonal_neumann_core
    use descriptors_m, only : BND_TYPE_NEUMANN, BND_TYPE_DIRICHLET_ZERO
    use descriptors_braginskii_m, only : BND_BRAGTYPE_ZONAL_NEUMANN, BND_BRAGTYPE_FLOATING_POTENTIAL_LOCAL
    use heat_flux_m, only : heat_flux_landau, heat_flux_braginskii, heat_flux_free_streaming
    use penalisation_braginskii_m, only : logne_penalisation, logte_penalisation, logti_penalisation, &
                                          upar_penalisation, vort_penalisation, pot_penalisation, psipar_penalisation
    use polarisation_braginskii_m, only : polarisation_advance, parallel_polarisation_advection
    use ohm_electromagnetic_braginskii_m, only : ohm_advance
    use gyroviscosity_m, only : gyroviscosity_t
    use mms_braginskii_m, only : mms_sol_braginskii_ne, mms_sol_braginskii_pot, mms_sol_braginskii_vort, mms_sol_braginskii_upar, &
                                 mms_sol_braginskii_jpar, mms_sol_braginskii_apar, mms_sol_braginskii_te, mms_sol_braginskii_ti, &
                                 mms_source_braginskii_continuity, mms_source_braginskii_parmomentum, mms_source_braginskii_etemp, &
                                 mms_source_braginskii_itemp, mms_source_braginskii_vorticity, mms_source_braginskii_ohm
    use facade_params_m, only : tinfo_size
    use params_feature_selection_m, only : &
        mms_on, mms_potvort_solve_on, mms_aparjpar_solve_on
    use params_tstep_m, only : &
        dtau, tstep_order
    use params_brag_model_m, only : &
        delta, beta, mass_ratio_ei, heat_fac, tratio, etapar0, nu_e0, eta_i0, thermal_force_coeff, boussinesq_on
    use params_brag_pardiss_model_m, only : &   
        heatflux_model, heatflux_timplicit_e, heatflux_timplicit_i, chipar0_e, chipar0_i, heatflux_cutoff_e, heatflux_cutoff_i, &
        free_streaming_fraction_i, free_streaming_fraction_e, free_streaming_limiter_qfac, &
        viscosity_timplicit_upar
    use params_brag_numdiss_m, only : &
        perpdiss_order_ne, perpdiss_coeff_ne, pardiss_coeff_ne, &
        perpdiss_order_te, perpdiss_coeff_te, perpdiss_order_pe, perpdiss_coeff_pe, &
        perpdiss_order_ti, perpdiss_coeff_ti, perpdiss_order_pi, perpdiss_coeff_pi, &
        perpdiss_order_vort, perpdiss_coeff_vort, pardiss_coeff_vort, &
        perpdiss_order_upar, perpdiss_coeff_upar, perpdiss_order_ohm, perpdiss_coeff_ohm
    use params_brag_buffer_m, only : &
        buffer_coeff_ne, buffer_coeff_te, buffer_coeff_ti, buffer_coeff_pe, buffer_coeff_pi, &
        buffer_coeff_vort, buffer_coeff_upar, buffer_coeff_ohm
    use params_brag_boundaries_perp_m, only : &
        bnddescr_pot_core, bnddescr_ne_core, bnddescr_te_core, bnddescr_ti_core
    use params_brag_boundaries_parpen_m, only : &
        pen_epsinv, sheath_potential_lambda_sh
    use params_brag_floors_m, only : &
        floor_ne, floor_te, floor_ti
    use params_brag_parsolver_te_m, only : &
        parsolver_te_select, params_parsolver_te
    use params_brag_parsolver_ti_m, only : &
        parsolver_ti_select, params_parsolver_ti
    use params_brag_parsolver_upar_m, only : &
        parsolver_upar_select, params_parsolver_upar 
    use params_brag_switches_m, only : &
        swb_cont_exb, swb_cont_curv_pot, swb_cont_curv_te, swb_cont_curv_ne, swb_cont_dissperp, &
        swb_cont_paradv, swb_cont_divupar, swb_cont_divjpar, swb_cont_dissparallel, swb_cont_source, &
        swb_cont_flutter_paradv, swb_cont_flutter_divjpar, swb_cont_flutter_divupar, &
        swb_etemp_exb, swb_etemp_curv_pot, swb_etemp_curv_ne, swb_etemp_curv_te, swb_etemp_dissperp, &
        swb_etemp_paradv, swb_etemp_divjpar, swb_etemp_divvpar, swb_etemp_resist, swb_etemp_pardiff, &
        swb_etemp_flutter_pardiff_grad, swb_etemp_flutter_pardiff_div, swb_etemp_flutter_paradv, &
        swb_etemp_flutter_divvpar, swb_etemp_flutter_divjpar, swb_etemp_equipart, swb_etemp_source, & 
        swb_itemp_exb, swb_itemp_curv_pot, swb_itemp_curv_ne, swb_itemp_curv_te, swb_itemp_curv_ti, &
        swb_itemp_dissperp, swb_itemp_equipart, swb_itemp_vischeat, swb_itemp_source, &
        swb_itemp_paradv, swb_itemp_pardiff, swb_itemp_divupar, swb_itemp_divjpar, &
        swb_itemp_flutter_paradv, swb_itemp_flutter_divupar, swb_itemp_flutter_divjpar, &
        swb_itemp_flutter_pardiff_grad, swb_itemp_flutter_pardiff_div, &
        swb_vort_tecurvne, swb_vort_necurvte, swb_vort_necurvti, swb_vort_ticurvne, swb_vort_dissperp, &
        swb_vort_viscion, swb_vort_source, swb_vort_divjpar, swb_vort_dissparallel, &
        swb_vort_flutter_divjpar, swb_vort_exb, swb_vort_flutter_paradv, swb_vort_dia, &
        swb_upar_exb, swb_upar_curv, swb_upar_dissperp, swb_upar_source, swb_upar_viscion, &
        swb_upar_paradv, swb_upar_gradpar_n, swb_upar_gradparte, swb_upar_gradparti, &
        swb_upar_flutter_paradv, swb_upar_flutter_gradne, &
        swb_upar_flutter_gradte, swb_upar_flutter_gradti, &
        swb_ohm_exb, swb_ohm_dissperp, swb_ohm_paradv, swb_ohm_physdiss, swb_ohm_gradpar_pot, &
        swb_ohm_gradpar_ne, swb_ohm_gradpar_te, swb_ohm_flutter_paradv, swb_ohm_flutter_gradpot, &
        swb_ohm_flutter_gradne, swb_ohm_flutter_gradte, &
        swb_flutter_visc, swb_upar_flutter_viscion_grad
    implicit none

    public :: timestep_braginskii

    ! Auxiliary work variables -------------------------------------------------------------------
    ! All module variables below are `save`d and set up once on the first call to
    ! timestep_braginskii (cnt == 1, via setup_work_variables); they persist between time steps.
    type(variable_t), private, save :: logne, logte, logti
    ! Logarithmic density and temperatures: log(ne), log(te), log(ti)
    type(variable_t), private, save :: cano_upar, cano_jpar
    ! Parallel ion velocity and parallel current mapped from the staggered to the canonical grid
    type(variable_t), private, save :: cano_apar_fluct
    ! Fluctuation of apar used for flutter operators on the canonical grid
    type(variable_t), private, save :: upar_gradpar_logne
    ! upar * grad_par(log ne) term (NOTE(review): earlier comment said "parallel ion flux n*upar",
    ! which does not match the name -- confirm against usage in the advance routines)
    type(variable_t), private, save :: stag_pot
    ! Electrostatic potential mapped to the staggered grid
    type(variable_t), private, save :: stag_ne
    ! Density mapped to the staggered grid (floored with floor_ne after mapping)
    type(variable_t), private, save :: stag_te, qcond_te, heat_gradpar_logte_stag, cano_qcond_te
    ! Conductive parallel electron heat flux auxiliaries: Te on staggered grid, conductive flux,
    ! parallel log-Te gradient incl. flutter contribution, and the flux on the canonical grid
    type(variable_t), private, save :: stag_ti, qcond_ti, heat_gradpar_logti_stag, cano_qcond_ti
    ! Conductive parallel ion heat flux auxiliaries (analogous to the electron set above)
    type(variable_t), private, save :: upar_gradpar_logti
    ! Convective parallel ion heat flux auxiliary: upar * grad_par(log Ti)
    type(variable_t), private, save :: vpar, vpar_gradpar_logte
    ! Convective parallel electron heat flux auxiliaries: electron parallel velocity and
    ! vpar * grad_par(log Te)
    type(variable_t), private, save :: gradpar_logne, gradpar_logte, gradpar_logti
    ! Parallel gradients of logarithmic density and temperatures
    type(variable_t), private, save :: jpar_over_n_stag
    ! jparallel/ne on the staggered grid
    type(variable_t), private, save :: jpar_over_n_full
    ! jparallel/ne on the canonical grid
    type(variable_t), private, save :: nevar_adv, nevar_adv_stag
    ! Density on canonical and staggered grids at time point t+1 (needed for Ohm's law)
    type(variable_t), private, save :: tevar_adv, tevar_adv_stag
    ! Electron temperature on canonical and staggered grids at time point t+1 (needed for Ohm's law)
    type(variable_t), private, save :: gradpar_ne
    ! Parallel density gradient
    type(variable_t), private, save :: gradpar_vort
    ! Parallel vorticity gradient
    type(variable_t), private, save :: psipar
    ! Generalised electromagnetic potential
    type(variable_t), private, save :: jpar_t_extrapolate
    ! Parallel current extrapolated to t+1

    ! Multistep history storage ------------------------------------------------------------------
    type(multistep_storage_t), private, save :: pot_store, ne_store, ti_store
    ! Storage of potential, density and ion temperature values at time-steps t, t-1, t-2, ...
    type(multistep_storage_t), private, save :: pot_firstsolve_store
    ! Storage of potential specifically for the initial guess in the solver for the
    ! Zonal Neumann core boundary condition (only allocated when that b.c. is selected)
    type(multistep_storage_t), private, save :: jpar_store, apar_pen_store, apar_store
    ! Storage of jpar, apar and penalised apar for initial guesses in the Ohm's law solver
    ! and in the penalisation (note: apar_pen_store keeps tstep_order levels, the others
    ! keep tstep_order-1 levels -- see the first-call initialisation)
    type(multistep_storage_t), private, save :: logte_firstsolve_store, logti_firstsolve_store
    ! Storage of logte and logti for the initial guess in the solver for the Zonal Neumann
    ! core b.c. (only allocated when the implicit heat flux is combined with that b.c.)

    real(GP), dimension(:), allocatable, save :: apar_pen_guess
    ! Guess values for penalisation values of the penalised parallel electromagnetic
    ! potential (accelerates the solve); allocated on first call, initialised to zero

    real(GP), dimension(:), allocatable, public, save :: src_floor_ne
    ! Indirect density source created by applying the floor value floor_ne;
    ! public so diagnostics elsewhere can read it
contains

    subroutine timestep_braginskii(comm_handler, &
                                   equi, equi_on_cano, equi_on_stag, &
                                   mesh_cano, mesh_stag, &
                                   hsolver_cano, hsolver_stag, &
                                   map, &
                                   penalisation_cano, penalisation_stag, &
                                   polars_cano, polars_stag, &
                                   opsinplane_cano, opsinplane_stag, &
                                   boundaries, &
                                   cf_buffer_cano, cf_buffer_stag, &
                                   cf_diss_cano, cf_diss_stag, &
                                   tau, &
                                   ne, src_ne, tstep_continuity, &
                                   pot, vort, src_vort, tstep_vort, & 
                                   upar, src_upar, tstep_parmom, &
                                   jpar, apar, apar_fluct, tstep_ohm, &
                                   te, src_te, tstep_etemp, &
                                   ti, src_ti, tstep_itemp, &
                                   gstress, &
                                   tinfo, rinfo, success_plasma)
        !! Evolves variables a single time-step according to BRAGINSKII model
        type(comm_handler_t), intent(in) :: comm_handler
        !! Communicators
        class(equilibrium_t), intent(inout) :: equi
        !! Equilibrium
        class(equilibrium_storage_t), intent(inout) :: equi_on_cano
        !! Equilibrium quantities on canonical mesh
        class(equilibrium_storage_t), intent(inout) :: equi_on_stag
        !! Equilibrium quantities on staggered mesh
        type(mesh_cart_t), intent(inout) :: mesh_cano
        !! Mesh (canonical) within poloidal plane
        type(mesh_cart_t), intent(inout) :: mesh_stag
        !! Mesh (staggered) within poloidal plane
        class(helmholtz_solver_t), intent(inout) :: hsolver_cano
        !! Elliptic (2D) solver on canonical mesh
        class(helmholtz_solver_t), intent(inout) :: hsolver_stag
        !! Elliptic (2D) solver on staggered mesh
        type(parallel_map_t), intent(in) :: map
        !! Parallel map
        type(penalisation_t), intent(in) :: penalisation_cano
        !! Penalisation (canonical)
        type(penalisation_t), intent(in) :: penalisation_stag
        !! Penalisation (staggered)
        type(polars_t), intent(in) :: polars_cano
        !! Polar grid and operators (canonical)
        type(polars_t), intent(in) :: polars_stag
        !! Polar grid and operators (staggered)
        type(inplane_operators_t), intent(inout) :: opsinplane_cano
        !! In-plane operators (canonical)
        type(inplane_operators_t), intent(inout) :: opsinplane_stag
        !! In-plane operators (staggered)
        type(boundaries_braginskii_t), intent(inout) :: boundaries
        !! Boundary information for the BRAGINSKII model 
        real(GP), dimension(mesh_cano%get_n_points()), intent(in) :: cf_buffer_cano
        !! Buffer function on canonical mesh
        real(GP), dimension(mesh_stag%get_n_points()), intent(in) :: cf_buffer_stag
        !! Buffer function on staggered mesh
        real(GP), dimension(mesh_cano%get_n_points()), intent(in) :: cf_diss_cano
        !! Dissipation envelope function on canonical mesh
        real(GP), dimension(mesh_stag%get_n_points()), intent(in) :: cf_diss_stag
        !! Dissipation envelope function on staggered mesh
        real(GP), intent(inout) :: tau
        !! Time, at output tau + dtau
        type(variable_t), intent(inout) :: ne 
        !! Electron density
        real(GP), dimension(mesh_cano%get_n_points_inner()), intent(in) :: src_ne
        !! Particle source on inner grid points
        type(karniadakis_t), intent(inout) :: tstep_continuity
        !! Time-step integrator for electron density
        type(variable_t), intent(inout) :: pot
        !! Electrostatic potential
        type(variable_t), intent(inout) :: vort
        !! Generalised vorticity
        real(GP), dimension(mesh_cano%get_n_points_inner()), intent(in) :: src_vort
        !! Vorticity source on inner grid points
        type(karniadakis_t), intent(inout) :: tstep_vort
        !! Time-step integrator for vorticity equation
        type(variable_t), intent(inout) :: upar
        !! Parallel ion velocity
        real(GP), dimension(mesh_stag%get_n_points_inner()), intent(in) :: src_upar
        !! Parallel ion velocity source on inner grid points
        type(karniadakis_t), intent(inout) :: tstep_parmom
        !! Time-step integrator for parallel momentum equation
        type(variable_t), intent(inout) :: jpar
        !! Parallel current
        type(variable_t), intent(inout) :: apar
        !! Parallel electromagnetic potential
        type(variable_t), intent(inout) :: apar_fluct
        !! Fluctuation of apar used for flutter operators
        type(karniadakis_t), intent(inout) :: tstep_ohm
        !! Time-step integrator for Ohm's law
        type(variable_t), intent(inout) :: te
        !! Electron temperature
        real(GP), dimension(mesh_cano%get_n_points_inner()), intent(in) :: src_te 
        !! Electron temperature source on inner grid points
        type(karniadakis_t), intent(inout) :: tstep_etemp
        !! Time-step integrator for electron temperature equation
        type(variable_t), intent(inout) :: ti
        !! Ion temperature
        real(GP), dimension(mesh_cano%get_n_points_inner()), intent(in) :: src_ti 
        !! Ion temperature source on inner grid points
        type(karniadakis_t), intent(inout) :: tstep_itemp
        !! Time-step integrator for ion temperature equation
        type(gyroviscosity_t), intent(inout) :: gstress
        !! Gyroviscosity
        integer, intent(inout), dimension(tinfo_size) :: tinfo
        !! Info status (integers)
        real(GP), intent(inout), dimension(tinfo_size,2) :: rinfo
        !! Info status (float)
        logical, intent(out) :: success_plasma
        !! Success flag for timestep
        
        integer, save :: cnt = 0
        ! Function call counter 

        real(GP), parameter :: twothirds = 2.0_GP / 3.0_GP
        integer :: l, ki, kb, kg, kt, niter_parslv, cnt_matvec_parslv, tinfo_min, ierr
        real(GP) :: x, y, phi_cano, phi_stag, tau_adv, pen_fac, true_res
        real(GP) :: heatflux_coeff_e, heatflux_coeff_i, flutter_fac_cano, flutter_fac_stag, dia_fac
        real(GP) :: fac_free_streaming_e, fac_free_streaming_i
        real(GP) :: crv_pot, crv_logne, crv_logte, crv_logti, &
                    visc_ion, visc_cg, visc_parmom, &
                    divjpar, divupar, divvpar, divqcond_te, divqcond_ti, diss_par_ne, diss_par_vort, &
                    upar_gradpar_logne_full, vpar_gradpar_logte_full, upar_gradpar_logti_full, &
                    gradpar_upar, gradpar_pot, gradpar_jpar_over_n, &
                    flutter_grad_logne_full, flutter_grad_logte_full, flutter_grad_logti_full, flutter_div_jpar_full, &
                    flutter_div_upar_full, flutter_div_vpar_full, flutter_grad_jpar_over_n_stag, flutter_grad_upar_stag, &
                    flutter_grad_pot_stag, flutter_grad_te_stag, flutter_grad_ti_stag, flutter_grad_logne_stag, &
                    perpdiss_coeff_ne_in_teeq, perpdiss_coeff_ne_in_tieq, buffer_coeff_ne_in_teeq, buffer_coeff_ne_in_tieq

        integer, dimension(mesh_cano%get_n_points_boundary()) :: bnd_descrs_nmn_cano
        integer, dimension(mesh_stag%get_n_points_boundary()) :: bnd_descrs_nmn_stag

        real(GP), dimension(mesh_cano%get_n_points()) :: pe, pi

        real(GP), dimension(mesh_cano%get_n_points_inner()) :: logne_pen, logte_pen, logti_pen, vort_pen, pot_pen
        real(GP), dimension(mesh_stag%get_n_points_inner()) :: upar_pen, psipar_pen
        
        real(GP), dimension(mesh_cano%get_n_points_inner()) :: diss_perp_ne, diss_perp_te, diss_perp_ti, diss_perp_pe, diss_perp_pi
        real(GP), dimension(mesh_cano%get_n_points_inner()) :: diss_perp_vort, crv_gstress
        real(GP), dimension(mesh_stag%get_n_points_inner()) :: diss_perp_upar, diss_perp_ohm, pargrad_gstress32

        real(GP), dimension(mesh_cano%get_n_points()) :: dlogne, dlogte, dlogti, dvort, gflow
        real(GP), dimension(mesh_stag%get_n_points()) :: dupar, dohm
        
        real(GP), dimension(mesh_cano%get_n_points()) :: ne_adv, logne_adv, te_adv, logte_adv, ti_adv, logti_adv, pot_adv, vort_adv
        real(GP), dimension(mesh_stag%get_n_points()) :: upar_adv, apar_adv, jpar_adv

        real(GP), dimension(mesh_cano%get_n_points()) ::  lambda_par, xi_par, rhs_par
        real(GP), dimension(mesh_stag%get_n_points()) ::  co_par
        real(GP), dimension(mesh_stag%get_n_points()) ::  lambda_par_adj, xi_par_adj, fcx_par_adj, rhs_par_adj
        real(GP), dimension(mesh_cano%get_n_points()) ::  co_par_adj
        
        real(GP), dimension(mesh_cano%get_n_points()) :: polarisation_coeff, flutter_paradv_coeff, cano_apar_over_btor
        
        ! Set up some basic parameters ---------------------------------------------------------------------------------------------
        call perf_start('timestep')
        
        cnt = cnt + 1 
        phi_cano = mesh_cano%get_phi() 
        phi_stag = mesh_stag%get_phi()
        tau_adv = tau + dtau
        
        ! Factors for free streaming heat fluxes
        fac_free_streaming_e = chipar0_e * sqrt(mass_ratio_ei) / (free_streaming_fraction_e * free_streaming_limiter_qfac )   
        fac_free_streaming_i = chipar0_i / (sqrt(tratio) * free_streaming_fraction_i * free_streaming_limiter_qfac )
        
        ! Setup structures at first call -------------------------------------------------------------------------------------------
        if (cnt == 1) then
            call setup_work_variables()
            call setup_halo_structure()

            ! Allocate and initialise additional storage
            allocate(apar_pen_guess(mesh_stag%get_n_points()),source = 0.0_GP)
            allocate(src_floor_ne(mesh_cano%get_n_points()), source=0.0_GP)

            call pot_store%init_storage(mesh_cano%get_n_points(), tstep_order-1, 1)
            call ne_store%init_storage(mesh_cano%get_n_points(), tstep_order-1, 1)
            call ti_store%init_storage(mesh_cano%get_n_points(), tstep_order-1, 1)
            call jpar_store%init_storage(mesh_stag%get_n_points(), tstep_order-1, 1)
            call apar_pen_store%init_storage(mesh_stag%get_n_points(), tstep_order, 1)
            call apar_store%init_storage(mesh_stag%get_n_points(), tstep_order-1, 1)
            !$omp parallel default(none) private(l, kt) &
            !$omp          shared(mesh_cano, mesh_stag, tstep_order, &
            !$omp                 pot, jpar, apar, &
            !$omp                 pot_store, jpar_store, apar_store, apar_pen_store)
            do kt = 1, tstep_order-1
                !$omp do
                do l = 1, mesh_cano%get_n_points()
                    pot_store%vstore(l, kt, 1) = pot%vals(l)
                enddo
                !$omp end do
                !$omp do
                do l = 1, mesh_stag%get_n_points()
                    jpar_store%vstore(l, kt, 1) = jpar%vals(l)
                    apar_store%vstore(l, kt, 1) = apar%vals(l)
                enddo
                !$omp end do
            enddo
            do kt = 1, tstep_order
                !$omp do
                do l = 1, mesh_stag%get_n_points()
                    apar_pen_store%vstore(l, kt, 1) = apar%vals(l)
                enddo
                !$omp end do
            enddo
            !$omp end parallel

            if (bnddescr_pot_core == BND_BRAGTYPE_ZONAL_NEUMANN) then
                call pot_firstsolve_store%init_storage(mesh_cano%get_n_points(), tstep_order, 1 )
                !$omp parallel default(none) private(l, kt) &
                !$omp          shared(mesh_cano, tstep_order, pot, pot_firstsolve_store)
                do kt = 1, tstep_order
                    !$omp do
                    do l = 1, mesh_cano%get_n_points()
                       pot_firstsolve_store%vstore(l, kt, 1) = pot%vals(l)
                    enddo
                    !$omp end do
                enddo
                !$omp end parallel
            endif

            ! Initialise logarithmic quantities (for subsequent time steps, they are pre-computed at the end of the routine).
            ! Also initialise storage for polarisation advancement (only important for MMS).
            !$omp parallel default(none) private(l) &
            !$omp          shared(mesh_cano, ne, te, ti, tstep_continuity, tstep_itemp, &
            !$omp                 logne, logte, logti, ne_store, ti_store)
            !$omp do
            do l = 1, mesh_cano%get_n_points()
                logne%vals(l) = log(ne%vals(l))
                logte%vals(l) = log(te%vals(l))
                logti%vals(l) = log(ti%vals(l))
                ne_store%vstore(l,:,1) = exp(tstep_continuity%vstore(l,:,1))
                ti_store%vstore(l,:,1) = exp(tstep_itemp%vstore(l,:,1))
            enddo
            !$omp end do
            !$omp end parallel

            if (heatflux_timplicit_e .and. (bnddescr_te_core == BND_BRAGTYPE_ZONAL_NEUMANN)) then
                call logte_firstsolve_store%init_storage(mesh_cano%get_n_points(), tstep_order, 1 )
                !$omp parallel default(none) private(l, kt) &
                !$omp          shared(mesh_cano, tstep_order, logte, logte_firstsolve_store)
                do kt = 1, tstep_order
                    !$omp do
                    do l = 1, mesh_cano%get_n_points()
                       logte_firstsolve_store%vstore(l, kt, 1) = logte%vals(l)
                    enddo
                    !$omp end do
                enddo
                !$omp end parallel
            endif

            if (heatflux_timplicit_i .and. (bnddescr_ti_core == BND_BRAGTYPE_ZONAL_NEUMANN)) then
                call logti_firstsolve_store%init_storage(mesh_cano%get_n_points(), tstep_order, 1 )
                !$omp parallel default(none) private(l, kt) &
                !$omp          shared(mesh_cano, tstep_order, logti, logti_firstsolve_store)
                do kt = 1, tstep_order
                    !$omp do
                    do l = 1, mesh_cano%get_n_points()
                       logti_firstsolve_store%vstore(l, kt, 1) = logti%vals(l)
                    enddo
                    !$omp end do
                enddo
                !$omp end parallel
            endif

        endif

        ! Halo communication (first) --------------------------------------------------------------
        call perf_start('../comm1')

        call jpar%start_comm_halos(comm_handler)
        call jpar%finalize_comm_halos(comm_handler)

        call upar%start_comm_halos(comm_handler)
        call upar%finalize_comm_halos(comm_handler)

        call ne%start_comm_halos(comm_handler)
        call ne%finalize_comm_halos(comm_handler)
 
        call te%start_comm_halos(comm_handler)
        call te%finalize_comm_halos(comm_handler)

        call ti%start_comm_halos(comm_handler)
        call ti%finalize_comm_halos(comm_handler)

        call pot%start_comm_halos(comm_handler)
        call pot%finalize_comm_halos(comm_handler)

        call vort%start_comm_halos(comm_handler)
        call vort%finalize_comm_halos(comm_handler)

        call logne%start_comm_halos(comm_handler)
        call logne%finalize_comm_halos(comm_handler)

        call logte%start_comm_halos(comm_handler)
        call logte%finalize_comm_halos(comm_handler)

        call logti%start_comm_halos(comm_handler)
        call logti%finalize_comm_halos(comm_handler)

        call apar_fluct%start_comm_halos(comm_handler)
        call apar_fluct%finalize_comm_halos(comm_handler)

        call perf_stop('../comm1')


        ! Heat flux auxiliaries -------------------------------------------------------------------
        call perf_start('../omp1:aux')

        !$omp parallel default(none) &
        !$omp          private(l, ki, kb) &
        !$omp          shared(equi, mesh_cano, equi_on_cano, opsinplane_cano, mesh_stag, equi_on_stag, opsinplane_stag, map, &
        !$omp                 floor_ne, floor_te, floor_ti, &
        !$omp                 swb_etemp_flutter_pardiff_grad, swb_itemp_flutter_pardiff_grad, &
        !$omp                 ne, stag_ne, te, stag_te, ti, stag_ti, pot, stag_pot, &
        !$omp                 upar, cano_upar, jpar, cano_jpar, apar_fluct, cano_apar_fluct, &
        !$omp                 logne, gradpar_logne, logte, gradpar_logte, logti, &
        !$omp                 qcond_ti, qcond_te, gradpar_logti, gradpar_ne, vort, gradpar_vort, &
        !$omp                 heat_gradpar_logte_stag, heat_gradpar_logti_stag, bnd_descrs_nmn_cano, bnd_descrs_nmn_stag)

        call map%lift_cano_to_stag(ne%vals, ne%hfwd, stag_ne%vals)
        call map%lift_cano_to_stag(te%vals, te%hfwd, stag_te%vals)
        call map%lift_cano_to_stag(ti%vals, ti%hfwd, stag_ti%vals)
        call map%lift_cano_to_stag(pot%vals, pot%hfwd, stag_pot%vals)

        call map%flat_stag_to_cano(upar%vals, upar%hbwd, cano_upar%vals)
        call map%flat_stag_to_cano(jpar%vals, jpar%hbwd, cano_jpar%vals)
        call map%flat_stag_to_cano(apar_fluct%vals, apar_fluct%hbwd, cano_apar_fluct%vals)

        call map%par_grad(logne%vals, logne%hfwd, gradpar_logne%vals)
        call map%par_grad(logte%vals, logte%hfwd, gradpar_logte%vals)
        call map%par_grad(logti%vals, logti%hfwd, gradpar_logti%vals)
        call map%par_grad(ne%vals, ne%hfwd, gradpar_ne%vals)
        call map%par_grad(vort%vals, vort%hfwd, gradpar_vort%vals)

        !$omp do
        do l = 1, mesh_stag%get_n_points()
            ! Apply floor to staggered ne, Te, Ti
            stag_ne%vals(l) = max(stag_ne%vals(l), floor_ne) 
            stag_te%vals(l) = max(stag_te%vals(l), floor_te) 
            stag_ti%vals(l) = max(stag_ti%vals(l), floor_ti)
        enddo
        !$omp end do
        !$omp do
        do ki = 1, mesh_stag%get_n_points_inner()
            l = mesh_stag%inner_indices(ki)
            heat_gradpar_logte_stag%vals(l) = gradpar_logte%vals(l) &
                    + swb_etemp_flutter_pardiff_grad * &
                    opsinplane_stag%flutter_grad(apar_fluct%vals, stag_te%vals, l)/stag_te%vals(l)
            heat_gradpar_logti_stag%vals(l) = gradpar_logti%vals(l) &
                    + swb_itemp_flutter_pardiff_grad * &
                    opsinplane_stag%flutter_grad(apar_fluct%vals, stag_ti%vals, l)/stag_ti%vals(l)
        enddo
        !$omp end do
        !$omp do
        do kb = 1, mesh_cano%get_n_points_boundary()
            bnd_descrs_nmn_cano(kb) = BND_TYPE_NEUMANN
        enddo
        !$omp end do
        !$omp do
        do kb = 1, mesh_stag%get_n_points_boundary()
            bnd_descrs_nmn_stag(kb) = BND_TYPE_NEUMANN
        enddo
        !$omp end do
        !$omp end parallel
        call perf_stop('../omp1:aux')


        ! Calculation of the heat flux according to the selected model ----------------------------
        ! (here only Landau or explicit treatment, implicit treatment is done later )
        call perf_start('../heat_flux_explicit')
        
        select case(heatflux_model)
            case('BRAGINSKII_LIM')
                call heat_flux_braginskii(mesh_stag, stag_te%vals, fac_free_streaming_e, &
                        heat_gradpar_logte_stag%vals, stag_ne%vals, qcond_te%vals, "elec", bnd_descrs_nmn_stag) 
                call heat_flux_braginskii(mesh_stag, stag_ti%vals, fac_free_streaming_i, &
                        heat_gradpar_logti_stag%vals, stag_ne%vals, qcond_ti%vals, "ions", bnd_descrs_nmn_stag)    
            case('FREE_STREAMING')
                call heat_flux_free_streaming(mesh_stag, stag_te%vals, stag_ne%vals, &
                        heat_gradpar_logte_stag%vals, qcond_te%vals, 'elec', bnd_descrs_nmn_stag)
                call heat_flux_free_streaming(mesh_stag, stag_ti%vals, stag_ne%vals, &
                        heat_gradpar_logti_stag%vals, qcond_ti%vals, 'ions', bnd_descrs_nmn_stag)          
            case('LANDAU')
                call heat_flux_landau(comm_handler, equi, equi_on_cano, equi_on_stag, mesh_cano, mesh_stag, &
                                      map, penalisation_stag, opsinplane_cano, opsinplane_stag, boundaries, &
                                      stag_ne%vals, stag_te%vals, stag_ti%vals, heat_gradpar_logte_stag%vals, & 
                                      heat_gradpar_logti_stag%vals, tau, qcond_te%vals, "elec", &
                                      apar_fluct, cano_apar_fluct, bnd_descrs_nmn_cano, &
                                      iters=tinfo(12:23), ress=rinfo(12:23,1), true_ress=rinfo(12:23,2))
                call heat_flux_landau(comm_handler, equi, equi_on_cano, equi_on_stag, mesh_cano, mesh_stag, &
                                      map, penalisation_stag, opsinplane_cano, opsinplane_stag, boundaries, &
                                      stag_ne%vals, stag_te%vals, stag_ti%vals, heat_gradpar_logte_stag%vals, & 
                                      heat_gradpar_logti_stag%vals, tau, qcond_ti%vals, "ions", &
                                      apar_fluct, cano_apar_fluct, bnd_descrs_nmn_cano, &
                                      iters=tinfo(24:35), ress=rinfo(24:35,1), true_ress=rinfo(24:35,2))
            case default
                call handle_error('Heat_flux_model not valid', &
                              GRILLIX_ERR_OTHER, __LINE__, __FILE__, &
                              error_info_t(heatflux_model))
        end select
                
        call perf_stop('../heat_flux_explicit')

        ! Calculation of gyroviscosity ------------------------------------------------------------
        ! (here only Landau or explicit treatment, implicit treatment is done later )
        call perf_start('../gyroviscosity_explicit')

        call gstress%drive(comm_handler, mesh_cano, mesh_stag, equi_on_stag, &
                           ne%vals, ti%vals, upar%vals, qcond_ti%vals)

        call gstress%eval_gstress(mesh_cano, equi_on_cano, opsinplane_cano, map, &
                                  ne%vals, pot%vals, ti%vals, cano_apar_fluct%vals, gflow, &
                                  eval_flow_crv=.true., eval_flow_par=.true., &
                                  eval_heat_crv=.false., eval_heat_par=.false.)

        call gstress%eval_curvature(mesh_cano, equi_on_cano, opsinplane_cano, map, &
                                    ne%vals, pot%vals, ti%vals, cano_apar_fluct%vals, crv_gstress)

        call gstress%eval_pargrad_b32(comm_handler, mesh_cano, mesh_stag, equi_on_cano, equi_on_stag, &
                                      opsinplane_cano, opsinplane_stag, map, &
                                      ne%vals, pot%vals, ti%vals, &
                                      apar_fluct%vals, cano_apar_fluct%vals, &
                                      pargrad_gstress32, &
                                      eval_flow_crv=.true., &
                                      eval_flow_par=(.not.viscosity_timplicit_upar), &
                                      eval_heat_crv=.true., &
                                      eval_heat_par=.true.)

        call perf_stop('../gyroviscosity_explicit')


        ! Communicate heat fluxes --------------------------------------------------------------------------------------------------
        call perf_start('../comm2')

        call qcond_te%start_comm_halos(comm_handler)
        call qcond_te%finalize_comm_halos(comm_handler)

        call qcond_ti%start_comm_halos(comm_handler)
        call qcond_ti%finalize_comm_halos(comm_handler)

        call perf_stop('../comm2')


        ! Compute auxiliary variables ----------------------------------------------------------------------------------------------
        call perf_start('../omp2:aux')

        !$omp parallel default(none) &
        !$omp          private(l, ki, kb, crv_pot, crv_logne, crv_logti) &
        !$omp          shared(equi, mesh_cano, equi_on_cano, opsinplane_cano, mesh_stag, equi_on_stag, opsinplane_stag, map, &
        !$omp                 beta, mass_ratio_ei, tratio, free_streaming_limiter_qfac, &
        !$omp                 floor_ne, floor_te, floor_ti, &
        !$omp                 ne, stag_ne, te, ti, pe, pi, pot, apar, &
        !$omp                 upar, cano_upar, jpar, cano_jpar, apar_fluct, cano_apar_fluct, &
        !$omp                 logne, gradpar_logne, logte, gradpar_logte, logti, gradpar_logti, &
        !$omp                 qcond_ti, qcond_te, cano_qcond_ti, cano_qcond_te, &
        !$omp                 upar_gradpar_logne, vpar, upar_gradpar_logti, vpar_gradpar_logte, jpar_over_n_stag, psipar, &
        !$omp                 jpar_over_n_full, bnd_descrs_nmn_cano)

        ! Map conductive heat fluxes from the staggered onto the canonical grid.
        ! NOTE(review): these calls are executed by every thread of the enclosing
        ! parallel region - presumably flat_stag_to_cano contains orphaned !$omp
        ! worksharing; confirm, otherwise each thread redundantly writes the same values.
        call map%flat_stag_to_cano(qcond_ti%vals, qcond_ti%hbwd, cano_qcond_ti%vals)
        call map%flat_stag_to_cano(qcond_te%vals, qcond_te%hbwd, cano_qcond_te%vals)

        ! Auxiliaries on the canonical grid: pressures pe = Te*ne, pi = Ti*ne, and jpar/ne
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            ! Auxiliary quantities
            pe(l) = te%vals(l) * ne%vals(l)
            pi(l) = ti%vals(l) * ne%vals(l)
            jpar_over_n_full%vals(l) = cano_jpar%vals(l)/ne%vals(l)
        enddo
        !$omp end do

        ! Auxiliaries on the staggered grid:
        !   vpar   = upar - jpar/ne               (presumably the electron parallel velocity - confirm)
        !   psipar = beta*apar + mass_ratio_ei*jpar/ne
        !            (generalised electromagnetic potential; second term presumably electron inertia - confirm)
        !$omp do
        do l = 1, mesh_stag%get_n_points()
            ! Auxiliary quantities
            upar_gradpar_logne%vals(l) = upar%vals(l) * gradpar_logne%vals(l)
            vpar%vals(l) = upar%vals(l) - jpar%vals(l)/stag_ne%vals(l)
            vpar_gradpar_logte%vals(l) = vpar%vals(l) * gradpar_logte%vals(l)
            upar_gradpar_logti%vals(l) = upar%vals(l) * gradpar_logti%vals(l)
            jpar_over_n_stag%vals(l)  = jpar%vals(l)/stag_ne%vals(l)
            psipar%vals(l) = beta * apar%vals(l) + mass_ratio_ei * jpar%vals(l) / stag_ne%vals(l)
        enddo
        !$omp end do
        !$omp end parallel

        call perf_stop('../omp2:aux')


        ! Communication on derived variables ---------------------------------------------------------------------------------------
        call perf_start('../comm3')

        call upar_gradpar_logne%start_comm_halos(comm_handler)
        call upar_gradpar_logne%finalize_comm_halos(comm_handler)

        call upar_gradpar_logti%start_comm_halos(comm_handler)
        call upar_gradpar_logti%finalize_comm_halos(comm_handler)

        call vpar_gradpar_logte%start_comm_halos(comm_handler)
        call vpar_gradpar_logte%finalize_comm_halos(comm_handler)

        call vpar%start_comm_halos(comm_handler)
        call vpar%finalize_comm_halos(comm_handler)

        call cano_upar%start_comm_halos(comm_handler)
        call cano_upar%finalize_comm_halos(comm_handler)

        call gradpar_ne%start_comm_halos(comm_handler)
        call gradpar_ne%finalize_comm_halos(comm_handler)

        call gradpar_vort%start_comm_halos(comm_handler)
        call gradpar_vort%finalize_comm_halos(comm_handler)

        call jpar_over_n_full%start_comm_halos(comm_handler)
        call jpar_over_n_full%finalize_comm_halos(comm_handler)
        
        call perf_stop('../comm3')


        ! Get penalisation values --------------------------------------------------------------------------------------------------
        call perf_start('../penalisation1')

        call logne_penalisation(equi, mesh_cano, map, penalisation_cano, logne, logne_pen)
        
        call logte_penalisation(equi, mesh_cano, map, penalisation_cano, &
                                ne, te, logte, cano_upar, logte_pen)
        
        call logti_penalisation(equi, mesh_cano, map, penalisation_cano, &
                                ne, ti, logti, cano_upar, logti_pen)

        call upar_penalisation(equi, equi_on_stag, mesh_stag, map, penalisation_stag, opsinplane_stag, &
                                upar, stag_te, stag_ti, stag_pot, upar_pen)
                                
        call vort_penalisation(equi, mesh_cano, map, penalisation_cano, vort, vort_pen)

        call perf_stop('../penalisation1')


        ! Computation of explicit terms --------------------------------------------------------------------------------------------
        call perf_start('../omp3:main')
        ! Account for handling of perpendicular viscosity on density in temperature equations:
        ! when perpendicular diffusion acts on a pressure (coefficient effectively non-zero,
        ! i.e. > GP_EPS), the density-diffusion contribution contained in the pressure term
        ! is compensated in the corresponding temperature equation (see the dissperp terms
        ! below), so the density coefficient is carried along; otherwise it is zeroed and no
        ! compensation is applied.
        if (perpdiss_coeff_pe > GP_EPS) then
            perpdiss_coeff_ne_in_teeq = perpdiss_coeff_ne
        else
            perpdiss_coeff_ne_in_teeq = 0.0_GP
        endif
        if (perpdiss_coeff_pi > GP_EPS) then
            perpdiss_coeff_ne_in_tieq = perpdiss_coeff_ne
        else
            perpdiss_coeff_ne_in_tieq = 0.0_GP
        endif
        ! Same bookkeeping for the buffer-zone diffusion coefficients.
        if (buffer_coeff_pe > GP_EPS) then
            buffer_coeff_ne_in_teeq = buffer_coeff_ne
        else
            buffer_coeff_ne_in_teeq = 0.0_GP
        endif
        if (buffer_coeff_pi > GP_EPS) then
            buffer_coeff_ne_in_tieq = buffer_coeff_ne
        else
            buffer_coeff_ne_in_tieq = 0.0_GP
        endif
        !$omp parallel default(none) &
        !$omp          private(ki, kb, kg, l, x, y, pen_fac, crv_pot, crv_logne, crv_logte, crv_logti, &
        !$omp                  visc_ion, visc_cg, visc_parmom, divjpar, divupar, divvpar, divqcond_te, divqcond_ti, diss_par_ne, diss_par_vort, &
        !$omp                  upar_gradpar_logne_full, vpar_gradpar_logte_full, upar_gradpar_logti_full, &
        !$omp                  gradpar_upar, gradpar_pot, gradpar_jpar_over_n, &
        !$omp                  flutter_grad_logne_full, flutter_grad_logte_full, flutter_grad_logti_full, flutter_div_jpar_full, & 
        !$omp                  flutter_div_upar_full, flutter_div_vpar_full, flutter_grad_jpar_over_n_stag, flutter_grad_pot_stag, &
        !$omp                  flutter_grad_upar_stag, flutter_grad_te_stag, flutter_grad_ti_stag, flutter_grad_logne_stag) &
        !$omp          shared(equi, map, tau, &
        !$omp                 equi_on_cano, mesh_cano, phi_cano, penalisation_cano, opsinplane_cano, cf_buffer_cano, cf_diss_cano, &
        !$omp                 equi_on_stag, mesh_stag, phi_stag, penalisation_stag, opsinplane_stag, cf_buffer_stag, cf_diss_stag, &
        !$omp                 mms_on, delta, beta, mass_ratio_ei, tratio, etapar0, nu_e0, eta_i0, thermal_force_coeff, &
        !$omp                 pen_epsinv, heatflux_timplicit_e, heatflux_timplicit_i, &
        !$omp                 perpdiss_order_ne, perpdiss_coeff_ne, pardiss_coeff_ne, buffer_coeff_ne, &
        !$omp                 perpdiss_order_te, perpdiss_coeff_te, buffer_coeff_te, &
        !$omp                 perpdiss_order_ti, perpdiss_coeff_ti, buffer_coeff_ti, &
        !$omp                 perpdiss_order_pe, perpdiss_coeff_pe, perpdiss_coeff_ne_in_teeq, buffer_coeff_pe, buffer_coeff_ne_in_teeq, &
        !$omp                 perpdiss_order_pi, perpdiss_coeff_pi, perpdiss_coeff_ne_in_tieq, buffer_coeff_pi, buffer_coeff_ne_in_tieq, &
        !$omp                 perpdiss_order_vort, perpdiss_coeff_vort, pardiss_coeff_vort, &
        !$omp                 perpdiss_order_upar, perpdiss_coeff_upar, perpdiss_order_ohm, perpdiss_coeff_ohm, &
        !$omp                 buffer_coeff_vort, buffer_coeff_upar, buffer_coeff_ohm, &
        !$omp                 swb_cont_exb, swb_cont_curv_pot, swb_cont_curv_te, swb_cont_curv_ne, swb_cont_dissperp, &
        !$omp                 swb_cont_paradv, swb_cont_divupar, swb_cont_divjpar, swb_cont_dissparallel, swb_cont_source, &
        !$omp                 swb_cont_flutter_paradv, swb_cont_flutter_divjpar, swb_cont_flutter_divupar, &
        !$omp                 swb_etemp_exb, swb_etemp_curv_pot, swb_etemp_curv_ne, swb_etemp_curv_te, swb_etemp_dissperp, &
        !$omp                 swb_etemp_paradv, swb_etemp_divjpar, swb_etemp_divvpar, swb_etemp_resist, swb_etemp_pardiff, &
        !$omp                 swb_etemp_flutter_pardiff_grad, swb_etemp_flutter_pardiff_div, swb_etemp_flutter_paradv, &
        !$omp                 swb_etemp_flutter_divvpar, swb_etemp_flutter_divjpar, swb_etemp_equipart, swb_etemp_source, & 
        !$omp                 swb_itemp_exb, swb_itemp_curv_pot, swb_itemp_curv_ne, swb_itemp_curv_te, swb_itemp_curv_ti, &
        !$omp                 swb_itemp_dissperp, swb_itemp_equipart, swb_itemp_vischeat, swb_itemp_source, &
        !$omp                 swb_itemp_paradv, swb_itemp_pardiff, swb_itemp_divupar, swb_itemp_divjpar, &
        !$omp                 swb_itemp_flutter_paradv, swb_itemp_flutter_divupar, swb_itemp_flutter_divjpar, &
        !$omp                 swb_itemp_flutter_pardiff_grad, swb_itemp_flutter_pardiff_div, &
        !$omp                 swb_vort_tecurvne, swb_vort_necurvte, swb_vort_necurvti, swb_vort_ticurvne, swb_vort_dissperp, &
        !$omp                 swb_vort_viscion, swb_vort_source, swb_vort_divjpar, swb_vort_dissparallel, &
        !$omp                 swb_vort_flutter_divjpar, &
        !$omp                 swb_upar_exb, swb_upar_curv, swb_upar_dissperp, swb_upar_source, swb_upar_viscion, &
        !$omp                 swb_upar_paradv, swb_upar_gradpar_n, swb_upar_gradparte, swb_upar_gradparti, &
        !$omp                 swb_upar_flutter_paradv, swb_upar_flutter_gradne, &
        !$omp                 swb_upar_flutter_gradte, swb_upar_flutter_gradti, &
        !$omp                 swb_ohm_exb, swb_ohm_dissperp, swb_ohm_paradv, swb_ohm_gradpar_pot, &
        !$omp                 swb_ohm_gradpar_ne, swb_ohm_gradpar_te, swb_ohm_flutter_paradv, swb_ohm_flutter_gradpot, &
        !$omp                 swb_ohm_flutter_gradne, swb_ohm_flutter_gradte, &
        !$omp                 ne, logne, te, logte, ti, logti, pe, pi, pot, vort, jpar, upar, vpar, psipar, apar_fluct, &
        !$omp                 qcond_te, cano_qcond_te, qcond_ti, cano_qcond_ti, &
        !$omp                 cano_upar, cano_apar_fluct, cano_jpar, jpar_over_n_full, &
        !$omp                 gradpar_ne, gradpar_vort, upar_gradpar_logne, vpar_gradpar_logte, upar_gradpar_logti, &
        !$omp                 stag_ne, stag_te, stag_ti, stag_pot, jpar_over_n_stag, &
        !$omp                 diss_perp_ne, diss_perp_te, diss_perp_ti, diss_perp_pe, diss_perp_pi, diss_perp_vort, diss_perp_upar, diss_perp_ohm, &
        !$omp                 gradpar_logne, gradpar_logte, gradpar_logti, &
        !$omp                 gstress, gflow, crv_gstress, pargrad_gstress32, &
        !$omp                 src_ne, logne_pen, dlogne, src_te, logte_pen, dlogte, src_ti, logti_pen, dlogti, &
        !$omp                 src_vort, vort_pen, dvort, src_upar, upar_pen, dupar, dohm)

        ! Apply perpendicular (in-plane) hyper-dissipation of the given order to each
        ! evolved field; results are stored in the corresponding diss_perp_* arrays.
        ! NOTE(review): called by all threads inside the enclosing parallel region -
        ! presumably apply_dissipation_inplane contains orphaned worksharing; confirm.
        call opsinplane_cano%apply_dissipation_inplane(ne%vals, perpdiss_order_ne, diss_perp_ne, cf_diss_cano)
        call opsinplane_cano%apply_dissipation_inplane(vort%vals, perpdiss_order_vort, diss_perp_vort, cf_diss_cano)
        call opsinplane_stag%apply_dissipation_inplane(upar%vals, perpdiss_order_upar, diss_perp_upar, cf_diss_stag)
        call opsinplane_stag%apply_dissipation_inplane(psipar%vals, perpdiss_order_ohm, diss_perp_ohm, cf_diss_stag)
        call opsinplane_cano%apply_dissipation_inplane(te%vals, perpdiss_order_te, diss_perp_te, cf_diss_cano)
        call opsinplane_cano%apply_dissipation_inplane(ti%vals, perpdiss_order_ti, diss_perp_ti, cf_diss_cano)
        call opsinplane_cano%apply_dissipation_inplane(pe, perpdiss_order_pe, diss_perp_pe, cf_diss_cano)
        call opsinplane_cano%apply_dissipation_inplane(pi, perpdiss_order_pi, diss_perp_pi, cf_diss_cano)
       
        ! Loop over canonical inner mesh points
        !$omp do
        do ki = 1, mesh_cano%get_n_points_inner()
            l = mesh_cano%inner_indices(ki)
            x = mesh_cano%get_x(l)
            y = mesh_cano%get_y(l)

            ! Penalisation factor             
            pen_fac = 1.0_GP - penalisation_cano%get_charfun_val(ki)

            ! Curvature terms
            crv_pot = opsinplane_cano%curvature(pot%vals, l)
            crv_logne = opsinplane_cano%curvature(logne%vals, l)
            crv_logte = opsinplane_cano%curvature(logte%vals, l)
            crv_logti = opsinplane_cano%curvature(logti%vals, l)

            ! Parallel operators ...
            ! ...parallel divergence
            divjpar = map%par_divb_single(jpar%vals, jpar%hbwd, l)
            divupar = map%par_divb_single(upar%vals, upar%hbwd, l)
            divvpar = map%par_divb_single(vpar%vals, vpar%hbwd, l)

            if (.not.heatflux_timplicit_e) then
                divqcond_te = map%par_divb_single(qcond_te%vals, qcond_te%hbwd, l)
                divqcond_te = divqcond_te + swb_etemp_flutter_pardiff_div *  &
                              opsinplane_cano%flutter_div(cano_apar_fluct%vals, cano_qcond_te%vals, l)
            else
                divqcond_te = 0.0_GP
            endif
            if (.not.heatflux_timplicit_i) then
                divqcond_ti = map%par_divb_single(qcond_ti%vals, qcond_ti%hbwd, l)
                divqcond_ti = divqcond_ti + swb_itemp_flutter_pardiff_div * &
                              opsinplane_cano%flutter_div(cano_apar_fluct%vals, cano_qcond_ti%vals, l)
            else
                divqcond_ti = 0.0_GP
            endif
            diss_par_ne = map%par_divb_single(gradpar_ne%vals, gradpar_ne%hbwd, l)
            diss_par_vort = map%par_divb_single(gradpar_vort%vals, gradpar_vort%hbwd, l)

            !! ... parallel mappings - map to canonical
            upar_gradpar_logne_full = map%flat_stag_to_cano_single(upar_gradpar_logne%vals, upar_gradpar_logne%hbwd, l)
            vpar_gradpar_logte_full = map%flat_stag_to_cano_single(vpar_gradpar_logte%vals, vpar_gradpar_logte%hbwd, l)
            upar_gradpar_logti_full = map%flat_stag_to_cano_single(upar_gradpar_logti%vals, upar_gradpar_logti%hbwd, l)
            
            ! Magnetic flutter terms ...
            ! ... on canonical grid

            flutter_grad_logne_full = opsinplane_cano%flutter_grad(cano_apar_fluct%vals, logne%vals, l)
            flutter_grad_logte_full = opsinplane_cano%flutter_grad(cano_apar_fluct%vals, logte%vals, l)
            flutter_grad_logti_full = opsinplane_cano%flutter_grad(cano_apar_fluct%vals, logti%vals, l)
            flutter_div_jpar_full= opsinplane_cano%flutter_div(cano_apar_fluct%vals, cano_jpar%vals, l) 
            flutter_div_upar_full = opsinplane_cano%flutter_div(cano_apar_fluct%vals, cano_upar%vals, l)
            flutter_div_vpar_full = flutter_div_upar_full - opsinplane_cano%flutter_div(cano_apar_fluct%vals, jpar_over_n_full%vals, l)

            ! Viscous ion heating term ~ gflow^2 / (eta_flow_v * ne * Ti)
            if (eta_i0 > GP_EPS) then
                visc_ion = 2.0_GP / (9.0_GP * gstress%eta_flow_v(l) * ne%vals(l) * ti%vals(l)) &
                           * gflow(l)**2
            else
                ! For eta_i0 = 0, visc_ion is zero but division by zero would occur in expression above
                visc_ion = 0.0_GP
            endif

            ! Viscous ion term in vorticity equation ( ~ C(G), curvature of the gyroviscous stress)
            visc_cg = -tratio / 6.0_GP * crv_gstress(ki)

            ! Continuity equation
            dlogne(l) = pen_fac * ( &
                        + swb_cont_exb * delta / equi_on_cano%btor(l) * opsinplane_cano%arakawa(pot%vals, logne%vals, l) &          ! ExB advection
                        + swb_cont_curv_pot * crv_pot &                                                                             ! Curvature term ne*C(pot)
                        - swb_cont_curv_te * te%vals(l) * crv_logte &                                                               ! Curvature term ne*C(te)
                        - swb_cont_curv_ne * te%vals(l) * crv_logne &                                                               ! Curvature term te*C(ne)
                        - swb_cont_paradv * upar_gradpar_logne_full &                                                               ! Parallel advection
                        - swb_cont_divupar * divupar &                                                                              ! Divergence of upar (introduce additional parameter?)
                        + swb_cont_divjpar * divjpar / ne%vals(l)  &                                                                ! Divergence of parallel current
                        - swb_cont_flutter_paradv * cano_upar%vals(l) * flutter_grad_logne_full &                                   ! Magnetic-flutter-caused parallel advection
                        + swb_cont_flutter_divjpar * flutter_div_jpar_full/ ne%vals(l) &                                            ! Magnetic-flutter-caused divergence of jpar
                        - swb_cont_flutter_divupar * flutter_div_upar_full &                                                        ! Magnetic-flutter-caused divergence of upar
                        + swb_cont_dissperp * perpdiss_coeff_ne * diss_perp_ne(ki) / ne%vals(l) &                                   ! Perpendicular dissipation
                        + swb_cont_dissparallel * pardiss_coeff_ne * diss_par_ne / ne%vals(l)  &                                    ! Parallel dissipation
                        + buffer_coeff_ne * opsinplane_cano%nabla_pol_sqrd(ne%vals, l, cf_buffer_cano) / ne%vals(l) &               ! Buffer zone diffusion
                        + swb_cont_source   * src_ne(ki) / ne%vals(l) &                                                             ! Particle source
                        ) &                                                                                                         
                        + penalisation_cano%get_charfun_val(ki) * pen_epsinv * logne_pen(ki)                                        ! Penalisation source

            ! Electron temperature equation (evolved as d/dt log(Te); each line is one physical term)
            dlogte(l) = pen_fac * ( &
                        + swb_etemp_exb * delta / equi_on_cano%btor(l) * opsinplane_cano%arakawa(pot%vals, logte%vals, l) &         ! ExB advection
                        + swb_etemp_curv_pot * twothirds * crv_pot &                                                                ! Curvature term ~C(pot)
                        - swb_etemp_curv_ne * twothirds * te%vals(l) * crv_logne &                                                  ! Curvature term ~C(ne)
                        - swb_etemp_curv_te * 3.5_GP * twothirds * te%vals(l) * crv_logte &                                         ! Curvature term ~C(te)
                        - swb_etemp_paradv * vpar_gradpar_logte_full &                                                              ! Parallel advection
                        + swb_etemp_divjpar * twothirds * thermal_force_coeff / ne%vals(l) * divjpar &                              ! Divergence of jpar
                        - swb_etemp_divvpar  * twothirds * divvpar &                                                                ! Divergence of vpar
                        + swb_etemp_resist   * twothirds * etapar0 / (sqrt(te%vals(l)**5) * ne%vals(l)) * (cano_jpar%vals(l)**2) &  ! Resistivity (Ohmic heating ~ jpar^2)
                        - swb_etemp_equipart * nu_e0 * mass_ratio_ei * 2.0_GP  * ne%vals(l) &                                       ! Equipartition
                                                / ( sqrt(te%vals(l)**5) ) * ( te%vals(l) - tratio * ti%vals(l) ) &
                        - swb_etemp_pardiff  * twothirds / ( ne%vals(l) * te%vals(l) ) * divqcond_te &                              ! Parallel heat diffusion
                        + swb_etemp_dissperp * ( &                                                                                  ! Perpendicular dissipation
                                perpdiss_coeff_te * diss_perp_te(ki) / te%vals(l) &
                              + (perpdiss_coeff_pe * diss_perp_pe(ki) / te%vals(l) - perpdiss_coeff_ne_in_teeq * diss_perp_ne(ki)) &
                                / ne%vals(l) ) &
                        + buffer_coeff_te * opsinplane_cano%nabla_pol_sqrd(te%vals, l, cf_buffer_cano) / te%vals(l) &               ! Buffer zone diffusion
                        + (  buffer_coeff_pe * opsinplane_cano%nabla_pol_sqrd(pe, l, cf_buffer_cano) / te%vals(l) &
                           - buffer_coeff_ne_in_teeq * opsinplane_cano%nabla_pol_sqrd(ne%vals, l, cf_buffer_cano)) &
                          / ne%vals(l) &
                        + swb_etemp_source * src_te(ki) / te%vals(l) &                                                              ! Electron temperature source
                        - swb_etemp_flutter_paradv * (cano_upar%vals(l) - jpar_over_n_full%vals(l)) * flutter_grad_logte_full &     ! Magnetic-flutter-caused parallel advection
                        - swb_etemp_flutter_divvpar * twothirds * flutter_div_vpar_full &                                           ! Magnetic-flutter-caused divergence of vpar
                        + swb_etemp_flutter_divjpar * twothirds * thermal_force_coeff / ne%vals(l) * flutter_div_jpar_full &        ! Magnetic-flutter-caused divergence of jpar
                        ) &
                        + penalisation_cano%get_charfun_val(ki) * pen_epsinv * logte_pen(ki)                                        ! Penalisation source

            ! Ion temperature equation
            dlogti(l) = pen_fac * ( &
                        + swb_itemp_exb * delta / equi_on_cano%btor(l) * opsinplane_cano%arakawa(pot%vals, logti%vals, l) &         ! ExB advection
                        + swb_itemp_curv_pot * twothirds * crv_pot &                                                                ! Curvature term ~C(pot)
                        - swb_itemp_curv_ne * twothirds * te%vals(l) * crv_logne &                                                  ! Curvature term ~C(ne)
                        - swb_itemp_curv_te * twothirds * te%vals(l) * crv_logte &                                                  ! Curvature term ~C(te)
                        + swb_itemp_curv_ti * twothirds * tratio * 2.5_GP * ti%vals(l) * crv_logti &                                ! Curvature term ~C(ti)
                        - swb_itemp_paradv * upar_gradpar_logti_full &                                                              ! Parallel advection
                        - swb_itemp_pardiff * twothirds / ( ne%vals(l) * ti%vals(l) ) * divqcond_ti &                               ! Parallel heat diffusion
                        - swb_itemp_divupar * twothirds * divupar &                                                                 ! Divergence of upar
                        + swb_itemp_divjpar * twothirds / ne%vals(l) * divjpar &                                                    ! Divergence of jpar
                        + swb_itemp_equipart * nu_e0 * mass_ratio_ei * 2.0_GP * ne%vals(l) &                                        ! Equipartition
                                                / ( sqrt(te%vals(l)**3) * ti%vals(l) ) * ( te%vals(l)/tratio - ti%vals(l) ) &   
                        + swb_itemp_dissperp * ( &                                                                                  ! Perpendicular dissipation
                                perpdiss_coeff_ti * diss_perp_ti(ki) / ti%vals(l) &
                              + (perpdiss_coeff_pi * diss_perp_pi(ki) / ti%vals(l) - perpdiss_coeff_ne_in_tieq * diss_perp_ne(ki)) &
                                / ne%vals(l) ) &
                        + buffer_coeff_ti * opsinplane_cano%nabla_pol_sqrd(ti%vals, l, cf_buffer_cano) / ti%vals(l) &               ! Buffer zone diffusion
                        + (  buffer_coeff_pi * opsinplane_cano%nabla_pol_sqrd(pi, l, cf_buffer_cano) / ti%vals(l) &
                           - buffer_coeff_ne_in_tieq * opsinplane_cano%nabla_pol_sqrd(ne%vals, l, cf_buffer_cano)) &
                          / ne%vals(l) &
                        + swb_itemp_vischeat * visc_ion &                                                                           ! Viscous ion heating
                        + swb_itemp_source   * src_ti(ki) / ti%vals(l) &                                                            ! Ion temperature source
                        - swb_itemp_flutter_paradv * cano_upar%vals(l) * flutter_grad_logti_full &                                  ! Magnetic-flutter-caused parallel advection
                        - swb_itemp_flutter_divupar * twothirds * flutter_div_upar_full &                                           ! Magnetic-flutter-caused divergence of upar
                        + swb_itemp_flutter_divjpar * twothirds / ne%vals(l) * flutter_div_jpar_full &                              ! Magnetic-flutter-caused divergence of jpar
                        ) &
                        + penalisation_cano%get_charfun_val(ki) * pen_epsinv * logti_pen(ki)                                        ! Penalisation source

            ! Vorticity equation
            dvort(l) = pen_fac * ( &
                        - swb_vort_tecurvne * te%vals(l) * ne%vals(l) * crv_logne &                                                 ! Curvature term ~Te*C(ne)
                        - swb_vort_necurvte * ne%vals(l) * te%vals(l) * crv_logte &                                                 ! Curvature term ~ne*C(Te)
                        - swb_vort_necurvti * tratio * ne%vals(l) * ti%vals(l) * crv_logti &                                        ! Curvature term ~tratio*ne*C(Ti)
                        - swb_vort_ticurvne * tratio * ti%vals(l) * ne%vals(l) * crv_logne &                                        ! Curvature term ~tratio*Ti*C(ne)
                        + swb_vort_divjpar * divjpar &                                                                              ! Divergence of parallel current
                        + swb_vort_viscion * visc_cg &                                                                              ! Ion gyroviscous dissipation
                        + swb_vort_dissperp * perpdiss_coeff_vort * diss_perp_vort(ki) &                                            ! Perpendicular Dissipation
                        + swb_vort_dissparallel * pardiss_coeff_vort * diss_par_vort &                                              ! Parallel dissipation   
                        + buffer_coeff_vort * opsinplane_cano%nabla_pol_sqrd(vort%vals, l, cf_buffer_cano) &                        ! Buffer zone diffusion
                        + swb_vort_source * src_vort(ki) &                                                                          ! Vorticity source
                        + swb_vort_flutter_divjpar* flutter_div_jpar_full &                                                         ! Magnetic-flutter-caused divergence of jpar
                        ) &         
                        + penalisation_cano%get_charfun_val(ki) * pen_epsinv *  vort_pen(ki)                                        ! Penalisation source

        enddo
        !$omp end do

        ! Loop over staggered inner mesh points
        !$omp do
        do ki = 1, mesh_stag%get_n_points_inner()
            l = mesh_stag%inner_indices(ki)    
            x = mesh_stag%get_x(l)
            y = mesh_stag%get_y(l)

            ! Penalisation factor             
            pen_fac = 1.0_GP - penalisation_stag%get_charfun_val(ki)

            ! Parallel gradients
            gradpar_upar = map%par_grad_single(cano_upar%vals, cano_upar%hfwd, l)
            gradpar_pot = map%par_grad_single(pot%vals, pot%hfwd, l)
            gradpar_jpar_over_n = map%par_grad_single(jpar_over_n_full%vals, jpar_over_n_full%hfwd, l)

            ! Magnetic flutter terms
            flutter_grad_jpar_over_n_stag = opsinplane_stag%flutter_grad(apar_fluct%vals, jpar_over_n_stag%vals, l)
            flutter_grad_upar_stag = opsinplane_stag%flutter_grad(apar_fluct%vals, upar%vals, l)
            flutter_grad_pot_stag = opsinplane_stag%flutter_grad(apar_fluct%vals, stag_pot%vals, l)
            flutter_grad_te_stag = opsinplane_stag%flutter_grad(apar_fluct%vals, stag_te%vals, l)
            flutter_grad_ti_stag = opsinplane_stag%flutter_grad(apar_fluct%vals, stag_ti%vals, l)
            flutter_grad_logne_stag = opsinplane_stag%flutter_grad(apar_fluct%vals, stag_ne%vals, l) / stag_ne%vals(l)

            ! Viscous ion term in parallel momentum equation
            ! Ion gyroviscous contribution to the parallel momentum RHS:
            ! -2*tratio/(3*n) * parallel gradient of the gyroviscous stress component
            ! (pargrad_gstress32 presumably holds grad_par of G_32 — confirm in gyroviscosity_m)
            visc_parmom = -2.0_GP * tratio / (3.0_GP * stag_ne%vals(l)) * pargrad_gstress32(ki)

            ! Parallel momentum equation (evaluated on the staggered mesh; pen_fac masks
            ! the physical RHS outside the penalised region, the last line adds the
            ! explicit part of the penalisation source)
            dupar(l) = pen_fac * ( &
                        + swb_upar_exb * delta / equi_on_stag%btor(l) * opsinplane_stag%arakawa(stag_pot%vals, upar%vals, l) &      ! ExB advection
                        - swb_upar_paradv * upar%vals(l) * gradpar_upar &                                                           ! Parallel advection
                        + swb_upar_curv * tratio * stag_ti%vals(l) * opsinplane_stag%curvature(upar%vals, l) &                      ! Curvature(upar)
                        - swb_upar_gradpar_n * (stag_te%vals(l) + tratio * stag_ti%vals(l)) * gradpar_logne%vals(l) &               ! par. p grad: n
                        - swb_upar_gradparTe * stag_te%vals(l) * gradpar_logte%vals(l) &                                            ! par. p grad: Te
                        - swb_upar_gradparTi * tratio * stag_ti%vals(l) * gradpar_logti%vals(l) &                                   ! par. p grad: Ti
                        + swb_upar_viscion * visc_parmom &                                                                          ! Ion gyroviscous dissipation
                        + swb_upar_dissperp * perpdiss_coeff_upar * diss_perp_upar(ki) &                                            ! Perpendicular dissipation
                        + buffer_coeff_upar * opsinplane_stag%nabla_pol_sqrd(upar%vals, l, cf_buffer_stag) &                        ! Buffer zone diffusion
                        + swb_upar_source   * src_upar(ki) &                                                                        ! Parallel momentum source
                        - swb_upar_flutter_paradv * upar%vals(l) * flutter_grad_upar_stag &                                      ! Magnetic-flutter-caused advection
                        - swb_upar_flutter_gradne * (stag_te%vals(l) + tratio * stag_ti%vals(l)) * flutter_grad_logne_stag  &    ! Magnetic-flutter-caused gradient of density
                        - swb_upar_flutter_gradte * flutter_grad_te_stag &                                                       ! Magnetic-flutter-caused gradient of electron temperature
                        - swb_upar_flutter_gradti * tratio * flutter_grad_ti_stag &                                            ! Magnetic-flutter-caused gradient of ion temperature
                        ) &
                        + penalisation_stag%get_charfun_val(ki) * pen_epsinv * upar_pen(ki)                                         ! Penalisation source

            ! Ohm's law (RHS for the electromagnetic-potential variable psipar; the
            ! penalisation contribution for this equation is applied separately later)
            dohm(l) = pen_fac * ( &
                        + swb_ohm_exb * delta * mass_ratio_ei / equi_on_stag%btor(l) &
                                             * opsinplane_stag%arakawa(stag_pot%vals, jpar_over_n_stag%vals, l) &                   ! ExB advection of inertial term
                        - swb_ohm_paradv * mass_ratio_ei * vpar%vals(l) * gradpar_jpar_over_n &                                     ! Parallel advection
                        - swb_ohm_gradpar_pot * gradpar_pot &                                                                       ! Parallel gradient of electrostatic potential
                        + swb_ohm_gradpar_ne * stag_te%vals(l) * gradpar_logne%vals(l) &                                            ! Parallel gradient of density
                        + swb_ohm_gradpar_te * (1.0_GP + thermal_force_coeff) * stag_te%vals(l) * gradpar_logte%vals(l) &           ! Parallel gradient of electron temperature
                        + swb_ohm_dissperp * perpdiss_coeff_ohm * diss_perp_ohm(ki) &                                               ! Perpendicular dissipation
                        - swb_ohm_flutter_paradv * mass_ratio_ei * vpar%vals(l) * flutter_grad_jpar_over_n_stag &                   ! Magnetic-flutter-caused advection
                        - swb_ohm_flutter_gradpot * flutter_grad_pot_stag &                                                         ! Magnetic-flutter-caused gradient of electrostatic potential
                        + swb_ohm_flutter_gradne * stag_te%vals(l) * flutter_grad_logne_stag &                                      ! Magnetic-flutter-caused gradient of density
                        + swb_ohm_flutter_gradte * (1.0_GP + thermal_force_coeff) * flutter_grad_te_stag &                          ! Magnetic-flutter-caused gradient of electron temperature
                        + buffer_coeff_ohm * opsinplane_stag%nabla_pol_sqrd(psipar%vals, l, cf_buffer_stag) &                       ! Buffer zone diffusion
                        ) ! Note: Penalisation applied later on

        enddo
        !$omp end do

        ! MMS source: add the analytic source terms of the method of manufactured
        ! solutions (code-verification runs only)
        if (mms_on) then
            ! Canonical-mesh quantities (density, temperatures, vorticity)
            !$omp do
            do ki = 1, mesh_cano%get_n_points_inner()
                l = mesh_cano%inner_indices(ki)
                x = mesh_cano%get_x(l)
                y = mesh_cano%get_y(l)

                ! Division by ne/te/ti converts the sources to the logarithmic
                ! form of the continuity/temperature equations
                dlogne(l) = dlogne(l) + mms_source_braginskii_continuity(equi, x, y, phi_cano, tau, &
                                          penalisation_cano%get_charfun_val(ki), logne_pen(ki), src_ne(ki)) / ne%vals(l)

                dlogte(l) = dlogte(l) + mms_source_braginskii_etemp(equi, x, y, phi_cano, tau, &
                                          penalisation_cano%get_charfun_val(ki), logte_pen(ki), src_te(ki)) / te%vals(l)

                dlogti(l) = dlogti(l) + mms_source_braginskii_itemp(equi, x, y, phi_cano, tau, &
                                          penalisation_cano%get_charfun_val(ki), logti_pen(ki), src_ti(ki)) / ti%vals(l)

                dvort(l) = dvort(l) + mms_source_braginskii_vorticity(equi, x, y, phi_cano, tau, &
                                          penalisation_cano%get_charfun_val(ki), vort_pen(ki), src_vort(ki))
            enddo
            !$omp end do
            ! Staggered-mesh quantities (parallel momentum, Ohm's law)
            !$omp do
            do ki = 1, mesh_stag%get_n_points_inner()
                l = mesh_stag%inner_indices(ki)
                x = mesh_stag%get_x(l)
                y = mesh_stag%get_y(l)

                dupar(l) = dupar(l) + mms_source_braginskii_parmomentum(equi, x, y, phi_stag, tau, &
                                          penalisation_stag%get_charfun_val(ki), upar_pen(ki), src_upar(ki))

                ! Ohm MMS source takes no explicit penalisation RHS value here (0.0)
                dohm(l)  = dohm(l) + mms_source_braginskii_ohm(equi, x, y, phi_stag, tau, &
                                          penalisation_stag%get_charfun_val(ki), 0.0_GP)
            enddo
            !$omp end do
        endif

        ! Set boundary and ghost points to dummy values for time-step integrators
        ! (the advanced fields get their proper boundary/ghost values afterwards via
        ! set_perpbnds / extrapolate_ghost_points; zeroing here keeps the RHS arrays
        ! fully defined for the multistep advance)
        !$omp do
        do kb = 1, mesh_cano%get_n_points_boundary()
            l = mesh_cano%boundary_indices(kb)
            dlogne(l) = 0.0_GP
            dlogte(l) = 0.0_GP
            dlogti(l) = 0.0_GP
            dvort(l) = 0.0_GP
        enddo
        !$omp end do
        !$omp do
        do kb = 1, mesh_stag%get_n_points_boundary()
            l = mesh_stag%boundary_indices(kb)
            dupar(l) = 0.0_GP
            dohm(l) = 0.0_GP
        enddo
        !$omp end do
        
        !$omp do
        do kg = 1, mesh_cano%get_n_points_ghost()
            l = mesh_cano%ghost_indices(kg)
            dlogne(l) = 0.0_GP
            dlogte(l) = 0.0_GP
            dlogti(l) = 0.0_GP
            dvort(l) = 0.0_GP
        enddo
        !$omp end do
        !$omp do
        do kg = 1, mesh_stag%get_n_points_ghost()
            l = mesh_stag%ghost_indices(kg)
            dupar(l) = 0.0_GP
            dohm(l) = 0.0_GP
        enddo
        !$omp end do

        !$omp end parallel
        call perf_stop('../omp3:main')


        ! Explicit time advancement of variables -----------------------------------------------------------------------------------
        call perf_start('../tadvance_braginskii')

        ! Multistep advance (Karniadakis/BDF-type integrators, see multistep_m):
        ! d* is the explicit RHS, second argument the current field, *_adv receives
        ! the advanced field
        call tstep_continuity%advance(dlogne, logne%vals, logne_adv)
        call tstep_etemp%advance(dlogte, logte%vals, logte_adv)
        call tstep_itemp%advance(dlogti, logti%vals, logti_adv)
        call tstep_parmom%advance(dupar, upar%vals,  upar_adv)
        
        call perf_stop('../tadvance_braginskii')
               
        call perf_start('../omp4')
        !$omp parallel default(none) &
        !$omp          private(ki, kb, l, pen_fac, x, y) &
        !$omp          shared(comm_handler, equi, tau_adv, &
        !$omp                 mesh_cano, phi_cano, penalisation_cano, polars_cano, mesh_stag, phi_stag, penalisation_stag,&
        !$omp                 tstep_continuity, boundaries, mms_on, pen_epsinv, &
        !$omp                 heatflux_timplicit_e, heatflux_timplicit_i, viscosity_timplicit_upar, &
        !$omp                 logne_adv, logte_adv, logti_adv, upar_adv)

        ! Retrieve advanced values and implicit part of penalisation ---------------------------------------------------------------
        ! Dividing by pen_fac = 1 + chi * eps^-1 * dtau_bdf applies the implicit
        ! (BDF) update of the penalisation sink to the advanced fields.
        !$omp do
        do ki = 1, mesh_cano%get_n_points_inner()
            l = mesh_cano%inner_indices(ki)
            pen_fac = 1.0_GP + penalisation_cano%get_charfun_val(ki) * pen_epsinv * tstep_continuity%get_dtau_bdf()

            logne_adv(l) = logne_adv(l) / pen_fac
            ! Temperatures (and upar below) that are advanced implicitly carry their
            ! penalisation inside the parallel solves (via the lambda coefficient),
            ! so they must not be divided here as well
            if (.not.heatflux_timplicit_e) then
                logte_adv(l) = logte_adv(l) / pen_fac
            endif
            if (.not.heatflux_timplicit_i) then
                logti_adv(l) = logti_adv(l) / pen_fac
            endif
        enddo
        !$omp end do
        !$omp do
        do ki = 1, mesh_stag%get_n_points_inner()
            l = mesh_stag%inner_indices(ki)
            pen_fac = 1.0_GP + penalisation_stag%get_charfun_val(ki) * pen_epsinv * tstep_continuity%get_dtau_bdf()
            
            if (.not.viscosity_timplicit_upar) then
                upar_adv(l)  = upar_adv(l) / pen_fac
            endif
        enddo
        !$omp end do

        ! Set perpendicular boundary conditions and ghost points -------------------------------------------------------------------
        
        ! Get boundary values for MMS analysis (exact manufactured solutions at tau_adv)
        if (mms_on) then
            !$omp do
            do kb = 1, mesh_cano%get_n_points_boundary()
                l = mesh_cano%boundary_indices(kb)
                x = mesh_cano%get_x(l)
                y = mesh_cano%get_y(l) 
                boundaries%ne%vals(kb) = log(mms_sol_braginskii_ne(equi, x, y, phi_cano, tau_adv))
                boundaries%te%vals(kb) = log(mms_sol_braginskii_te(equi, x, y, phi_cano, tau_adv))
                boundaries%ti%vals(kb) = log(mms_sol_braginskii_ti(equi, x, y, phi_cano, tau_adv))
                boundaries%vort%vals(kb) = mms_sol_braginskii_vort(equi, x, y, phi_cano, tau_adv)
                boundaries%pot%vals(kb) = mms_sol_braginskii_pot(equi, x, y, phi_cano, tau_adv)
            enddo
            !$omp end do
            !$omp do
            do kb = 1, mesh_stag%get_n_points_boundary()
                l = mesh_stag%boundary_indices(kb)
                x = mesh_stag%get_x(l)
                y = mesh_stag%get_y(l)   
                boundaries%jpar%vals(kb) = mms_sol_braginskii_jpar(equi, x, y, phi_stag, tau_adv)
                boundaries%apar%vals(kb) = mms_sol_braginskii_apar(equi, x, y, phi_stag, tau_adv)
                boundaries%upar%vals(kb) = mms_sol_braginskii_upar(equi, x, y, phi_stag, tau_adv)
            enddo
            !$omp end do
        endif

        ! NOTE(review): called from inside the parallel region — set_perpbnds
        ! presumably contains orphaned worksharing constructs; confirm before
        ! moving these calls outside the region
        call set_perpbnds(mesh_stag, boundaries%upar%types, upar_adv, boundaries%upar%vals)
        call set_perpbnds(mesh_cano, boundaries%ne%types, logne_adv, boundaries%ne%vals)
        call set_perpbnds(mesh_cano, boundaries%te%types, logte_adv, boundaries%te%vals)
        call set_perpbnds(mesh_cano, boundaries%ti%types, logti_adv, boundaries%ti%vals)

        !$omp end parallel
        call perf_stop('../omp4')

        ! Take special care of zonal Neumann core boundaries if present
        ! (skipped for temperatures advanced implicitly: those get their zonal core
        ! treatment inside the implicit parallel-conduction solves below)
        call perf_start('../zonal_neumann_core')
        if (bnddescr_ne_core == BND_BRAGTYPE_ZONAL_NEUMANN) then
            call zonal_neumann_core(comm_handler, mesh_cano, equi, polars_cano, logne_adv)
        endif
        if ((.not.heatflux_timplicit_e) .and. (bnddescr_te_core == BND_BRAGTYPE_ZONAL_NEUMANN)) then
            call zonal_neumann_core(comm_handler, mesh_cano, equi, polars_cano, logte_adv)
        endif
        if ((.not.heatflux_timplicit_i) .and. (bnddescr_ti_core == BND_BRAGTYPE_ZONAL_NEUMANN)) then
            call zonal_neumann_core(comm_handler, mesh_cano, equi, polars_cano, logti_adv)
        endif
        call perf_stop('../zonal_neumann_core')

        call perf_start('../extrapolate_ghost')
        !$omp parallel default(none) &
        !$omp          private(kg, l, x, y) &
        !$omp          shared(equi, mesh_cano, mesh_stag, phi_cano, phi_stag, mms_on, &
        !$omp                 logne_adv, logte_adv, logti_adv, upar_adv, tau_adv)
        ! Set ghost points: exact manufactured solutions for MMS runs, otherwise
        ! extrapolation from the interior
        if (mms_on) then
            !$omp do
            do kg = 1, mesh_cano%get_n_points_ghost()
                l = mesh_cano%ghost_indices(kg)
                x = mesh_cano%get_x(l)
                y = mesh_cano%get_y(l)
                logne_adv(l) = log(mms_sol_braginskii_ne(equi, x, y, phi_cano, tau_adv))
                logte_adv(l) = log(mms_sol_braginskii_te(equi, x, y, phi_cano, tau_adv))
                logti_adv(l) = log(mms_sol_braginskii_ti(equi, x, y, phi_cano, tau_adv))
            enddo
            !$omp end do
            !$omp do
            do kg = 1, mesh_stag%get_n_points_ghost()
                l = mesh_stag%ghost_indices(kg)
                x = mesh_stag%get_x(l)
                y = mesh_stag%get_y(l) 
                upar_adv(l) = mms_sol_braginskii_upar(equi, x, y, phi_stag, tau_adv)
            enddo
            !$omp end do
        else
            ! NOTE: called inside the parallel region (orphaned worksharing expected
            ! inside extrapolate_ghost_points)
            call extrapolate_ghost_points(mesh_cano, logne_adv)
            call extrapolate_ghost_points(mesh_cano, logte_adv)
            call extrapolate_ghost_points(mesh_cano, logti_adv)
            call extrapolate_ghost_points(mesh_stag, upar_adv)
        endif
        !$omp end parallel
        call perf_stop('../extrapolate_ghost')

        ! Implicit treatment of parallel electron heat conduction ------------------------------------------------------------------
        ! Sets up the coefficients (lambda, xi on the canonical mesh, co on the
        ! staggered mesh) and the RHS/initial guess for the aligned parallel solve:
        ! lambda carries the implicit penalisation, xi the 2/3*dtau/(n*Te) prefactor
        ! (schematic Helmholtz-type problem; exact form defined by solve_aligned3d)
        if (heatflux_timplicit_e .and. (heatflux_model == 'BRAGINSKII_LIM') ) then
            
            call perf_start('../parheatslv_e')  
        
            ! Set up coefficients on canonical mesh for parallel solve
            !$omp parallel default(none) private(ki, kb, kg, l, x, y, pen_fac, heatflux_coeff_e) &
            !$omp          shared(equi, tau_adv, mesh_cano, phi_cano, penalisation_cano, mesh_stag, tstep_etemp, boundaries, &
            !$omp                 mms_on, pen_epsinv, swb_etemp_pardiff, bnd_descrs_nmn_stag, bnddescr_te_core, &
            !$omp                 chipar0_e, fac_free_streaming_e, heatflux_cutoff_e, logte_firstsolve_store, &
            !$omp                 ne, te, logte, stag_ne, stag_te, lambda_par, xi_par, co_par, rhs_par, logte_adv)
            !$omp do
            do ki = 1, mesh_cano%get_n_points_inner()
                l = mesh_cano%inner_indices(ki)
                pen_fac = 1.0_GP - penalisation_cano%get_charfun_val(ki)
                            
                lambda_par(l) = 1.0_GP + tstep_etemp%get_dtau_bdf() * pen_epsinv * penalisation_cano%get_charfun_val(ki)              
                xi_par(l) = swb_etemp_pardiff * pen_fac * tstep_etemp%get_dtau_bdf() * twothirds / ( ne%vals(l) * te%vals(l) )
                rhs_par(l) = logte_adv(l)
            enddo
            !$omp end do
            ! Set logte_adv to the initial guess for the solver: quadratic
            ! extrapolation in time (3*f(n) - 3*f(n-1) + f(n-2)); for the zonal core
            ! the history of the first-solve results is used instead.
            ! Note: these loops run over ALL mesh points (incl. boundary/ghost);
            ! ghost values are re-set for MMS in the ghost loop below.
            if (bnddescr_te_core == BND_BRAGTYPE_ZONAL_NEUMANN) then
                !$omp do
                do l = 1, mesh_cano%get_n_points()
                    logte_adv(l) = 3.0_GP*logte_firstsolve_store%vstore(l,1,1) &
                                 - 3.0_GP*logte_firstsolve_store%vstore(l,2,1) + logte_firstsolve_store%vstore(l,3,1)
                enddo
                !$omp end do
            else
                !$omp do
                do l = 1, mesh_cano%get_n_points()
                    logte_adv(l) = 3.0_GP*logte%vals(l) - 3.0_GP*tstep_etemp%vstore(l,1,1) + tstep_etemp%vstore(l,2,1)
                enddo
                !$omp end do
            endif
            ! Boundary points: pure Dirichlet data in the solve (lambda=1, xi=0)
            !$omp do
            do kb = 1, mesh_cano%get_n_points_boundary()
                l = mesh_cano%boundary_indices(kb)
                
                lambda_par(l) = 1.0_GP
                xi_par(l) = 0.0_GP
                rhs_par(l) = boundaries%te%vals(kb)
            enddo
            !$omp end do
            ! Ghost points: exact MMS solution (also as initial guess) or current field
            !$omp do
            do kg = 1, mesh_cano%get_n_points_ghost()
                l = mesh_cano%ghost_indices(kg)
                
                lambda_par(l) = 1.0_GP
                xi_par(l) = 0.0_GP
                if (mms_on) then
                    x = mesh_cano%get_x(l)
                    y = mesh_cano%get_y(l) 
                    rhs_par(l) = log(mms_sol_braginskii_te(equi, x, y, phi_cano, tau_adv))
                    logte_adv(l) = log(mms_sol_braginskii_te(equi, x, y, phi_cano, tau_adv))
                else
                    rhs_par(l) = logte%vals(l)
                endif                
            enddo
            !$omp end do
            
            ! Setup coefficient co_par on staggered mesh:
            ! Braginskii chi_par ~ Te^(5/2), limited by free streaming and a hard cutoff
            !$omp do
            do l = 1, mesh_stag%get_n_points()
                heatflux_coeff_e = chipar0_e *  sqrt(stag_te%vals(l)**5)                                                            ! Braginskii heat flux  
                heatflux_coeff_e = heatflux_coeff_e / (1.0_GP + fac_free_streaming_e * stag_te%vals(l)**2 / stag_ne%vals(l))        ! Free streaming limiter    
                heatflux_coeff_e = min(heatflux_coeff_e, heatflux_cutoff_e)                                                         ! Hard heat flux limiter
                co_par(l) = heatflux_coeff_e * stag_te%vals(l)
            enddo
            !$omp end do
            
            !$omp end parallel

            ! Solve parallel helmholtz problem (field-line-aligned 3D solve; flutter
            ! factors switch the magnetic-flutter contributions to div/grad parts)
            flutter_fac_cano = swb_etemp_flutter_pardiff_div
            flutter_fac_stag = swb_etemp_flutter_pardiff_grad
            call solve_aligned3d(comm_handler, &
                                 equi_on_cano=equi_on_cano, equi_on_stag=equi_on_stag, &
                                 mesh_cano=mesh_cano, mesh_stag=mesh_stag, & 
                                 map=map, &
                                 params_solver=params_parsolver_te, solver_type=parsolver_te_select, &
                                 rhs=rhs_par, sol=logte_adv, &
                                 niter=niter_parslv, res=rinfo(5,1), true_res=rinfo(5,2), &
                                 cnt_matvec=cnt_matvec_parslv, &
                                 lambda=lambda_par, xi=xi_par, co_stag=co_par, &
                                 bnd_descrs=boundaries%te%types, &
                                 flutter_fac_cano=flutter_fac_cano, flutter_fac_stag=flutter_fac_stag, &
                                 opsinplane_cano=opsinplane_cano, opsinplane_stag=opsinplane_stag, &
                                 apar_stag=apar_fluct%vals, apar_cano=cano_apar_fluct%vals, &
                                 bnd_descrs_flux=bnd_descrs_nmn_stag)
                                 
            ! Diagnostics: record matvec count on success; a non-positive niter is
            ! stored as-is (presumably flags a solver problem — confirm with
            ! solve_aligned3d's contract)
            if (niter_parslv > 0) then
                tinfo(5) = cnt_matvec_parslv
            else
                tinfo(5) = niter_parslv
            endif

            ! Extrapolate ghost points (MMS runs keep the exact ghost values set above)
            if (.not.mms_on) then
                !$omp parallel default(none) shared(mesh_cano, logte_adv)
                call extrapolate_ghost_points(mesh_cano, logte_adv)
                !$omp end parallel
            endif

            ! Zonal-Neumann core: the first solve is only predictive — its solution
            ! defines the core boundary values for a second, definitive solve
            if (bnddescr_te_core == BND_BRAGTYPE_ZONAL_NEUMANN) then

                ! Store result of first solve to have a good initial guess for the next time-step
                call logte_firstsolve_store%shift_storage(logte_adv)

                ! New boundary conditions are set in rhs_par, computed from the first solve solution logte_adv
                call zonal_neumann_core(comm_handler, mesh_cano, equi, polars_cano, logte_adv, rhs_par)

                ! New initial guess (quadratic time extrapolation from the regular history)
                !$omp parallel default(none) private(l) &
                !$omp          shared(mesh_cano, logte_adv, logte, tstep_etemp)
                !$omp do
                do l = 1, mesh_cano%get_n_points()
                    logte_adv(l) = 3.0_GP*logte%vals(l) - 3.0_GP*tstep_etemp%vstore(l,1,1) + tstep_etemp%vstore(l,2,1)
                enddo
                !$omp end do
                !$omp end parallel

                ! Solve parallel helmholtz problem again
                ! NOTE(review): this solve uses boundaries%te%solver_types (the first
                ! solve used %types) — presumably the descriptors adapted for the
                ! zonal second solve; confirm in boundaries_braginskii_m
                call solve_aligned3d(comm_handler, &
                                     equi_on_cano=equi_on_cano, equi_on_stag=equi_on_stag, &
                                     mesh_cano=mesh_cano, mesh_stag=mesh_stag, &
                                     map=map, &
                                     params_solver=params_parsolver_te, solver_type=parsolver_te_select, &
                                     rhs=rhs_par, sol=logte_adv, &
                                     niter=niter_parslv, res=rinfo(36,1), true_res=rinfo(36,2), &
                                     cnt_matvec=cnt_matvec_parslv, &
                                     lambda=lambda_par, xi=xi_par, co_stag=co_par, &
                                     bnd_descrs=boundaries%te%solver_types, &
                                     flutter_fac_cano=flutter_fac_cano, flutter_fac_stag=flutter_fac_stag, &
                                     opsinplane_cano=opsinplane_cano, opsinplane_stag=opsinplane_stag, &
                                     apar_stag=apar_fluct%vals, apar_cano=cano_apar_fluct%vals, &
                                     bnd_descrs_flux=bnd_descrs_nmn_stag)

                if (niter_parslv > 0) then
                    tinfo(36) = cnt_matvec_parslv
                else
                    tinfo(36) = niter_parslv
                endif

                ! Extrapolate ghost points
                if (.not.mms_on) then
                    !$omp parallel default(none) shared(mesh_cano, logte_adv)
                    call extrapolate_ghost_points(mesh_cano, logte_adv)
                    !$omp end parallel
                endif
            endif
     
            call perf_stop('../parheatslv_e')
        endif

        ! Implicit treatment of parallel ion heat conduction -----------------------------------------------------------------------
        ! Mirrors the electron heat-conduction setup above, with ion quantities
        ! (ti, chipar0_i, free-streaming and cutoff limiters for ions)
        if (heatflux_timplicit_i .and. (heatflux_model == 'BRAGINSKII_LIM') ) then
            
            call perf_start('../parheatslv_i')  

            ! Set up coefficients on canonical mesh for parallel solve
            !$omp parallel default(none) &
            !$omp          private(ki, kb, kg, l, x, y, pen_fac, heatflux_coeff_i) &
            !$omp          shared(equi, tau_adv, mesh_cano, phi_cano, penalisation_cano, mesh_stag, tstep_itemp, boundaries, &
            !$omp                 mms_on, pen_epsinv, swb_itemp_pardiff, bnd_descrs_nmn_stag, bnddescr_ti_core, &
            !$omp                 chipar0_i, fac_free_streaming_i, heatflux_cutoff_i, logti_firstsolve_store, &
            !$omp                 ne, ti, logti, stag_ne, stag_ti, lambda_par, xi_par, co_par, rhs_par, logti_adv)
            !$omp do
            do ki = 1, mesh_cano%get_n_points_inner()
                l = mesh_cano%inner_indices(ki)
                pen_fac = 1.0_GP - penalisation_cano%get_charfun_val(ki)

                ! lambda: implicit penalisation; xi: 2/3*dtau/(n*Ti) diffusion prefactor
                lambda_par(l) = 1.0_GP + tstep_itemp%get_dtau_bdf() * pen_epsinv * penalisation_cano%get_charfun_val(ki)
                xi_par(l) = swb_itemp_pardiff * pen_fac * tstep_itemp%get_dtau_bdf() * twothirds / (ne%vals(l) * ti%vals(l))
                rhs_par(l) = logti_adv(l)
            enddo
            !$omp end do
            ! Set logti_adv to the initial guess for the solver: quadratic time
            ! extrapolation; zonal core uses the stored first-solve history
            if (bnddescr_ti_core == BND_BRAGTYPE_ZONAL_NEUMANN) then
                !$omp do
                do l = 1, mesh_cano%get_n_points()
                    logti_adv(l) = 3.0_GP*logti_firstsolve_store%vstore(l,1,1) &
                                 - 3.0_GP*logti_firstsolve_store%vstore(l,2,1) + logti_firstsolve_store%vstore(l,3,1)
                enddo
                !$omp end do
            else
                !$omp do
                do l = 1, mesh_cano%get_n_points()
                    logti_adv(l) = 3.0_GP*logti%vals(l) - 3.0_GP*tstep_itemp%vstore(l,1,1) + tstep_itemp%vstore(l,2,1)
                enddo
                !$omp end do
            endif
            ! Boundary points: Dirichlet data (lambda=1, xi=0)
            !$omp do
            do kb = 1, mesh_cano%get_n_points_boundary()
                l = mesh_cano%boundary_indices(kb)

                lambda_par(l) = 1.0_GP
                xi_par(l) = 0.0_GP
                rhs_par(l) = boundaries%ti%vals(kb)
            enddo
            !$omp end do
            ! Ghost points: exact MMS solution (also as initial guess) or current field
            !$omp do
            do kg = 1, mesh_cano%get_n_points_ghost()
                l = mesh_cano%ghost_indices(kg)
                
                lambda_par(l) = 1.0_GP
                xi_par(l) = 0.0_GP
                if (mms_on) then
                    x = mesh_cano%get_x(l)
                    y = mesh_cano%get_y(l) 
                    rhs_par(l) = log(mms_sol_braginskii_ti(equi, x, y, phi_cano, tau_adv))
                    logti_adv(l) = log(mms_sol_braginskii_ti(equi, x, y, phi_cano, tau_adv))
                else
                    rhs_par(l) = logti%vals(l)
                endif                
            enddo
            !$omp end do
            
            ! Setup coefficient co_par on staggered mesh:
            ! Braginskii chi_par ~ Ti^(5/2), limited by free streaming and a hard cutoff
            !$omp do
            do l = 1, mesh_stag%get_n_points()
                heatflux_coeff_i = chipar0_i * sqrt(stag_ti%vals(l) ** 5)                                                           ! Braginskii heat flux  
                heatflux_coeff_i = heatflux_coeff_i/ (1.0_GP + fac_free_streaming_i * stag_ti%vals(l)**2 / stag_ne%vals(l))         ! Free streaming limiter    
                heatflux_coeff_i = min(heatflux_coeff_i, heatflux_cutoff_i)                                                         ! Hard heat flux limiter
                co_par(l) = heatflux_coeff_i * stag_ti%vals(l)                                               
            enddo
            !$omp end do
            
            !$omp end parallel
            
            ! Solve parallel helmholtz problem (ion branch; analogous to the electron
            ! solve, diagnostics go into rinfo(6,:)/tinfo(6))
            flutter_fac_cano = swb_itemp_flutter_pardiff_div
            flutter_fac_stag = swb_itemp_flutter_pardiff_grad
            call solve_aligned3d(comm_handler, &
                                 equi_on_cano=equi_on_cano, equi_on_stag=equi_on_stag, &
                                 mesh_cano=mesh_cano, mesh_stag=mesh_stag, & 
                                 map=map, &
                                 params_solver=params_parsolver_ti, solver_type=parsolver_ti_select, &
                                 rhs=rhs_par, sol=logti_adv, &
                                 niter=niter_parslv, res=rinfo(6,1), true_res=rinfo(6,2), &
                                 cnt_matvec=cnt_matvec_parslv, &
                                 lambda=lambda_par, xi=xi_par, co_stag=co_par, &
                                 bnd_descrs=boundaries%ti%types, &
                                 flutter_fac_cano=flutter_fac_cano, flutter_fac_stag=flutter_fac_stag, &
                                 opsinplane_cano=opsinplane_cano, opsinplane_stag=opsinplane_stag, &
                                 apar_stag=apar_fluct%vals, apar_cano=cano_apar_fluct%vals, &
                                 bnd_descrs_flux=bnd_descrs_nmn_stag)
                                          
            ! Matvec count on success; non-positive niter stored as-is
            if (niter_parslv > 0) then
                tinfo(6) = cnt_matvec_parslv
            else
                tinfo(6) = niter_parslv
            endif

            ! Extrapolate ghost points (MMS runs keep the exact ghost values set above)
            if (.not.mms_on) then
                !$omp parallel default(none) shared(mesh_cano, logti_adv)
                call extrapolate_ghost_points(mesh_cano, logti_adv)
                !$omp end parallel
            endif

            ! Zonal-Neumann core: as for the electrons, the first solve provides the
            ! core boundary values for a second, definitive solve
            if (bnddescr_ti_core == BND_BRAGTYPE_ZONAL_NEUMANN) then

                ! Store result of first solve to have a good initial guess for the next time-step
                call logti_firstsolve_store%shift_storage(logti_adv)

                ! New boundary conditions are set in rhs_par, computed from the first solve solution logti_adv
                call zonal_neumann_core(comm_handler, mesh_cano, equi, polars_cano, logti_adv, rhs_par)

                ! New initial guess (quadratic time extrapolation from the regular history)
                !$omp parallel default(none) private(l) &
                !$omp          shared(mesh_cano, logti_adv, logti, tstep_itemp)
                !$omp do
                do l = 1, mesh_cano%get_n_points()
                    logti_adv(l) = 3.0_GP*logti%vals(l) - 3.0_GP*tstep_itemp%vstore(l,1,1) + tstep_itemp%vstore(l,2,1)
                enddo
                !$omp end do
                !$omp end parallel

                ! Solve parallel helmholtz problem again
                ! (uses boundaries%ti%solver_types, matching the electron second solve)
                call solve_aligned3d(comm_handler, &
                                     equi_on_cano=equi_on_cano, equi_on_stag=equi_on_stag, &
                                     mesh_cano=mesh_cano, mesh_stag=mesh_stag, &
                                     map=map, &
                                     params_solver=params_parsolver_ti, solver_type=parsolver_ti_select, &
                                     rhs=rhs_par, sol=logti_adv, &
                                     niter=niter_parslv, res=rinfo(37,1), true_res=rinfo(37,2), &
                                     cnt_matvec=cnt_matvec_parslv, &
                                     lambda=lambda_par, xi=xi_par, co_stag=co_par, &
                                     bnd_descrs=boundaries%ti%solver_types, &
                                     flutter_fac_cano=flutter_fac_cano, flutter_fac_stag=flutter_fac_stag, &
                                     opsinplane_cano=opsinplane_cano, opsinplane_stag=opsinplane_stag, &
                                     apar_stag=apar_fluct%vals, apar_cano=cano_apar_fluct%vals, &
                                     bnd_descrs_flux=bnd_descrs_nmn_stag)

                if (niter_parslv > 0) then
                    tinfo(37) = cnt_matvec_parslv
                else
                    tinfo(37) = niter_parslv
                endif

                ! Extrapolate ghost points
                if (.not.mms_on) then
                    !$omp parallel default(none) shared(mesh_cano, logti_adv)
                    call extrapolate_ghost_points(mesh_cano, logti_adv)
                    !$omp end parallel
                endif
            endif
     
            call perf_stop('../parheatslv_i')
        endif
        
        ! Implicit treatment of parallel momentum dissipation ----------------------------------------------------------------------
        ! Ion parallel viscosity is solved with the adjoint-form aligned solve
        ! (solve_aligned3d_adjoint): coefficients lambda/xi/fcx live on the staggered
        ! mesh, the diffusion coefficient co on the canonical mesh
        if (viscosity_timplicit_upar) then
            call perf_start('../parslv_u')

            ! Set up coefficients on staggered mesh for parallel solve
            !$omp parallel default(none) private(ki, kb, kg, l, x, y, pen_fac) &
            !$omp shared(equi, equi_on_cano, equi_on_stag, tau_adv, mesh_stag, phi_stag, &
            !$omp        penalisation_stag, mesh_cano, tstep_parmom, boundaries, &
            !$omp        mms_on, tratio, pen_epsinv, swb_upar_viscion, bnd_descrs_nmn_cano, &
            !$omp        lambda_par_adj, xi_par_adj, fcx_par_adj, rhs_par_adj, co_par_adj, &
            !$omp        gstress, stag_ne, upar, upar_adv)
            !$omp do
            do ki = 1, mesh_stag%get_n_points_inner()
                l = mesh_stag%inner_indices(ki)
                pen_fac = 1.0_GP - penalisation_stag%get_charfun_val(ki)

                ! lambda: implicit penalisation; xi: 2/3*dtau*tratio*|B|^(3/2)/n
                ! viscosity prefactor; fcx: |B|^(3/2) metric factor of the adjoint solve
                lambda_par_adj(l) = 1.0_GP + tstep_parmom%get_dtau_bdf() * pen_epsinv * penalisation_stag%get_charfun_val(ki)
                xi_par_adj(l) = swb_upar_viscion * pen_fac * tstep_parmom%get_dtau_bdf() &
                                * twothirds * tratio * sqrt(equi_on_stag%absb(l)**3) / (stag_ne%vals(l))
                fcx_par_adj(l) = sqrt(equi_on_stag%absb(l)**3)
                rhs_par_adj(l) = upar_adv(l)
                ! Set upar_adv to the initial guess for the solver:
                ! quadratic time extrapolation 3*f(n) - 3*f(n-1) + f(n-2)
                upar_adv(l) = 3.0_GP*upar%vals(l) - 3.0_GP*tstep_parmom%vstore(l,1,1) + tstep_parmom%vstore(l,2,1)
            enddo
            !$omp end do
            ! Boundary points: Dirichlet data (lambda=1, xi=0)
            !$omp do
            do kb = 1, mesh_stag%get_n_points_boundary()
                l = mesh_stag%boundary_indices(kb)
                
                lambda_par_adj(l) = 1.0_GP
                xi_par_adj(l) = 0.0_GP
                fcx_par_adj(l) = sqrt(equi_on_stag%absb(l)**3)
                rhs_par_adj(l) = boundaries%upar%vals(kb)
                upar_adv(l) = 3.0_GP*upar%vals(l) - 3.0_GP*tstep_parmom%vstore(l,1,1) + tstep_parmom%vstore(l,2,1)
            enddo
            !$omp end do
            ! Ghost points; for MMS the initial guess is left at the exact manufactured
            ! value assigned to upar_adv in the ghost-extrapolation step above
            !$omp do
            do kg = 1, mesh_stag%get_n_points_ghost()
                l = mesh_stag%ghost_indices(kg)
                
                lambda_par_adj(l) = 1.0_GP
                xi_par_adj(l) = 0.0_GP
                fcx_par_adj(l) = sqrt(equi_on_stag%absb(l)**3)
                if (mms_on) then
                    x = mesh_stag%get_x(l)
                    y = mesh_stag%get_y(l) 
                    rhs_par_adj(l) = mms_sol_braginskii_upar(equi, x, y, phi_stag, tau_adv)
                else
                    rhs_par_adj(l) = upar%vals(l)
                    upar_adv(l) = 3.0_GP*upar%vals(l) - 3.0_GP*tstep_parmom%vstore(l,1,1) + tstep_parmom%vstore(l,2,1)
                endif               
            enddo
            !$omp end do
            
            ! Setup coefficient co_par_adj on canonical mesh:
            ! 2*eta (gyroviscous flow coefficient) / |B|^3
            !$omp do
            do l = 1, mesh_cano%get_n_points()
                co_par_adj(l) = 2.0_GP * gstress%eta_flow_v(l) / (equi_on_cano%absb(l)**3)
            enddo
            !$omp end do
            !$omp end parallel
            
            ! Adjoint aligned solve; diagnostics into rinfo(7,:)/tinfo(7)
            flutter_fac_cano = swb_flutter_visc
            flutter_fac_stag = swb_upar_flutter_viscion_grad
            call solve_aligned3d_adjoint(comm_handler, &
                                         equi_on_cano=equi_on_cano, equi_on_stag=equi_on_stag, &
                                         mesh_cano=mesh_cano, mesh_stag=mesh_stag, & 
                                         map=map, &
                                         params_solver=params_parsolver_upar, solver_type=parsolver_upar_select, &
                                         rhs=rhs_par_adj, sol=upar_adv, &
                                         niter=niter_parslv, res=rinfo(7,1), true_res=rinfo(7,2), &
                                         cnt_matvec=cnt_matvec_parslv, &
                                         lambda=lambda_par_adj, xi=xi_par_adj, co_cano=co_par_adj, fcx=fcx_par_adj, &
                                         bnd_descrs=boundaries%upar%types, &
                                         flutter_fac_cano=flutter_fac_cano, flutter_fac_stag=flutter_fac_stag, &
                                         opsinplane_cano=opsinplane_cano, opsinplane_stag=opsinplane_stag, &
                                         apar_stag=apar_fluct%vals, apar_cano=cano_apar_fluct%vals, &
                                         bnd_descrs_flux=bnd_descrs_nmn_cano)
                                                      
            ! Matvec count on success; non-positive niter stored as-is
            if (niter_parslv > 0) then
                tinfo(7) = cnt_matvec_parslv
            else
                tinfo(7) = niter_parslv
            endif

            ! Extrapolate ghost points (MMS runs keep the exact ghost values)
            if (.not.mms_on) then
                !$omp parallel default(none) shared(mesh_stag, upar_adv)
                call extrapolate_ghost_points(mesh_stag, upar_adv)
                !$omp end parallel
            endif

            call perf_stop('../parslv_u')
        endif

        call perf_start('../omp5:exp')

        ! Explicit part: density/temperature floors, polarisation coefficients,
        ! and the in-plane advection contributions to the vorticity equation.
        !$omp parallel default(none) private(ki, l, x, y, pen_fac, dia_fac) &
        !$omp          shared(equi, equi_on_cano, mesh_cano, phi_cano, penalisation_cano, opsinplane_cano, &
        !$omp                 delta, beta, tratio, boussinesq_on, floor_ne, floor_te, floor_ti, &
        !$omp                 swb_vort_dia, swb_vort_exb, swb_vort_flutter_paradv, &
        !$omp                 ne_adv, logne_adv, te_adv, logte_adv, ti_adv, logti_adv, cano_upar, &
        !$omp                 polarisation_coeff, flutter_paradv_coeff, dvort, pot, ti, ne, cano_apar_fluct, &
        !$omp                 cano_apar_over_btor, dtau, src_floor_ne)
        
        ! Measure floor contribution to density
        ! (diagnostic source rate: density added per unit time by the floor).
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            src_floor_ne(l) = max(floor_ne - exp(logne_adv(l)), 0.0_GP) / dtau
        enddo
        !$omp end do

        ! Apply floors and compute actual density and temperature from their logarithms --------------------------------------------
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            logne_adv(l) = max(logne_adv(l), log(floor_ne))
            ne_adv(l) = exp(logne_adv(l))

            logte_adv(l) = max(logte_adv(l), log(floor_te))
            te_adv(l) = exp(logte_adv(l))

            logti_adv(l) = max(logti_adv(l), log(floor_ti))
            ti_adv(l) = exp(logti_adv(l))
        enddo
        !$omp end do

        ! Compute polarisation coefficient -----------------------------------------------------------------------------------------
        if (boussinesq_on) then
            ! In Boussinesq approximation (density factor dropped)
            !$omp do
            do l = 1, mesh_cano%get_n_points()
                x = mesh_cano%get_x(l)
                y = mesh_cano%get_y(l)
                polarisation_coeff(l) = 1.0_GP/(equi_on_cano%btor(l)**2) * equi%jacobian(x, y, phi_cano)
            enddo
            !$omp end do
        else
            ! Full-f version (use ne at advanced time)
            !$omp do
            do l = 1, mesh_cano%get_n_points()
                x = mesh_cano%get_x(l)
                y = mesh_cano%get_y(l)
                polarisation_coeff(l) = ne_adv(l) / (equi_on_cano%btor(l)**2) * equi%jacobian(x, y, phi_cano)
            enddo
            !$omp end do
        endif
        
        ! Compute flutter polarisation advection coefficient -----------------------------------------------------------------------------------------
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            flutter_paradv_coeff(l) = polarisation_coeff(l) * cano_upar%vals(l) * equi_on_cano%btor(l)
            cano_apar_over_btor(l) = cano_apar_fluct%vals(l) / equi_on_cano%btor(l)
        enddo
        !$omp end do

        ! Add advection in vorticity equation --------------------------------------------------------------------------------------
        ! ExB advection minus magnetic-flutter parallel advection, both masked by
        ! (1 - charfun) so penalised regions receive no advection contribution.
        dia_fac = tratio * swb_vort_dia
        !$omp do
        do ki = 1, mesh_cano%get_n_points_inner()
            l = mesh_cano%inner_indices(ki)
            x = mesh_cano%get_x(l)
            y = mesh_cano%get_y(l)
            ! Penalisation factor             
            pen_fac = 1.0_GP - penalisation_cano%get_charfun_val(ki)

            dvort(l) = dvort(l) + pen_fac / equi%jacobian(x, y, phi_cano) * &
                    (swb_vort_exb * delta * &
                        opsinplane_cano%polarisation_advection(dia_fac, polarisation_coeff, pot%vals, &
                                                               pot%vals, ti%vals, ne%vals, l) &
                     - swb_vort_flutter_paradv * delta * beta * &
                        opsinplane_cano%polarisation_advection(dia_fac, flutter_paradv_coeff, cano_apar_over_btor, &
                                                               pot%vals, ti%vals, ne%vals, l) &
                    )
        enddo
        !$omp end do
        !$omp end parallel
        call perf_stop('../omp5:exp')
        
        ! Along-field polarisation advection contribution accumulated into dvort.
        call perf_start('../exbvort')
        call parallel_polarisation_advection(comm_handler, equi, mesh_cano, mesh_stag, map, penalisation_cano, &
                                             boundaries, polarisation_coeff, &
                                             pot, ti, ne, logne, cano_upar%vals, dvort)
        call perf_stop('../exbvort')

        ! Compute penalisation values for potential --------------------------------------------------------------------------------
        call perf_start('../penalisation_pot')
        call pot_penalisation(equi, mesh_cano, map, penalisation_cano, te, pot_pen)
        call perf_stop('../penalisation_pot')
    
        ! Advance vorticity/potential in time --------------------------------------------------------------------------------------
        call perf_start('../vortpot')
        
        if ( (mms_on) .and. (.not.mms_potvort_solve_on) ) then

            ! For mms, with mms_potvort_solve = false, use analytic MMS solution for potential and vorticity
            !$omp parallel default(none) private(l, x, y) &
            !$omp          shared(equi, tau_adv, mesh_cano, phi_cano, &
            !$omp                 pot_adv , vort_adv)         
            !$omp do
            do l = 1, mesh_cano%get_n_points()
                x = mesh_cano%get_x(l)
                y = mesh_cano%get_y(l)
                pot_adv(l) = mms_sol_braginskii_pot(equi, x, y, phi_cano,tau_adv)
                vort_adv(l) = mms_sol_braginskii_vort(equi, x, y, phi_cano, tau_adv)
            enddo
            !$omp end do
            !$omp end parallel

        else           

            ! Initial guess for potential
            ! (third-order time extrapolation; the zonal-Neumann core case uses the
            ! dedicated first-solve history instead of the regular potential store).
            !$omp parallel default(none) private(kb, l) &
            !$omp          shared(mesh_cano, boundaries, bnddescr_pot_core, sheath_potential_lambda_sh, &
            !$omp                 pot, pot_adv, pot_store, pot_firstsolve_store, te)
            if (bnddescr_pot_core == BND_BRAGTYPE_ZONAL_NEUMANN) then
                !$omp do
                do l = 1, mesh_cano%get_n_points()
                    pot_adv(l) = 3.0_GP*pot_firstsolve_store%vstore(l,1,1) &
                                - 3.0_GP*pot_firstsolve_store%vstore(l,2,1) + pot_firstsolve_store%vstore(l,3,1)
                enddo
                !$omp end do
            else
                !$omp do
                do l = 1, mesh_cano%get_n_points()
                    pot_adv(l) = 3.0_GP*pot%vals(l) - 3.0_GP*pot_store%vstore(l,1,1) + pot_store%vstore(l,2,1)
                enddo
                !$omp end do
            endif
            ! Floating-potential boundary points get the local sheath value.
            !$omp do
            do kb = 1, mesh_cano%get_n_points_boundary()
                if (boundaries%pot%types(kb) == BND_BRAGTYPE_FLOATING_POTENTIAL_LOCAL) then
                    ! Set equal to lambda_sh * Te
                    l = mesh_cano%boundary_indices(kb)
                    boundaries%pot%vals(kb) = sheath_potential_lambda_sh * te%vals(l)
                endif
            enddo
            !$omp end do
            !$omp end parallel

            ! Numerical advance/solve pot/vort
            ! (history is passed as concatenated arrays [current, stored levels]).
            call polarisation_advance(comm_handler, &
                                      equi, mesh_cano, hsolver_cano, penalisation_cano, polars_cano, &
                                      boundaries, tstep_vort, &
                                      [pot%vals, pot_store%vstore], &
                                      [ne%vals, ne_store%vstore], &
                                      [ti%vals, ti_store%vstore], &
                                      pot_firstsolve_store, &
                                      polarisation_coeff, ne_adv, ti_adv, dvort, pot_pen, &
                                      pot_adv, vort_adv, &
                                      sinfo = tinfo(1), res = rinfo(1,1), &   
                                      sinfo_zon = tinfo(2), res_zon = rinfo(2,1))

            if (mms_on) then
                ! Set ghost points of MMS solution
                !$omp parallel default(none) private(kg, l, x, y) &
                !$omp          shared(equi, tau_adv, mesh_cano, phi_cano, pot_adv, vort_adv)
                !$omp do
                do kg = 1, mesh_cano%get_n_points_ghost()
                    l = mesh_cano%ghost_indices(kg)
                    x = mesh_cano%get_x(l)
                    y = mesh_cano%get_y(l)
                    pot_adv(l) = mms_sol_braginskii_pot(equi, x, y, phi_cano, tau_adv)
                    vort_adv(l) = mms_sol_braginskii_vort(equi, x, y, phi_cano, tau_adv)
                enddo
                !$omp end do
                !$omp end parallel 
            endif

        endif 

        call perf_stop('../vortpot')

        ! Compute density at t+1 on staggered grid ---------------------------------------------------------------------------------
        call perf_start('../newnstag')

        ! Also, initial guesses for apar solvers (main + penalisation)
        ! -- all third-order time extrapolations from the stored history levels.
        !$omp parallel default(none) private(l) &
        !$omp          shared(mesh_cano, mesh_stag, ne_adv, nevar_adv, te_adv, tevar_adv, &
        !$omp                 jpar, jpar_store, jpar_t_extrapolate, &
        !$omp                 apar_pen_store, apar_pen_guess, apar, apar_adv, apar_store)
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            nevar_adv%vals(l) = ne_adv(l)
            tevar_adv%vals(l) = te_adv(l)
        enddo
        !$omp end do
        !$omp do
        do l = 1, mesh_stag%get_n_points()  
            jpar_t_extrapolate%vals(l) = 3.0_GP*jpar%vals(l) - 3.0_GP*jpar_store%vstore(l,1,1) + jpar_store%vstore(l,2,1)
            apar_pen_guess(l) = 3.0_GP*apar_pen_store%vstore(l,1,1) &
                              - 3.0_GP*apar_pen_store%vstore(l,2,1) + apar_pen_store%vstore(l,3,1)
            apar_adv(l) = 3.0_GP*apar%vals(l) - 3.0_GP*apar_store%vstore(l,1,1) + apar_store%vstore(l,2,1)
        enddo
        !$omp end do
        !$omp end parallel

        ! Communication 
        ! (halo exchange must complete before lifting to the staggered mesh).
        call nevar_adv%start_comm_halos(comm_handler)
        call nevar_adv%finalize_comm_halos(comm_handler)
        
        call tevar_adv%start_comm_halos(comm_handler)
        call tevar_adv%finalize_comm_halos(comm_handler)

        call jpar_t_extrapolate%start_comm_halos(comm_handler)
        call jpar_t_extrapolate%finalize_comm_halos(comm_handler)

        ! Interpolate advanced ne/Te to the staggered mesh and re-apply the floors
        ! (the lift can under/overshoot the canonical-mesh floored values).
        !$omp parallel default(none) private(ki, l) shared(map, mesh_stag, &
        !$omp          nevar_adv, nevar_adv_stag, tevar_adv, tevar_adv_stag, floor_ne, floor_te)
        call map%lift_cano_to_stag(nevar_adv%vals, nevar_adv%hfwd, nevar_adv_stag%vals)
        call map%lift_cano_to_stag(tevar_adv%vals, tevar_adv%hfwd, tevar_adv_stag%vals)
        !$omp do
        do ki = 1, mesh_stag%get_n_points_inner()
            l = mesh_stag%inner_indices(ki)
            nevar_adv_stag%vals(l) = max(nevar_adv_stag%vals(l), floor_ne)
            tevar_adv_stag%vals(l) = max(tevar_adv_stag%vals(l), floor_te)
        enddo
        !$omp end do
        !$omp end parallel

        call perf_stop('../newnstag')

        ! Compute and apply penalisation values for Ohm's law (psipar_pen_vals) ----------------------------------------------------
        call perf_start('../penalisation_ohm')
      
        call psipar_penalisation(comm_handler, equi, mesh_stag, hsolver_stag, map, penalisation_stag, &
                                 jpar_t_extrapolate, nevar_adv_stag%vals, &
                                 apar_pen_guess, psipar_pen, sinfo=tinfo(4), res=rinfo(4,1))

        ! Accumulate the masked penalisation term into the Ohm's-law RHS.
        !$omp parallel default(none) private(ki, l) &
        !$omp          shared(mesh_stag, penalisation_stag, pen_epsinv, psipar_pen, dohm)
        !$omp do
        do ki = 1, mesh_stag%get_n_points_inner()
            l = mesh_stag%inner_indices(ki)        
            dohm(l) = dohm(l) + penalisation_stag%get_charfun_val(ki) * pen_epsinv * psipar_pen(ki)     
        enddo
        !$omp end do
        !$omp end parallel

        call perf_stop('../penalisation_ohm')

        ! Advance parallel current/electromagnetic potential in time ---------------------------------------------------------------
        call perf_start('../jparapar')

        if ( (mms_on) .and. (.not.mms_aparjpar_solve_on) ) then

            ! For mms, with mms_aparjpar_solve = false, use analytic MMS solution for apar and jpar
            !$omp parallel default(none) private(l, x, y) &
            !$omp          shared(equi, tau_adv, mesh_stag, phi_stag, apar_adv, jpar_adv)          
            !$omp do
            do l = 1, mesh_stag%get_n_points()
                x = mesh_stag%get_x(l)
                y = mesh_stag%get_y(l)
                apar_adv(l) = mms_sol_braginskii_apar(equi, x, y, phi_stag, tau_adv)
                jpar_adv(l) = mms_sol_braginskii_jpar(equi, x, y, phi_stag, tau_adv)
            enddo
            !$omp end do
            !$omp end parallel

        else

            ! Numerical advance/solve apar/jpar
            call ohm_advance(comm_handler, equi, mesh_stag, hsolver_stag, map, penalisation_stag, &
                             boundaries, tstep_ohm, &
                             stag_ne%vals, nevar_adv_stag%vals, tevar_adv_stag%vals, &
                             apar%vals, jpar%vals, & 
                             dohm, & 
                             apar_adv, jpar_adv, & 
                             sinfo=tinfo(3), res=rinfo(3,1))

            if (mms_on) then
                ! Set ghost points of MMS solution
                !$omp parallel default(none) private(kg, l, x, y) &
                !$omp          shared(equi, tau_adv, mesh_stag, phi_stag, apar_adv, jpar_adv)
                !$omp do
                do kg = 1, mesh_stag%get_n_points_ghost()
                    l = mesh_stag%ghost_indices(kg)
                    x = mesh_stag%get_x(l)
                    y = mesh_stag%get_y(l)
                    apar_adv(l) = mms_sol_braginskii_apar(equi, x, y, phi_stag, tau_adv)
                    jpar_adv(l) = mms_sol_braginskii_jpar(equi, x, y, phi_stag, tau_adv)
                enddo
                !$omp end do
                !$omp end parallel
            endif

        endif 

        call perf_stop('../jparapar')

        ! Check for the successful execution of the time step ----------------------------------------------------------------------
        ! Global minimum of all solver status codes across ranks; any negative
        ! value anywhere aborts the step on every rank consistently.
        tinfo_min = minval(tinfo)
        call MPI_allreduce(MPI_IN_PLACE, tinfo_min, 1, MPI_Integer, MPI_MIN, comm_handler%get_comm_cart(), ierr)
        if ( tinfo_min < 0 ) then
            ! Do not update the data.
            ! Code will safe stop with error snaps output from model_braginskii.
            success_plasma = .false.
            call perf_stop('timestep')
            return
        endif
        success_plasma = .true.
        success_plasma = .true.

        ! Formal advancement of time -----------------------------------------------------------------------------------------------
        call perf_start('../shiftstorage')

        call tstep_continuity%shift_storage((/logne%vals, dlogne/))
        call tstep_etemp%shift_storage((/logte%vals, dlogte/))
        call tstep_itemp%shift_storage((/logti%vals, dlogti/))
        call tstep_parmom%shift_storage((/upar%vals, dupar/))
        call pot_store%shift_storage(pot%vals)
        call ne_store%shift_storage(ne%vals)
        call ti_store%shift_storage(ti%vals)
        call jpar_store%shift_storage(jpar%vals)
        call apar_pen_store%shift_storage(apar_pen_guess)
        call apar_store%shift_storage(apar%vals)
        ! Note: tstep_vort already shifted in polarisation_advance and tstep_ohm in ohm_advance

        !$omp parallel default(none) private(l) &
        !$omp          shared(mesh_cano, mesh_stag, &
        !$omp                 ne, te, ti, pot, vort, logne, logte, logti, upar, jpar, apar_fluct, apar, &
        !$omp                 ne_adv, te_adv, ti_adv, pot_adv, vort_adv, upar_adv, jpar_adv, apar_adv, &
        !$omp                 logne_adv, logte_adv, logti_adv)
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            ne%vals(l) = ne_adv(l)
            te%vals(l) = te_adv(l)
            ti%vals(l) = ti_adv(l)
            pot%vals(l) = pot_adv(l)
            vort%vals(l) = vort_adv(l)
            logne%vals(l) = logne_adv(l)
            logte%vals(l) = logte_adv(l)
            logti%vals(l) = logti_adv(l)
        enddo
        !$omp end do
        !$omp do
        do l = 1, mesh_stag%get_n_points()
            upar%vals(l) = upar_adv(l)
            jpar%vals(l) = jpar_adv(l)
            apar_fluct%vals(l) = apar_fluct%vals(l) + apar_adv(l) - apar%vals(l)
            ! Time advance of apar_fluct, assuming that apar_background does not change.
            ! Updating and removing the apar_background will be done in model_braginskii_m.
            apar%vals(l) = apar_adv(l)
            ! These fields are saved within the timestep module for the next time step.
        enddo
        !$omp end do
        !$omp end parallel

        call perf_stop('../shiftstorage')

        call perf_stop('timestep')

    contains
        
        subroutine setup_work_variables()
            !! Allocates the work variables of the time step (host association).
            !! Fields living on the staggered mesh are initialised with
            !! staggered=.true., canonical-mesh fields with staggered=.false.

            integer :: np_cano, np_stag

            ! Hoist the mesh sizes; both are plain size queries.
            np_cano = mesh_cano%get_n_points()
            np_stag = mesh_stag%get_n_points()

            call upar_gradpar_logne%init(comm_handler, np_stag, staggered=.true.)

            ! Electron temperature work fields
            call stag_te%init(comm_handler, np_stag, staggered=.true.)
            call qcond_te%init(comm_handler, np_stag, staggered=.true.)
            call cano_qcond_te%init(comm_handler, np_cano, staggered=.false.)
            call heat_gradpar_logte_stag%init(comm_handler, np_stag, staggered=.true.)

            ! Ion temperature work fields
            call stag_ti%init(comm_handler, np_stag, staggered=.true.)
            call qcond_ti%init(comm_handler, np_stag, staggered=.true.)
            call cano_qcond_ti%init(comm_handler, np_cano, staggered=.false.)
            call heat_gradpar_logti_stag%init(comm_handler, np_stag, staggered=.true.)

            call upar_gradpar_logti%init(comm_handler, np_stag, staggered=.true.)

            ! Density / parallel velocity work fields
            call stag_ne%init(comm_handler, np_stag, staggered=.true.)
            call vpar%init(comm_handler, np_stag, staggered=.true.)
            call vpar_gradpar_logte%init(comm_handler, np_stag, staggered=.true.)

            ! Potential and parallel-gradient fields on the staggered mesh
            call stag_pot%init(comm_handler, np_stag, staggered=.true.)
            call gradpar_logne%init(comm_handler, np_stag, staggered=.true.)
            call gradpar_logte%init(comm_handler, np_stag, staggered=.true.)
            call gradpar_logti%init(comm_handler, np_stag, staggered=.true.)

            ! Staggered quantities lifted to the canonical mesh
            call cano_upar%init(comm_handler, np_cano, staggered=.false.)
            call cano_apar_fluct%init(comm_handler, np_cano, staggered=.false.)

            ! Parallel current work fields
            call jpar_over_n_stag%init(comm_handler, np_stag, staggered=.true.)
            call jpar_over_n_full%init(comm_handler, np_cano, staggered=.false.)
            call cano_jpar%init(comm_handler, np_cano, staggered=.false.)

            ! Advanced-time ne/Te on both meshes
            call nevar_adv%init(comm_handler, np_cano, staggered=.false.)
            call nevar_adv_stag%init(comm_handler, np_stag, staggered=.true.)
            call tevar_adv%init(comm_handler, np_cano, staggered=.false.)
            call tevar_adv_stag%init(comm_handler, np_stag, staggered=.true.)

            ! Logarithmic state variables on the canonical mesh
            call logne%init(comm_handler, np_cano, staggered=.false.)
            call logte%init(comm_handler, np_cano, staggered=.false.)
            call logti%init(comm_handler, np_cano, staggered=.false.)

            call gradpar_ne%init(comm_handler, np_stag, staggered=.true.)
            call gradpar_vort%init(comm_handler, np_stag, staggered=.true.)

            call psipar%init(comm_handler, np_stag, staggered=.true.)

            call jpar_t_extrapolate%init(comm_handler, np_stag, staggered=.true.)

        end subroutine setup_work_variables

        subroutine setup_halo_structure()
            !! Creates halo structures, i.e. allocates the halo spaces of the
            !! variables that exchange plane data (host association).

            ! Named halo widths passed to create_halos (order per its interface).
            integer, parameter :: one_layer = 1, no_layer = 0

            ! Variables which can be Neumann penalized: halos on both sides
            call logne%create_halos(comm_handler, one_layer, one_layer)
            call logte%create_halos(comm_handler, one_layer, one_layer)
            call logti%create_halos(comm_handler, one_layer, one_layer)
            call jpar_t_extrapolate%create_halos(comm_handler, one_layer, one_layer)

            ! Variables on which Q,M: G -> G* is applied: one-sided halo
            call cano_upar%create_halos(comm_handler, one_layer, no_layer)
            call nevar_adv%create_halos(comm_handler, one_layer, no_layer)
            call tevar_adv%create_halos(comm_handler, one_layer, no_layer)
            call jpar_over_n_full%create_halos(comm_handler, one_layer, no_layer)

            ! Variables on which Q*,M*: G* -> G is applied: opposite one-sided halo
            call vpar%create_halos(comm_handler, no_layer, one_layer)
            call upar_gradpar_logne%create_halos(comm_handler, no_layer, one_layer)
            call vpar_gradpar_logte%create_halos(comm_handler, no_layer, one_layer)
            call upar_gradpar_logti%create_halos(comm_handler, no_layer, one_layer)
            call qcond_te%create_halos(comm_handler, no_layer, one_layer)
            call qcond_ti%create_halos(comm_handler, no_layer, one_layer)
            call gradpar_ne%create_halos(comm_handler, no_layer, one_layer)
            call gradpar_vort%create_halos(comm_handler, no_layer, one_layer)

        end subroutine setup_halo_structure

    end subroutine

end module