! diagnostics_packet_m.f90 Source File
!
! Contents
!
! Source Code
!
! (Navigation text from the generated documentation page, kept here as
!  comments so the file remains valid free-form Fortran.)
module diagnostics_packet_m
    !! Module for diagnostics packet
    use MPI

    use comm_handler_m, only : comm_handler_t
    use screen_io_m, only :  get_stdout
    use variable_m, only : variable_t
    use precision_grillix_m, only : GP, MPI_GP, GP_EPS
    use perf_m, only : perf_start, perf_stop
    use sources_external_m, only : sources_external_t
    use mesh_cart_m, only: mesh_cart_t
    use parallel_map_m, only : parallel_map_t
    use inplane_operators_m, only : inplane_operators_t
    use equilibrium_m, only: equilibrium_t
    use penalisation_m, only : penalisation_t
    use polars_m, only : polars_t
    use equilibrium_storage_m, only : equilibrium_storage_t
    use descriptors_braginskii_m, only : BND_TYPE_NEUMANN
    use boundaries_braginskii_m, only : boundaries_braginskii_t
    use boundaries_neutrals_m, only : boundaries_neutrals_t
    use heat_flux_m, only : heat_flux_landau, heat_flux_braginskii, &
                            heat_flux_free_streaming
    use helmholtz_operator_m, only : helmholtz_single_inner
    use boundaries_perp_m, only : set_perpbnds, extrapolate_ghost_points
    use multistep_m, only : multistep_storage_t
    use rate_coeff_neutrals_m, only : rate_coeff_neutrals_t
    use coronal_impurities_m, only : coronal_impurity_t
    use parallel_target_flux_m, only : parallel_target_flux_t
    use perp_bnd_flux_m, only : perp_bnd_flux_t, compute_drift_flux, get_drift_flux_total, &
                                compute_diffusive_flux, get_diffusive_flux_total
    use timestep_braginskii_m, only : src_floor_ne
    use neutrals_module_m, only : src_floor_neutrals_dens
    use recycling_m, only : compute_recycling_source
    use polarisation_braginskii_m, only : compute_vorticity, parallel_polarisation_advection
    use gyroviscosity_m, only : gyroviscosity_t

    ! Parameters
    use params_feature_selection_m, only : &
        neutrals_on
    use params_tstep_m, only : &
        dtau
    use params_brag_model_m, only : &
        delta, beta, etapar0, nu_e0, mass_ratio_ei
    use params_brag_pardiss_model_m, only : &
        heatflux_model, chipar0_e, chipar0_i, free_streaming_fraction_e, free_streaming_fraction_i, &
        free_streaming_limiter_qfac, landau_flutter_lhs_on
    use params_brag_numdiss_m, only : &
        perpdiss_order_ne, perpdiss_coeff_ne, pardiss_coeff_ne, &
        perpdiss_order_vort, perpdiss_coeff_vort, pardiss_coeff_vort, &
        perpdiss_order_te, perpdiss_coeff_te, perpdiss_order_pe, perpdiss_coeff_pe, &
        perpdiss_order_ti, perpdiss_coeff_ti, perpdiss_order_pi, perpdiss_coeff_pi, &
        perpdiss_order_upar, perpdiss_coeff_upar, &
        perpdiss_order_ohm, perpdiss_coeff_ohm, pardiss_coeff_ohm
    use params_brag_buffer_m, only : &
        buffer_coeff_ne, buffer_coeff_te, buffer_coeff_pe, buffer_coeff_ti, buffer_coeff_pi, &
        buffer_coeff_upar, buffer_coeff_ohm, buffer_coeff_vort
    use params_brag_floors_m, only : &
        floor_ne, floor_te, floor_ti
    use params_neut_floors_m, only : &
        floor_dens, rho_min_for_parmom_advection
    use params_neut_model_m, only : &
        ne_ref, te_ref, k_ref, w_ref, pot_iz, s_cx, pardiss_coeff_dens
    use params_neut_data_paths_m, only : &
        path_k_iz, path_k_rec, path_w_iz, path_p_rec
    use params_impy_coronal_m, only : &
        impy_concentration, impy_path, impy_tab_length
    use params_brag_switches_m, only : &
        swb_etemp_flutter_pardiff_grad, swb_itemp_flutter_pardiff_grad
    implicit none

    ! Module-private helper routines used by compute_diagnostics_packet.
    ! NOTE(review): their implementations are presumably further down in this
    ! file (not visible in this chunk) — confirm.
    private :: compute_div_jpol
    private :: compute_dissipation
    private :: compute_dissipative_radflux

    type, public :: diagnostics_packet_t
        !! Collection of diagnostics objects
        !! Gets passed into timestep_brag/neut routines
        !! to pick up relevant data on entire mesh and
        !! is projected into different diagnostics dimensions
        !!
        !! All array components are allocatable and are allocated/filled by
        !! the `compute` type-bound procedure; they are unallocated before
        !! the first call to `compute`.

        ! Auxiliaries
        real(GP), dimension(:), allocatable, public :: ne_stag
        !! Electron density on staggered grid
        real(GP), dimension(:), allocatable, public :: te_stag
        !! Electron temperature on staggered grid
        real(GP), dimension(:), allocatable, public :: ti_stag
        !! Ion temperature on staggered grid
        real(GP), dimension(:), allocatable, public :: pot_stag
        !! Electrostatic potential on staggered grid
        real(GP), dimension(:), allocatable, public :: upar_cano
        !! Parallel ion velocity on canonical grid
        real(GP), dimension(:), allocatable, public :: neutrals_dens_stag
        !! Neutrals density on staggered grid
        real(GP), dimension(:), allocatable, public :: neutrals_parmom_cano
        !! Parallel neutrals momentum on canonical grid
        real(GP), dimension(:), allocatable, public :: nupar_cano
        !! Parallel ion flux on canonical grid
        real(GP), dimension(:), allocatable, public :: nvpar_cano
        !! Parallel electron flux on canonical grid
        real(GP), dimension(:), allocatable, public :: jpar_cano
        !! Parallel current on canonical grid
        real(GP), dimension(:), allocatable, public :: apar_fluct_over_btor
        !! Magnetic potential fluctuations normalised to Btor
        real(GP), dimension(:), allocatable, public :: radgrad_ne
        !! Radial gradient of density
        real(GP), dimension(:), allocatable, public :: radgrad_te
        !! Radial gradient of electron temperature
        real(GP), dimension(:), allocatable, public :: radgrad_ti
        !! Radial gradient of ion temperature
        real(GP), dimension(:), allocatable, public :: v_exb_rad
        !! Radial ExB velocity
        real(GP), dimension(:), allocatable, public :: v_dia_e_rad
        !! Radial electron diamagnetic velocity
        real(GP), dimension(:), allocatable, public :: v_dia_i_rad
        !! Radial ion diamagnetic velocity
        real(GP), dimension(:), allocatable, public :: b_1_rad
        !! Radial magnetic fluctuation

        ! Sources
        real(GP), dimension(:), allocatable, public :: src_ext_ne
        !! External density source
        real(GP), dimension(:), allocatable, public :: src_ext_te
        !! External electron temperature source
        real(GP), dimension(:), allocatable, public :: src_ext_ti
        !! External ion temperature source
        real(GP), dimension(:), allocatable, public :: src_iz_ne
        !! Electron density source from ionization
        real(GP), dimension(:), allocatable, public :: src_rec_ne
        !! Electron density source from recombination
        real(GP), dimension(:), allocatable, public :: src_neut_te
        !! Electron temperature source due to neutrals interactions
        real(GP), dimension(:), allocatable, public :: src_neut_ti
        !! Ion temperature source due to neutrals interactions
        real(GP), dimension(:), allocatable, public :: src_neut_upar
        !! Parallel ion velocity source due to neutrals interactions
        real(GP), dimension(:), allocatable, public :: src_neut_vort
        !! Vorticity source due to neutrals interactions
        real(GP), dimension(:), allocatable, public :: src_neut_pressure
        !! Neutrals pressure source
        real(GP), dimension(:), allocatable, public :: src_neut_rcy
        !! Neutrals density source due to recycling
        real(GP), dimension(:), allocatable, public :: src_floor_ne
        !! Density source due to floor application
        real(GP), dimension(:), allocatable, public :: src_floor_neutrals_dens
        !! Neutrals density source due to floor application

        ! Changerates of base quantities
        real(GP), dimension(:), allocatable, public :: ddt_eth
        !! Changerate of thermal energy density
        real(GP), dimension(:), allocatable, public :: ddt_ne
        !! Changerate of electron density
        real(GP), dimension(:), allocatable, public :: ddt_te
        !! Changerate of electron temperature
        real(GP), dimension(:), allocatable, public :: ddt_ti
        !! Changerate of ion temperature
        real(GP), dimension(:), allocatable, public :: ddt_neutrals_dens
        !! Changerate of neutrals density

        ! Energy/Power
        real(GP), dimension(:), allocatable, public :: eth
        !! Stored thermal energy density
        real(GP), dimension(:), allocatable, public :: p_heat_e
        !! Electron heating power
        real(GP), dimension(:), allocatable, public :: p_heat_i
        !! Ion heating power
        real(GP), dimension(:), allocatable, public :: p_rad_imp
        !! Electron energy lost by impurity radiation
        real(GP), dimension(:), allocatable, public :: p_rad_iz
        !! Direct electron energy loss from ionization
        real(GP), dimension(:), allocatable, public :: p_rad_rec
        !! Direct electron energy loss by main plasma recombination
        real(GP), dimension(:), allocatable, public :: p_neut_e
        !! Electron energy source due to neutrals interactions
        real(GP), dimension(:), allocatable, public :: p_neut_i
        !! Ion energy source due to neutrals interactions
        real(GP), dimension(:), allocatable, public :: p_ext_par
        !! Parallel kinetic energy source due to external sources
        real(GP), dimension(:), allocatable, public :: p_neut_par_cano
        !! Parallel kinetic energy source due to neutrals interactions (part on cano)
        real(GP), dimension(:), allocatable, public :: p_neut_par_stag
        !! Parallel kinetic energy source due to neutrals interactions (part on stag)
        real(GP), dimension(:), allocatable, public :: p_ext_perp
        !! Perpendicular kinetic energy source due to external sources
        real(GP), dimension(:), allocatable, public :: p_neut_perp
        !! Perpendicular kinetic energy source due to neutrals interactions

        ! Parallel heat fluxes
        real(GP), dimension(:), allocatable, public :: qpar_cond_e
        !! Conductive parallel electron heat flux
        real(GP), dimension(:), allocatable, public :: qpar_cond_i
        !! Conductive parallel ion heat flux
        real(GP), dimension(:), allocatable, public :: qpar_conv_e
        !! Convective parallel electron heat flux
        real(GP), dimension(:), allocatable, public :: qpar_conv_i
        !! Convective parallel ion heat flux

        ! Boundary fluxes
        ! ...parallel
        real(GP), dimension(:), allocatable, public :: parflux_ne_fwd
        !! Electron outflow through fwd target due to parallel advection
        real(GP), dimension(:), allocatable, public :: parflux_ne_bwd
        !! Electron outflow through bwd target due to parallel advection
        real(GP), dimension(:), allocatable, public :: parflux_ne_fwd_weighted
        !! Weighted electron outflow through fwd target due to parallel advection
        real(GP), dimension(:), allocatable, public :: parflux_ne_bwd_weighted
        !! Weighted electron outflow through bwd target due to parallel advection
        real(GP), dimension(:), allocatable, public :: parflux_neutrals_fwd
        !! Neutrals outflow through fwd target due to parallel advection
        real(GP), dimension(:), allocatable, public :: parflux_neutrals_bwd
        !! Neutrals outflow through bwd target due to parallel advection
        real(GP), dimension(:), allocatable, public :: parflux_neutralsdiss_fwd
        !! Neutrals outflow through fwd target due to parallel dissipation
        real(GP), dimension(:), allocatable, public :: parflux_neutralsdiss_bwd
        !! Neutrals outflow through bwd target due to parallel dissipation
        ! ...perpendicular
        real(GP), dimension(:), allocatable, public :: perpflux_ne_exb
        !! Electron outflow through perp boundary due to exb advection
        real(GP), dimension(:), allocatable, public :: perpflux_ne_dia
        !! Electron outflow through perp boundary due to diamag. advection
        real(GP), dimension(:), allocatable, public :: perpflux_neutrals_outer
        !! Neutrals outflow through outer perp boundary due to poloidal pressure diffusion
        real(GP), dimension(:), allocatable, public :: perpflux_neutrals_volint
        !! Volume integral of poloidal pressure diffusion

        ! Neutrals equation auxiliaries
        real(GP), dimension(:), allocatable, public :: neut_k_iz
        !! Ionization rate coefficient
        real(GP), dimension(:), allocatable, public :: neut_k_rec
        !! Recombination rate coefficient
        real(GP), dimension(:), allocatable, public :: neut_k_cx
        !! Charge exchange rate coefficient
        real(GP), dimension(:), allocatable, public :: neut_w_iz
        !! Ionization cooling rate coefficient
        real(GP), dimension(:), allocatable, public :: neut_w_rec
        !! Recombination cooling rate coefficient

        ! Continuity equation fluxes
        real(GP), dimension(:), allocatable, public :: cont_radflux_exb
        !! Radial ExB flux of ne
        real(GP), dimension(:), allocatable, public :: cont_radflux_dia
        !! Radial diamagnetic flux of ne
        real(GP), dimension(:), allocatable, public :: cont_radflux_visc
        !! Radial viscous fluxes of ne (hyperdiffusion + buffer)
        real(GP), dimension(:), allocatable, public :: cont_radflux_mag
        !! Radial electromagnetic flutter flux of ne

        ! Continuity equation: actually implemented terms
        real(GP), dimension(:), allocatable, public :: cont_exb_advection
        !! ExB advection (with Arakawa)
        real(GP), dimension(:), allocatable, public :: cont_flutter
        !! Magnetic flutter advection (with Arakawa)
        real(GP), dimension(:), allocatable, public :: cont_div_nupar
        !! Divergence of the parallel ion flux
        real(GP), dimension(:), allocatable, public :: cont_curv_pot
        !! ExB compression
        real(GP), dimension(:), allocatable, public :: cont_curv_pe
        !! Diamagnetic compression (electron velocity)
        real(GP), dimension(:), allocatable, public :: cont_hypvisc
        !! Hyperviscosity (dissipation)

        ! Vorticity equation
        real(GP), dimension(:), allocatable, public :: vort_curv_pi
        !! Diamagnetic compression (ion velocity)
        real(GP), dimension(:), allocatable, public :: vort_div_jpar
        !! Divergence of the parallel current (b0 background)
        real(GP), dimension(:), allocatable, public :: vort_flutter
        !! Magnetic flutter advection of jpar (with Arakawa), Maxwell stress
        real(GP), dimension(:), allocatable, public :: vort_hypvisc
        !! Hyperviscosity (dissipation)

        ! Thermal electron energy fluxes
        real(GP), dimension(:), allocatable, public :: ethe_ddt
        !! Changerate of E_the
        real(GP), dimension(:), allocatable, public :: ethe_radflux_exb
        !! Radial ExB flux of E_the
        real(GP), dimension(:), allocatable, public :: ethe_radflux_dia
        !! Diamagnetic flux of E_the
        real(GP), dimension(:), allocatable, public :: ethe_radflux_dia_real
        !! Diamagnetic flux of E_the - divergent part
        real(GP), dimension(:), allocatable, public :: ethe_pe_div_exb
        !! pe div v_E term in E_the equation RHS
        real(GP), dimension(:), allocatable, public :: ethe_vpar_gradpar_pe
        !! vpar gradpar pe term in E_the equation RHS
        real(GP), dimension(:), allocatable, public :: ethe_eta_jpar2
        !! Resistivity term in E_the equation RHS
        real(GP), dimension(:), allocatable, public :: ethe_jpar_gradpar_te
        !! jpar gradpar te term in E_the equation RHS
        real(GP), dimension(:), allocatable, public :: ethe_equipartition
        !! Equipartition term in E_the equation RHS
        real(GP), dimension(:), allocatable, public :: ethe_hypvisc
        !! Hyperviscous dissipation of E_the
        real(GP), dimension(:), allocatable, public :: ethe_buffer
        !! Buffer diffusion of E_the
        real(GP), dimension(:), allocatable, public :: ethe_src
        !! E_the source term in E_the equation RHS
        real(GP), dimension(:), allocatable, public :: ethe_radflux_mag_qtotal
        !! Radial flutter thermal conduction of electrons (total)
        real(GP), dimension(:), allocatable, public :: ethe_radflux_mag_qnormal
        !! Radial flutter thermal conduction of electrons (no flutter grad te)
        real(GP), dimension(:), allocatable, public ::  ethe_radflux_visc
        !! Radial viscous fluxes of thermal electron energy (hyperdiffusion + buffer)

        ! Thermal ion energy fluxes
        ! No need to compute separately ethi_ddt = ddt_eth - ethe_ddt
        real(GP), dimension(:), allocatable, public :: ethi_radflux_exb
        !! Radial ExB flux of E_thi
        real(GP), dimension(:), allocatable, public :: ethi_radflux_dia
        !! Radial diamagnetic flux of E_thi
        real(GP), dimension(:), allocatable, public :: ethi_pi_div_exb
        !! pi div v_E term in E_thi equation RHS
        real(GP), dimension(:), allocatable, public :: ethi_upar_gradpar_pi
        !! upar gradpar pi term in E_thi equation RHS
        real(GP), dimension(:), allocatable, public :: ethi_radflux_dia_real
        !! Radial diamagnetic flux of E_thi - divergent part
        real(GP), dimension(:), allocatable, public :: ethi_radflux_mag_adv
        !! Radial flutter advective flux of E_thi
        real(GP), dimension(:), allocatable, public :: ethi_radflux_mag_qtotal
        !! Radial flutter thermal conduction of ions (total)
        real(GP), dimension(:), allocatable, public :: ethi_radflux_mag_qnormal
        !! Radial flutter thermal conduction of ions (presumably no flutter grad ti;
        !! NOTE(review): comment previously said "grad te" — confirm against the implementation)
        real(GP), dimension(:), allocatable, public ::  ethi_radflux_visc
        !! Radial viscous fluxes of thermal ion energy (hyperdiffusion + buffer)

        real(GP), dimension(:), allocatable, public :: ekin_perp
        !! Perpendicular kinetic energy
        real(GP), dimension(:), allocatable, public :: ekin_perp_ddt
        !! Changerate of perpendicular kinetic energy
        real(GP), dimension(:), allocatable, public :: ekin_par
        !! Parallel kinetic energy
        real(GP), dimension(:), allocatable, public :: ekin_par_ddt
        !! Changerate of E_kinpar
        real(GP), dimension(:), allocatable, public :: eem
        !! Electromagnetic energy
        real(GP), dimension(:), allocatable, public :: eem_ddt
        !! Changerate of electromagnetic energy

        real(GP), dimension(:), allocatable, public :: etrans_upar_gradpar_pe
        !! upar gradpar pe transfer term
        !! (NOTE(review): comment previously said "pi ... E_thi"; name indicates pe — confirm)
        real(GP), dimension(:), allocatable, public :: etrans_jpar_gradpar_pot
        !! jpar gradpar pot transfer term
        real(GP), dimension(:), allocatable, public :: etrans_upol_grad_pi
        !! upol grad pi term in E_thi equation RHS
        real(GP), dimension(:), allocatable, public :: ethi_upol_flux
        !! div(5/2 pi * upol) in Ethi equation
        real(GP), dimension(:), allocatable, public :: ekinperp_upol_flux
        !! div(pot * ne * upol) in Ekinperp equation
        real(GP), dimension(:), allocatable, public :: ekinperp_jpol_poynt_flux
        !! Poynting-like polarisation-current flux in Ekinperp equation
        !! (NOTE(review): previous comment duplicated ekinperp_upol_flux — confirm exact expression)
        real(GP), dimension(:), allocatable, public :: eem_flux
        !! Electromagnetic flux of electromagnetic energy

        real(GP), dimension(:), allocatable, public :: aparv_fluct_cano
        !! Values of fluctuating part of electromagnetic potential on canonical mesh

        real(GP), dimension(:), allocatable, public :: diss_ne
        !! Dissipation (total) on density
        real(GP), dimension(:), allocatable, public :: diss_te
        !! Dissipation (total) on electron temperature
        real(GP), dimension(:), allocatable, public :: diss_pe
        !! Dissipation (total) on electron pressure
        real(GP), dimension(:), allocatable, public :: diss_ti
        !! Dissipation (total) on ion temperature
        real(GP), dimension(:), allocatable, public :: diss_pi
        !! Dissipation (total) on ion pressure
        real(GP), dimension(:), allocatable, public :: diss_upar
        !! Dissipation (total) on parallel velocity
        real(GP), dimension(:), allocatable, public :: diss_apar
        !! Dissipation (total) on parallel electromagnetic potential
        real(GP), dimension(:), allocatable, public :: diss_vort
        !! Dissipation (total) on vorticity
        real(GP), dimension(:), allocatable, public :: gstressv
        !! Ion stress function
        real(GP), dimension(:), allocatable, public :: gstressv_par
        !! Parallel part of ion stress function

    contains
        procedure, public :: compute => compute_diagnostics_packet
        !! Allocate and fill all diagnostics fields from the current state
        final :: destructor
        !! Finalizer releasing the packet's resources
    end type

contains

    subroutine compute_diagnostics_packet(self, comm_handler, equi, equi_on_cano, equi_on_stag, &
                        mesh_cano, mesh_stag, map, penalisation_cano, penalisation_stag, polars_cano, &
                        polars_stag, parflux_utils_cano, parflux_utils_stag, perp_bnd_flux_cano, &
                        perp_bnd_flux_stag, opsinplane_cano, opsinplane_stag, boundaries, &
                        boundaries_neutrals, cf_buffer_cano, cf_buffer_stag, cf_diss_cano, cf_diss_stag, &
                        tau, ne, te, ti, pot, vort, upar, jpar, apar, apar_fluct, gstress, &
                        neutrals_dens, neutrals_parmom, neutrals_pressure, sources_external, &
                        src_upar, src_vort, multistep_storage_cano, multistep_storage_stag)
        !! Compute diagnostics
        !! Only work for tratio=1
        class(diagnostics_packet_t), intent(inout) :: self
        !! Instance of class
        type(comm_handler_t), intent(in) :: comm_handler
        !! MPI communicators
        class(equilibrium_t), intent(inout) :: equi ! inout required for heat_flux_landau
        !! Equilibrium
        class(equilibrium_storage_t), intent(in) :: equi_on_cano
        !! Equilibrium storage on canonical plane enabling faster performance at certain locations
        class(equilibrium_storage_t), intent(in) :: equi_on_stag
        !! Equilibrium storage on staggered plane enabling faster performance at certain locations
        type(mesh_cart_t), intent(in) :: mesh_cano
        !! Mesh (canonical) within poloidal plane
        type(mesh_cart_t), intent(in) :: mesh_stag
        !! Mesh (staggered) within poloidal plane
        type(parallel_map_t), intent(in) :: map
        !! Parallel map
        type(penalisation_t), intent(in) :: penalisation_cano
        !! Penalisation (canonical)
        type(penalisation_t), intent(in) :: penalisation_stag
        !! Penalisation (staggered)
        type(polars_t), intent(in) :: polars_cano
        !! Polars (canonical)
        type(polars_t), intent(in) :: polars_stag
        !! Polars (staggered)
        type(parallel_target_flux_t), intent(in) :: parflux_utils_cano
        !! Parallel boundary flux utility (canonical)
        type(parallel_target_flux_t), intent(in) :: parflux_utils_stag
        !! Parallel boundary flux utility (staggered)
        type(perp_bnd_flux_t), intent(in) :: perp_bnd_flux_cano
        !! Perpendicular boundary flux utility (canonical)
        type(perp_bnd_flux_t), intent(in) :: perp_bnd_flux_stag
        !! Perpendicular boundary flux utility (staggered)
        type(inplane_operators_t), intent(inout) :: opsinplane_cano
        !! Inplane operators (canonical)
        type(inplane_operators_t), intent(inout) :: opsinplane_stag
        !! Inplane operators (staggered)
        type(boundaries_braginskii_t), intent(in) :: boundaries
        !! Boundary information for the Braginskii model
        type(boundaries_neutrals_t), intent(in) :: boundaries_neutrals
        !! Boundaries related with neutrals module
        real(GP), dimension(mesh_cano%get_n_points()), intent(in) :: cf_buffer_cano
        !! Buffer function (canonical)
        real(GP), dimension(mesh_stag%get_n_points()), intent(in) :: cf_buffer_stag
        !! Buffer function (staggered)
        real(GP), dimension(mesh_cano%get_n_points()), intent(in) :: cf_diss_cano
        !! Envelope function for dissipation (canonical)
        real(GP), dimension(mesh_stag%get_n_points()), intent(in) :: cf_diss_stag
        !! Envelope function for dissipation (staggered)
        real(GP), intent(in) :: tau
        !! Time
        type(variable_t), intent(inout) :: ne
        !! Electron density
        type(variable_t), intent(inout) :: te
        !! Electron temperature
        type(variable_t), intent(inout) :: ti
        !! Ion temperature
        type(variable_t), intent(inout) :: pot
        !! Electrostatic potential
        type(variable_t), intent(inout) :: vort
        !! Generalised vorticity
        type(variable_t), intent(inout) :: upar
        !! Parallel ion velocity
        type(variable_t), intent(inout) :: jpar
        !! Parallel current
        type(variable_t), intent(inout) :: apar
        !! Parallel electromagnetic potential
        type(variable_t), intent(inout) :: apar_fluct ! inout required for mapping to cano
        !! Fluctation of apar used for flutter operators
        type(gyroviscosity_t), intent(inout) :: gstress
        !! Gyroviscosity
        type(variable_t), intent(inout) :: neutrals_dens
        !! Neutrals density
        type(variable_t), intent(inout) :: neutrals_parmom
        !! Neutrals parallel momentum
        type(variable_t), intent(in) :: neutrals_pressure
        !! Neutrals pressure
        type(sources_external_t), intent(inout) :: sources_external
        !! External sources
        real(GP), dimension(mesh_stag%get_n_points_inner()), intent(in) :: src_upar
        !! Parallel momentum source values on inner mesh points
        real(GP), dimension(mesh_cano%get_n_points_inner()), intent(in) :: src_vort
        !! Vorticity source values on inner mesh points
        type(multistep_storage_t), intent(in) :: multistep_storage_cano
        !! Multistep storage for quantities on the canonical mesh
        type(multistep_storage_t), intent(in) :: multistep_storage_stag
        !! Multistep storage for quantities on the staggered mesh

        integer :: l, ki, kb
        real(GP) :: x, y, btor, eradx_cano, eradx_stag, erady_cano, erady_stag
        real(GP) :: aux1_gp, aux2_gp, aux3_gp, aux4_gp
        real(GP) :: q_perp_e_rad, q_perp_i_rad, gradpar_ne, gradpar_te, gradpar_ti
        real(GP) :: ddx_ne, ddy_ne, ddx_te, ddy_te, ddx_ti, ddy_ti, ddx_pot, ddy_pot, ddx_pi, ddy_pi, &
                    ddx_apar, ddy_apar, ddx_apar_over_btor, ddy_apar_over_btor, ddt_apar
        real(GP), dimension(mesh_cano%get_n_points_inner()) :: aux1_inner, aux2_inner, aux3_inner
        real(GP), dimension(mesh_cano%get_n_points_inner()) :: numdiss_ne_inner
        real(GP), dimension(mesh_cano%get_n_points_inner()) :: numdiss_vort_inner, numdiss_te_inner, numdiss_pe_inner
        real(GP), dimension(:), allocatable :: aux1, aux2

        integer, dimension(mesh_cano%get_n_points_boundary()) :: bnd_descrs_nmn_cano
        integer, dimension(mesh_stag%get_n_points_boundary()) :: bnd_descrs_nmn_stag
        type(rate_coeff_neutrals_t) :: k_iz, k_rec, w_iz, p_rec
        type(coronal_impurity_t) :: l_imp
        integer, dimension(48) :: slv_info
        real(GP), dimension(48,2) :: slv_rinfo

        ! Size of info-debug output (4*12 lorentzians for heat_flux_landau)
        real(GP) :: fac_free_streaming_e,fac_free_streaming_i
        real(GP) :: flutter_gradpar_logte, flutter_gradpar_logti, &
                    flutter_gradpar_ne, flutter_gradpar_te, flutter_gradpar_ti, &
                    gradpar_pot, flutter_gradpar_pot
        type(variable_t) :: pe, pi
        type(variable_t) :: apar_fluct_cano, nupar_stag
        real(GP), dimension(mesh_stag%get_n_points()) :: qpar_cond_e_normal, qpar_cond_i_normal
        real(GP), dimension(mesh_stag%get_n_points()) :: normal_gradpar_logte, total_gradpar_logte
        real(GP), dimension(mesh_stag%get_n_points()) :: normal_gradpar_logti, total_gradpar_logti
        ! Neutrals diffusion coefficient
        real(GP), dimension(mesh_cano%get_n_points()) :: diffco

        type(variable_t) :: varaux

        call perf_start('../compute_diagnostics_packet')

        ! Set perp boundary descriptors
        !$omp parallel default(none) private(kb) &
        !$omp          shared(mesh_cano, mesh_stag, bnd_descrs_nmn_cano, bnd_descrs_nmn_stag)
        !$omp do
        do kb = 1, mesh_cano%get_n_points_boundary()
            bnd_descrs_nmn_cano(kb) = BND_TYPE_NEUMANN
        enddo
        !$omp end do
        !$omp do
        do kb = 1, mesh_stag%get_n_points_boundary()
            bnd_descrs_nmn_stag(kb) = BND_TYPE_NEUMANN
        enddo
        !$omp end do
        !$omp end parallel

        ! -------------------------------------------------------------------------------
        ! Neutrals-related reaction rates

        allocate(self%neut_k_iz(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%neut_k_rec(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%neut_k_cx(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%neut_w_iz(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%neut_w_rec(mesh_cano%get_n_points()), source=0.0_GP)

        if (neutrals_on) then
            ! Initialize rate coefficients
            call k_iz%create(path_k_iz, ne_ref, te_ref, k_ref)
            call k_rec%create(path_k_rec, ne_ref, te_ref, k_ref)
            call w_iz%create(path_w_iz, ne_ref, te_ref, w_ref)
            call p_rec%create(path_p_rec, ne_ref, te_ref, w_ref)
            if (impy_concentration > GP_EPS) then
                call l_imp%create(impy_path, impy_tab_length, te_ref, w_ref, impy_concentration)
            endif

            !$omp parallel do default(none) private(l) &
            !$omp          shared(self, mesh_cano, ne, te, ti, k_iz, &
            !$omp                 k_rec, w_iz, p_rec, pot_iz, te_ref, s_cx)
            do l = 1, mesh_cano%get_n_points() 
                self%neut_k_iz(l)  = k_iz%eval(te%vals(l), ne%vals(l))
                self%neut_k_rec(l) = k_rec%eval(te%vals(l), ne%vals(l))
                self%neut_k_cx(l) = 2.93_GP * s_cx * sqrt(ti%vals(l))
                self%neut_w_iz(l) = w_iz%eval(te%vals(l), ne%vals(l))
                self%neut_w_rec(l) = p_rec%eval(te%vals(l), ne%vals(l)) - self%neut_k_rec(l) * pot_iz / te_ref
            enddo
            !$omp end parallel do
        endif

        ! -------------------------------------------------------------------------------
        ! Staggered / Canonical mapping / Pressure
        !
        ! Interpolate fields between the canonical and staggered grids and build
        ! derived quantities (parallel particle fluxes, pressures).  Grid semantics
        ! (cell-centred vs. staggered) follow the lift_cano_to_stag /
        ! flat_stag_to_cano naming — assumed, confirm against parallel_map_m.

        allocate(self%ne_stag(mesh_stag%get_n_points()))
        allocate(self%te_stag(mesh_stag%get_n_points()))
        allocate(self%ti_stag(mesh_stag%get_n_points()))
        allocate(self%pot_stag(mesh_stag%get_n_points()))
        allocate(self%upar_cano(mesh_cano%get_n_points()))
        allocate(self%neutrals_dens_stag(mesh_stag%get_n_points()))
        allocate(self%neutrals_parmom_cano(mesh_cano%get_n_points()))
        allocate(self%nupar_cano(mesh_cano%get_n_points()))
        allocate(self%nvpar_cano(mesh_cano%get_n_points()))
        allocate(self%jpar_cano(mesh_cano%get_n_points()))
        allocate(self%apar_fluct_over_btor(mesh_stag%get_n_points()))

        ! Temporary work variables on the two grids; only nupar_stag, pe and pi
        ! need halo buffers (exchanged at the end of this section).
        call apar_fluct_cano%init(comm_handler, mesh_cano%get_n_points(), staggered=.false.)
        call nupar_stag%init(comm_handler, mesh_stag%get_n_points(), staggered=.true.)
        call nupar_stag%create_halos(comm_handler, 0, 1)
        call pe%init(comm_handler, mesh_cano%get_n_points(), staggered=.false.)
        call pe%create_halos(comm_handler, 1, 0)
        call pi%init(comm_handler, mesh_cano%get_n_points(), staggered=.false.)
        call pi%create_halos(comm_handler, 1, 0)

        ! Fill the halo buffers (hfwd/hbwd) of all evolved fields: the map%...
        ! calls below consume them for the toroidal interpolation.
        call ne%start_comm_halos(comm_handler)
        call ne%finalize_comm_halos(comm_handler)
        call te%start_comm_halos(comm_handler)
        call te%finalize_comm_halos(comm_handler)
        call ti%start_comm_halos(comm_handler)
        call ti%finalize_comm_halos(comm_handler)
        call pot%start_comm_halos(comm_handler)
        call pot%finalize_comm_halos(comm_handler)
        call upar%start_comm_halos(comm_handler)
        call upar%finalize_comm_halos(comm_handler)
        call vort%start_comm_halos(comm_handler)
        call vort%finalize_comm_halos(comm_handler)
        call neutrals_dens%start_comm_halos(comm_handler)
        call neutrals_dens%finalize_comm_halos(comm_handler)
        call neutrals_parmom%start_comm_halos(comm_handler)
        call neutrals_parmom%finalize_comm_halos(comm_handler)
        call jpar%start_comm_halos(comm_handler)
        call jpar%finalize_comm_halos(comm_handler)
        call apar_fluct%start_comm_halos(comm_handler)
        call apar_fluct%finalize_comm_halos(comm_handler)
        call apar%start_comm_halos(comm_handler)
        call apar%finalize_comm_halos(comm_handler)

        ! NOTE(review): the map%... calls below are executed by every thread of
        ! this parallel region — presumably they contain orphaned !$omp do
        ! worksharing internally; confirm in parallel_map_m.
        !$omp parallel default(none) private(l) &
        !$omp          shared(self, mesh_cano, mesh_stag, map, equi_on_stag, ne, te, ti, pot, upar, neutrals_dens, &
        !$omp          neutrals_parmom, floor_ne, floor_te, floor_ti, floor_dens, jpar, apar_fluct, &
        !$omp          apar_fluct_cano, nupar_stag, pe, pi)
        call map%lift_cano_to_stag(ne%vals, ne%hfwd, self%ne_stag)
        call map%lift_cano_to_stag(te%vals, te%hfwd, self%te_stag)
        call map%lift_cano_to_stag(ti%vals, ti%hfwd, self%ti_stag)
        call map%lift_cano_to_stag(pot%vals, pot%hfwd, self%pot_stag)
        call map%lift_cano_to_stag(neutrals_dens%vals, neutrals_dens%hfwd, self%neutrals_dens_stag)
        call map%flat_stag_to_cano(upar%vals, upar%hbwd, self%upar_cano)
        call map%flat_stag_to_cano(jpar%vals, jpar%hbwd, self%jpar_cano)
        call map%flat_stag_to_cano(neutrals_parmom%vals, neutrals_parmom%hbwd, self%neutrals_parmom_cano)
        call map%flat_stag_to_cano(apar_fluct%vals, apar_fluct%hbwd, apar_fluct_cano%vals)
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            ! nupar: ion parallel particle flux n*u_par.
            ! nvpar: n*u_par - j_par; presumably the electron parallel particle
            ! flux n*v_par,e — TODO confirm sign convention of jpar.
            self%nupar_cano(l) = ne%vals(l) * self%upar_cano(l)
            self%nvpar_cano(l) = ne%vals(l) * self%upar_cano(l) - self%jpar_cano(l)
            pe%vals(l) = ne%vals(l) * te%vals(l)
            pi%vals(l) = ne%vals(l) * ti%vals(l)
        enddo
        !$omp end do
        !$omp do
        do l = 1, mesh_stag%get_n_points()
            ! Apply floor to staggered ne, te, ti
            ! (floors re-applied after interpolation, which may undershoot)
            self%ne_stag(l) = max(self%ne_stag(l), floor_ne)
            self%te_stag(l) = max(self%te_stag(l), floor_te)
            self%ti_stag(l) = max(self%ti_stag(l), floor_ti)
            self%neutrals_dens_stag(l) = max(self%neutrals_dens_stag(l), floor_dens)
            nupar_stag%vals(l) = self%ne_stag(l) * upar%vals(l)
            self%apar_fluct_over_btor(l) = apar_fluct%vals(l) / equi_on_stag%btor(l)
        enddo
        !$omp end do
        !$omp end parallel

        ! Sourced allocation: copies apar_fluct_cano%vals into the packet.
        allocate(self%aparv_fluct_cano(mesh_cano%get_n_points()), source=apar_fluct_cano%vals)

        call nupar_stag%start_comm_halos(comm_handler)
        call nupar_stag%finalize_comm_halos(comm_handler)
        call pe%start_comm_halos(comm_handler)
        call pe%finalize_comm_halos(comm_handler)
        call pi%start_comm_halos(comm_handler)
        call pi%finalize_comm_halos(comm_handler)

        ! -------------------------------------------------------------------------------
        ! Radial auxiliaries
        !
        ! Radial projections of gradients and drift velocities, built from the
        ! in-plane x/y derivatives and the unit radial vector erad = (erad_x, erad_y)
        ! stored in the equilibrium.  b_1_rad is the radial component of the
        ! perturbed (flutter) magnetic field, scaled by beta.

        allocate(self%radgrad_ne(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%radgrad_te(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%radgrad_ti(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%v_exb_rad(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%v_dia_e_rad(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%v_dia_i_rad(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%b_1_rad(mesh_stag%get_n_points()), source=0.0_GP)

        !$omp parallel default(none) &
        !$omp          private(l, ki, ddx_ne, ddy_ne, ddx_te, ddy_te, ddx_ti, ddy_ti, ddx_pot, &
        !$omp                  ddy_pot, ddx_apar_over_btor, ddy_apar_over_btor, eradx_cano, &
        !$omp                  eradx_stag, erady_cano, erady_stag, btor) &
        !$omp          shared(self, mesh_cano, mesh_stag, opsinplane_cano, opsinplane_stag, &
        !$omp                 equi_on_cano, equi_on_stag, beta, bnd_descrs_nmn_cano, &
        !$omp                 bnd_descrs_nmn_stag, ne, te, ti, pot, apar_fluct)
        !$omp do
        do ki = 1, mesh_cano%get_n_points_inner()
            l = mesh_cano%inner_indices(ki)
            btor    = equi_on_cano%btor(l)
            ddx_ne  = opsinplane_cano%ddx(ne%vals, l)
            ddy_ne  = opsinplane_cano%ddy(ne%vals, l)
            ddx_te  = opsinplane_cano%ddx(te%vals, l)
            ddy_te  = opsinplane_cano%ddy(te%vals, l)
            ddx_ti  = opsinplane_cano%ddx(ti%vals, l)
            ddy_ti  = opsinplane_cano%ddy(ti%vals, l)
            ddx_pot = opsinplane_cano%ddx(pot%vals, l)
            ddy_pot = opsinplane_cano%ddy(pot%vals, l)
            eradx_cano = equi_on_cano%erad(l,1)
            erady_cano = equi_on_cano%erad(l,2)

            ! Radial ExB velocity: (erad x grad(pot))/B projected radially.
            self%v_exb_rad(l) = ( eradx_cano * ddy_pot - erady_cano * ddx_pot ) / btor
            ! Electron / ion diamagnetic radial velocities; opposite overall sign
            ! (electron vs. ion charge), each using grad(p)/n expanded via the
            ! product rule on n and T.
            self%v_dia_e_rad(l) = - ( + eradx_cano * (  te%vals(l) * ddy_ne + ne%vals(l) * ddy_te ) &
                              - erady_cano * (  te%vals(l) * ddx_ne + ne%vals(l) * ddx_te ) &
                            ) / ( btor * ne%vals(l) )
            self%v_dia_i_rad(l) = ( + eradx_cano * (  ti%vals(l) * ddy_ne + ne%vals(l) * ddy_ti ) &
                              - erady_cano * (  ti%vals(l) * ddx_ne + ne%vals(l) * ddx_ti ) &
                            ) / ( btor * ne%vals(l) )
            ! Radial gradients: in-plane gradient dotted with erad.
            self%radgrad_ne(l) =  ddx_ne * eradx_cano + ddy_ne * erady_cano
            self%radgrad_te(l) =  ddx_te * eradx_cano + ddy_te * erady_cano
            self%radgrad_ti(l) =  ddx_ti * eradx_cano + ddy_ti * erady_cano
        enddo
        !$omp end do
        !$omp do
        do ki = 1, mesh_stag%get_n_points_inner()
            l = mesh_stag%inner_indices(ki)
            ddx_apar_over_btor = opsinplane_stag%ddx(self%apar_fluct_over_btor, l)
            ddy_apar_over_btor = opsinplane_stag%ddy(self%apar_fluct_over_btor, l)
            eradx_stag = equi_on_stag%erad(l,1)
            erady_stag = equi_on_stag%erad(l,2)

            ! Radial magnetic flutter component from curl of (A_par/B_tor), x beta.
            self%b_1_rad(l) = ( - eradx_stag * ddy_apar_over_btor + erady_stag * ddx_apar_over_btor ) * beta
        enddo
        !$omp end do
        ! Boundary / ghost-point treatment.
        ! NOTE(review): these calls run on every thread of the enclosing parallel
        ! region — presumably they contain their own worksharing or are idempotent;
        ! confirm in boundaries_perp_m.
        call set_perpbnds(mesh_cano, bnd_descrs_nmn_cano, self%radgrad_ne)
        call set_perpbnds(mesh_cano, bnd_descrs_nmn_cano, self%radgrad_te)
        call set_perpbnds(mesh_cano, bnd_descrs_nmn_cano, self%radgrad_ti)
        call set_perpbnds(mesh_cano, bnd_descrs_nmn_cano, self%v_exb_rad)
        call set_perpbnds(mesh_cano, bnd_descrs_nmn_cano, self%v_dia_e_rad)
        call set_perpbnds(mesh_cano, bnd_descrs_nmn_cano, self%v_dia_i_rad)
        call set_perpbnds(mesh_stag, bnd_descrs_nmn_stag, self%b_1_rad)
        call extrapolate_ghost_points(mesh_cano, self%radgrad_ne)
        call extrapolate_ghost_points(mesh_cano, self%radgrad_te)
        call extrapolate_ghost_points(mesh_cano, self%radgrad_ti)
        call extrapolate_ghost_points(mesh_cano, self%v_exb_rad)
        call extrapolate_ghost_points(mesh_cano, self%v_dia_e_rad)
        call extrapolate_ghost_points(mesh_cano, self%v_dia_i_rad)
        call extrapolate_ghost_points(mesh_stag, self%b_1_rad)
        !$omp end parallel

        ! -------------------------------------------------------------------------------
        ! Parallel boundary fluxes
        !
        ! Fluxes through the forward/backward parallel boundaries, reduced to
        ! scalars by parflux_utils_*%compute (aux1_gp = forward, aux2_gp = backward).

        ! Compute final scalar fluxes for now. TODO: store individual gridpoint fluxes in packet
        ! (packet fields are size-1 arrays holding the domain-reduced scalars)
        allocate(self%parflux_ne_fwd(1))
        allocate(self%parflux_ne_bwd(1))
        allocate(self%parflux_ne_fwd_weighted(1))
        allocate(self%parflux_ne_bwd_weighted(1))
        allocate(self%parflux_neutrals_fwd(1))
        allocate(self%parflux_neutrals_bwd(1))
        allocate(self%parflux_neutralsdiss_fwd(1))
        allocate(self%parflux_neutralsdiss_bwd(1))

        ! Electron particle flux n*v_par,e (nvpar_cano), plain and weighted modes.
        ! NOTE(review): semantics of mode='WEIGHTED' not visible here — see
        ! parallel_target_flux_m.
        call parflux_utils_cano%compute(comm_handler, mesh_cano, penalisation_cano, equi_on_cano, &
                                    self%nvpar_cano, aux1_gp, aux2_gp, mode='DEFAULT')
        self%parflux_ne_fwd = aux1_gp
        self%parflux_ne_bwd = aux2_gp

        call parflux_utils_cano%compute(comm_handler, mesh_cano, penalisation_cano, equi_on_cano, &
                                    self%nvpar_cano, aux1_gp, aux2_gp, mode='WEIGHTED')
        self%parflux_ne_fwd_weighted = aux1_gp
        self%parflux_ne_bwd_weighted = aux2_gp

        ! Neutrals parallel momentum flux
        call parflux_utils_cano%compute(comm_handler, mesh_cano, penalisation_cano, equi_on_cano, &
                                    self%neutrals_parmom_cano, aux1_gp, aux2_gp, mode='DEFAULT')
        self%parflux_neutrals_fwd = aux1_gp
        self%parflux_neutrals_bwd = aux2_gp

        ! Neutrals parallel dissipation flux
        allocate(aux1(mesh_stag%get_n_points()))

        call neutrals_dens%start_comm_halos(comm_handler)
        call neutrals_dens%finalize_comm_halos(comm_handler)
        ! Diffusive parallel flux: -D * grad_par(N), evaluated on the staggered grid.
        !$omp parallel do default(none) private(l) shared(mesh_stag, map, aux1, neutrals_dens, pardiss_coeff_dens)
        do l = 1, mesh_stag%get_n_points()
            aux1(l) = - pardiss_coeff_dens * map%par_grad_single(neutrals_dens%vals, neutrals_dens%hfwd, l)
        enddo
        !$omp end parallel do
        call parflux_utils_stag%compute(comm_handler, mesh_stag, penalisation_stag, equi_on_stag, &
                                    aux1, aux1_gp, aux2_gp, mode='DEFAULT')
        self%parflux_neutralsdiss_fwd = aux1_gp
        self%parflux_neutralsdiss_bwd = aux2_gp

        deallocate(aux1)

        ! -------------------------------------------------------------------------------
        ! Perpendicular boundary fluxes
        !
        ! Fluxes through the outer perpendicular boundary: ExB and diamagnetic
        ! plasma particle fluxes, and the diffusive neutrals flux (plus a volume
        ! integral of the diffusion operator as a consistency check).

        allocate(self%perpflux_ne_exb(1))
        allocate(self%perpflux_ne_dia(1))
        allocate(self%perpflux_neutrals_outer(1))
        allocate(self%perpflux_neutrals_volint(mesh_cano%get_n_points_inner()))

        ! Work array on the outer-boundary point set.
        allocate(aux1(perp_bnd_flux_cano%outer%get_np_total()))

        ! ExB flux
        call compute_drift_flux(equi, equi_on_cano, mesh_cano, map%get_dphi(), &
                                perp_bnd_flux_cano%outer, pot%vals, aux1, ne%vals)
        self%perpflux_ne_exb = get_drift_flux_total(comm_handler, perp_bnd_flux_cano%outer, aux1, &
                                                    sum_over_planes=.true.)
        self%perpflux_ne_exb = self%perpflux_ne_exb / delta

        ! Diamagnetic flux
        ! Built as the sum of two drift-flux terms with the roles of ne and te
        ! swapped (product rule for grad(n*T)), then sign-flipped and scaled.
        call compute_drift_flux(equi, equi_on_cano, mesh_cano, map%get_dphi(), &
                                perp_bnd_flux_cano%outer, te%vals, aux1, ne%vals)
        self%perpflux_ne_dia = get_drift_flux_total(comm_handler, perp_bnd_flux_cano%outer, aux1, &
                                                    sum_over_planes=.true.)

        call compute_drift_flux(equi, equi_on_cano, mesh_cano, map%get_dphi(), &
                                perp_bnd_flux_cano%outer, ne%vals, aux1, te%vals)
        self%perpflux_ne_dia = self%perpflux_ne_dia + &
                               get_drift_flux_total(comm_handler, perp_bnd_flux_cano%outer, aux1, &
                                                    sum_over_planes=.true.)
        self%perpflux_ne_dia = -self%perpflux_ne_dia / delta

        ! Diffusive neutrals flux
        ! Neutral diffusivity D = 1/(n*k_cx); presumably the charge-exchange-limited
        ! diffusion model — confirm against neutrals documentation.
        !$omp parallel do default(none) private(l) shared(self, mesh_cano, diffco, ne)
        do l = 1, mesh_cano%get_n_points()
            diffco(l) = 1.0_GP / ( ne%vals(l) * self%neut_k_cx(l))
        enddo
        !$omp end parallel do
        call compute_diffusive_flux(equi, mesh_cano, map%get_dphi(), perp_bnd_flux_cano%outer, &
                                    perp_bnd_flux_cano%conn_outer, neutrals_pressure%vals, aux1, diffco)
        self%perpflux_neutrals_outer = get_diffusive_flux_total(comm_handler, perp_bnd_flux_cano%outer, &
                                                                aux1, sum_over_planes=.true.)
        deallocate(aux1)

        ! Volume integral of diffusion (as sanity check)
        ! aux1 here re-computes the same diffusivity 1/(n*k_cx) as diffco above.
        allocate(aux1(mesh_cano%get_n_points()))
        !$omp parallel default(none) private(ki, l) shared(self, mesh_cano, neutrals_pressure, ne, aux1)
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            aux1(l) = 1.0_GP / (ne%vals(l) * self%neut_k_cx(l))
        enddo
        !$omp end do
        !$omp do
        do ki = 1, mesh_cano%get_n_points_inner()
            l = mesh_cano%inner_indices(ki)
            ! Helmholtz operator with xi=1, lambda=0 applied with coefficient aux1;
            ! presumably yields div(D grad p_n) per inner point — confirm in
            ! helmholtz_operator_m.
            self%perpflux_neutrals_volint(ki) = helmholtz_single_inner(mesh_cano, neutrals_pressure%vals, l, &
                                                                       co=aux1, xiv=1.0_GP, lambdav=0.0_GP)
        enddo
        !$omp end do
        !$omp end parallel
        deallocate(aux1)

        ! -------------------------------------------------------------------------------
        ! External sources
        !
        ! Evaluate the configured external particle/heat sources on the inner
        ! points and scatter them into full-size packet arrays (zero elsewhere).

        allocate(self%src_ext_ne(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%src_ext_te(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%src_ext_ti(mesh_cano%get_n_points()), source=0.0_GP)

        aux1_inner = 0.0_GP
        aux2_inner = 0.0_GP
        aux3_inner = 0.0_GP

        call sources_external%eval(comm_handler, equi_on_cano, mesh_cano, &
                                    polars_cano, tau, ne, pot, vort, &
                                    upar, jpar, apar_fluct, te, ti, &
                                    src_ne=aux1_inner, &
                                    src_te=aux2_inner, &
                                    src_ti=aux3_inner)

        ! Scatter inner-indexed source arrays onto the full grid index l.
        !$omp parallel do default(none) private(l, ki) shared(self, mesh_cano, &
        !$omp          aux1_inner, aux2_inner, aux3_inner)
        do ki = 1, mesh_cano%get_n_points_inner()
            l = mesh_cano%inner_indices(ki)
            self%src_ext_ne(l) = aux1_inner(ki)
            self%src_ext_te(l) = aux2_inner(ki)
            self%src_ext_ti(l) = aux3_inner(ki)
        enddo
        !$omp end parallel do

        ! -------------------------------------------------------------------------------
        ! Neutrals-related sources
        !
        ! Plasma source/sink terms from ionisation, recombination and charge
        ! exchange, plus the recycling source.  Uses the rate coefficients
        ! (neut_k_*, neut_w_*) computed earlier in this routine.

        allocate(self%src_iz_ne(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%src_rec_ne(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%src_neut_te(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%src_neut_ti(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%src_neut_upar(mesh_stag%get_n_points()), source=0.0_GP)
        allocate(self%src_neut_vort(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%src_neut_pressure(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%src_neut_rcy(mesh_cano%get_n_points()), source=0.0_GP)

        ! NOTE(review): k_iz and s_cx appear in the shared list but are not
        ! referenced inside this region (rates are read via self%neut_k_*).
        !$omp parallel default(none) private(ki, l, aux1_gp, aux2_gp) &
        !$omp          shared(self, mesh_cano, mesh_stag, map, ne, te, ti, neutrals_dens, neutrals_parmom, neutrals_pressure, &
        !$omp                 k_iz, s_cx, upar, equi_on_cano, rho_min_for_parmom_advection, src_upar, src_vort)
        !$omp do
        do ki = 1, mesh_cano%get_n_points_inner()
            l = mesh_cano%inner_indices(ki)
            self%src_neut_vort(l) = src_vort(ki)
        enddo
        !$omp end do
        !$omp do
        do ki = 1, mesh_stag%get_n_points_inner()
            l = mesh_stag%inner_indices(ki)
            self%src_neut_upar(l) = src_upar(ki)
        enddo
        !$omp end do
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            ! Particle sources: ionisation gain and recombination loss.
            self%src_iz_ne(l) = self%neut_k_iz(l) * ne%vals(l) * neutrals_dens%vals(l)
            self%src_rec_ne(l) = - self%neut_k_rec(l) * ne%vals(l)**2

            ! aux1_gp: neutral parallel velocity on the canonical grid
            ! (parallel momentum mapped stag->cano, divided by neutral density).
            aux1_gp = map%flat_stag_to_cano_single(neutrals_parmom%vals, neutrals_parmom%hbwd, l) / neutrals_dens%vals(l)

            ! Electron temperature source from ionisation/recombination energy
            ! weights and the dilution/concentration of thermal energy.
            self%src_neut_te(l) = - 2.0_GP / 3.0_GP * ( self%neut_w_iz(l) * neutrals_dens%vals(l) + self%neut_w_rec(l) * ne%vals(l) ) &
                                  - te%vals(l) * ( self%neut_k_iz(l) * neutrals_dens%vals(l) - self%neut_k_rec(l) * ne%vals(l) )

            ! aux2_gp: squared slip velocity between ions and neutrals.
            aux2_gp = ( self%upar_cano(l) - aux1_gp )**2
            if (equi_on_cano%rho(l) < rho_min_for_parmom_advection) then
                ! Towards the plasma core, N is close to zero, and computing
                ! parallel neutrals velocity related terms (where a division
                ! by N occurs) can lead to numerical problems.
                aux2_gp = 0.0_GP
            endif

            ! Ion temperature source: CX/ionisation temperature exchange plus
            ! frictional heating from the slip velocity.
            self%src_neut_ti(l) = neutrals_dens%vals(l) * ( self%neut_k_iz(l) + self%neut_k_cx(l) ) * ( &
                                  + ( neutrals_pressure%vals(l) / neutrals_dens%vals(l) - ti%vals(l) ) &
                                  + 1.0_GP / 3.0_GP * aux2_gp )

            ! Neutral pressure source (recombination gain, ionisation loss,
            ! CX exchange, frictional term); note the slip term here is NOT
            ! zeroed by the rho_min guard above — presumably intentional,
            ! TODO confirm.
            self%src_neut_pressure(l) = + self%neut_k_rec(l) * ne%vals(l)**2 * ti%vals(l)  &
                                        - self%neut_k_iz(l) * ne%vals(l) * neutrals_pressure%vals(l) &
                                        + self%neut_k_cx(l) * ne%vals(l) * neutrals_dens%vals(l) * ti%vals(l) &
                                        - self%neut_k_cx(l) * ne%vals(l) * neutrals_pressure%vals(l) &
                                        + ne%vals(l) / 3.0_GP * ( ne%vals(l) * self%neut_k_rec(l) + &
                                                                  neutrals_dens%vals(l) * self%neut_k_cx(l) ) &
                                           * ( self%upar_cano(l) - aux1_gp )**2
        enddo
        !$omp end do
        !$omp end parallel

        call compute_recycling_source(comm_handler, equi, mesh_cano, map, equi_on_cano, &
                                      penalisation_cano, parflux_utils_cano, perp_bnd_flux_cano, &
                                      ne, pot, te, self%nvpar_cano, apar_fluct_cano%vals, &
                                      neutrals_dens, self%neutrals_parmom_cano, &
                                      self%src_neut_rcy)

        ! -------------------------------------------------------------------------------
        ! Numerical sources
        !
        ! Copies of the floor-enforcement sources accumulated by the time stepper.
        ! src_floor_ne may be unallocated (presumably when the ne floor was never
        ! triggered / feature disabled — confirm in timestep_braginskii_m), hence
        ! the allocated() guard; the packet field then stays zero.

        allocate(self%src_floor_ne(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%src_floor_neutrals_dens(mesh_cano%get_n_points()), source=0.0_GP)

        !$omp parallel default(none) private(l) shared(self, mesh_cano, src_floor_ne, src_floor_neutrals_dens)
        if (allocated(src_floor_ne)) then
            !$omp do
            do l = 1, mesh_cano%get_n_points()
                self%src_floor_ne(l) = src_floor_ne(l)
            enddo
            !$omp end do
        endif
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            self%src_floor_neutrals_dens(l) = src_floor_neutrals_dens(l)
        enddo
        !$omp end do
        !$omp end parallel

        ! -------------------------------------------------------------------------------
        ! Energies
        !
        ! Thermal energy density: (3/2) * n * (Te + Ti).

        allocate(self%eth(mesh_cano%get_n_points()), source=0.0_GP)

        !$omp parallel default(none) private(l) shared(self, mesh_cano, ne, te, ti)
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            self%eth(l) = 1.5_GP * ne%vals(l) * ( te%vals(l) + ti%vals(l) )
        enddo
        !$omp end do
        !$omp end parallel

        ! -------------------------------------------------------------------------------
        ! Changerates
        !
        ! Finite-difference time derivatives from the multistep storage:
        ! vstore(l, step, field) with step slots 1 and 2 differenced over dtau
        ! (which slot is the newer level is not visible here — confirm in
        ! multistep_m).  Field slots used: 1=ne, 2=te, 3=ti, 5=neutrals density
        ! (slot 4 skipped; presumably a staggered-grid field — confirm).

        allocate(self%ddt_eth(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%ddt_ne(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%ddt_te(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%ddt_ti(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%ddt_neutrals_dens(mesh_cano%get_n_points()), source=0.0_GP)

        !$omp parallel do default(none) private(l, aux1_gp, aux2_gp) &
        !$omp          shared(self, mesh_cano, multistep_storage_cano, dtau)
        do l = 1, mesh_cano%get_n_points()
            aux1_gp = multistep_storage_cano%vstore(l,1,1)
            aux2_gp = multistep_storage_cano%vstore(l,2,1)
            self%ddt_ne(l) = ( aux1_gp - aux2_gp ) / dtau
            aux1_gp = multistep_storage_cano%vstore(l,1,2)
            aux2_gp = multistep_storage_cano%vstore(l,2,2)
            self%ddt_te(l) = ( aux1_gp - aux2_gp ) / dtau
            aux1_gp = multistep_storage_cano%vstore(l,1,3)
            aux2_gp = multistep_storage_cano%vstore(l,2,3)
            self%ddt_ti(l) = ( aux1_gp - aux2_gp ) / dtau
            aux1_gp = multistep_storage_cano%vstore(l,1,5)
            aux2_gp = multistep_storage_cano%vstore(l,2,5)
            self%ddt_neutrals_dens(l) = ( aux1_gp - aux2_gp ) / dtau

            ! d/dt of thermal energy: difference of (3/2) n (Te+Ti) between the
            ! two stored time levels (not the chain-rule combination of the
            ! individual ddt_* fields above).
            aux1_gp = multistep_storage_cano%vstore(l,1,1) * &
                     (multistep_storage_cano%vstore(l,1,2) + multistep_storage_cano%vstore(l,1,3))
            aux2_gp = multistep_storage_cano%vstore(l,2,1) * &
                     (multistep_storage_cano%vstore(l,2,2) + multistep_storage_cano%vstore(l,2,3))
            self%ddt_eth(l) = 1.5_GP * ( aux1_gp - aux2_gp ) / dtau
        enddo
        !$omp end parallel do

        ! -------------------------------------------------------------------------------
        ! Heat fluxes
        !
        ! Parallel conductive heat fluxes under the selected closure
        ! (Braginskii with free-streaming limiter, pure free streaming, or
        ! Landau-fluid), computed both with and without magnetic-flutter
        ! contributions to grad_par(log T), plus convective fluxes.
        ! qpar_cond_*_normal and the *_gradpar_log* arrays are presumably
        ! allocated earlier in this routine (not visible here).

        allocate(self%qpar_cond_e(mesh_stag%get_n_points()))
        allocate(self%qpar_cond_i(mesh_stag%get_n_points()))
        allocate(self%qpar_conv_e(mesh_stag%get_n_points()))
        allocate(self%qpar_conv_i(mesh_stag%get_n_points()))

        ! NOTE(review): 'kb' is listed private but not referenced in this region.
        !$omp parallel default(none) private(ki, kb, l, flutter_gradpar_logte, flutter_gradpar_logti) &
        !$omp          shared(self, mesh_stag, map, opsinplane_stag, te, ti, apar_fluct, &
        !$omp                 normal_gradpar_logte, normal_gradpar_logti, total_gradpar_logte, total_gradpar_logti, &
        !$omp                 swb_etemp_flutter_pardiff_grad, swb_itemp_flutter_pardiff_grad)
        !$omp do
        do ki = 1, mesh_stag%get_n_points_inner()
            l = mesh_stag%inner_indices(ki)
            ! grad_par(log T) = grad_par(T)/T along the unperturbed field, plus an
            ! optional flutter contribution gated by the swb_* switch.
            normal_gradpar_logte(l) = map%par_grad_single(te%vals, te%hfwd, l) / self%te_stag(l)
            flutter_gradpar_logte = swb_etemp_flutter_pardiff_grad * &
                                opsinplane_stag%flutter_grad(apar_fluct%vals, self%te_stag, l) / self%te_stag(l)
            total_gradpar_logte(l) = normal_gradpar_logte(l) + flutter_gradpar_logte

            normal_gradpar_logti(l) = map%par_grad_single(ti%vals, ti%hfwd, l) / self%ti_stag(l)
            flutter_gradpar_logti = swb_itemp_flutter_pardiff_grad * &
                                opsinplane_stag%flutter_grad(apar_fluct%vals, self%ti_stag, l) / self%ti_stag(l)
            total_gradpar_logti(l) = normal_gradpar_logti(l) + flutter_gradpar_logti
        enddo
        !$omp end do
        !$omp end parallel

        select case(heatflux_model)
        case('BRAGINSKII_LIM')
            ! Braginskii conduction with a free-streaming limiter; the fac_*
            ! prefactors set the limiting flux level.
            fac_free_streaming_e = chipar0_e * sqrt(mass_ratio_ei) / &
                                 (free_streaming_fraction_e * free_streaming_limiter_qfac )
            call heat_flux_braginskii(mesh_stag, self%te_stag, fac_free_streaming_e, &
                    total_gradpar_logte, self%ne_stag, self%qpar_cond_e, "elec", bnd_descrs_nmn_stag)
            call heat_flux_braginskii(mesh_stag, self%te_stag, fac_free_streaming_e, &
                    normal_gradpar_logte, self%ne_stag, qpar_cond_e_normal, "elec", bnd_descrs_nmn_stag)
            fac_free_streaming_i = chipar0_i / (free_streaming_fraction_i * free_streaming_limiter_qfac )
            call heat_flux_braginskii(mesh_stag, self%ti_stag, fac_free_streaming_i, &
                    total_gradpar_logti, self%ne_stag, self%qpar_cond_i, "ions", bnd_descrs_nmn_stag)
            call heat_flux_braginskii(mesh_stag, self%ti_stag, fac_free_streaming_i, &
                    normal_gradpar_logti, self%ne_stag, qpar_cond_i_normal, "ions", bnd_descrs_nmn_stag)
        case('FREE_STREAMING')
            call heat_flux_free_streaming(mesh_stag, self%te_stag, self%ne_stag, &
                    total_gradpar_logte, self%qpar_cond_e, 'elec', bnd_descrs_nmn_stag)
            call heat_flux_free_streaming(mesh_stag, self%te_stag, self%ne_stag, &
                    normal_gradpar_logte, qpar_cond_e_normal, 'elec', bnd_descrs_nmn_stag)
            call heat_flux_free_streaming(mesh_stag, self%ti_stag, self%ne_stag, &
                    total_gradpar_logti, self%qpar_cond_i, 'ions', bnd_descrs_nmn_stag)
            call heat_flux_free_streaming(mesh_stag, self%ti_stag, self%ne_stag, &
                    normal_gradpar_logti, qpar_cond_i_normal, 'ions', bnd_descrs_nmn_stag)
        case('LANDAU')
            ! Solve q_total, including flutter on rhs
            ! Flutter on lhs depends on the switch
            ! Solver diagnostics are stored in fixed 12-entry slices of
            ! slv_info / slv_rinfo (iterations, residuals, true residuals).
            call heat_flux_landau(comm_handler, equi, equi_on_cano, equi_on_stag, mesh_cano, mesh_stag, &
                                    map, penalisation_stag, opsinplane_cano, opsinplane_stag, boundaries, &
                                    self%ne_stag, self%te_stag, self%ti_stag, total_gradpar_logte, &
                                    total_gradpar_logti, tau, self%qpar_cond_e, 'elec', &
                                    apar_fluct, apar_fluct_cano, bnd_descrs_nmn_cano, &
                                    iters=slv_info(1:12), ress=slv_rinfo(1:12,1), true_ress=slv_rinfo(1:12,2))
            call heat_flux_landau(comm_handler, equi, equi_on_cano, equi_on_stag, mesh_cano, mesh_stag, &
                                    map, penalisation_stag, opsinplane_cano, opsinplane_stag, boundaries, &
                                    self%ne_stag, self%te_stag, self%ti_stag, total_gradpar_logte, &
                                    total_gradpar_logti, tau, self%qpar_cond_i, 'ions', &
                                    apar_fluct, apar_fluct_cano, bnd_descrs_nmn_cano, &
                                    iters=slv_info(13:24), ress=slv_rinfo(13:24,1), true_ress=slv_rinfo(13:24,2))
            ! Solve q_normal, excluding flutter on rhs
            call heat_flux_landau(comm_handler, equi, equi_on_cano, equi_on_stag, mesh_cano, mesh_stag, &
                                    map, penalisation_stag, opsinplane_cano, opsinplane_stag, boundaries, &
                                    self%ne_stag, self%te_stag, self%ti_stag, normal_gradpar_logte, &
                                    normal_gradpar_logti, tau, qpar_cond_e_normal, 'elec', &
                                    apar_fluct, apar_fluct_cano, bnd_descrs_nmn_cano, &
                                    iters=slv_info(25:36), ress=slv_rinfo(25:36,1), true_ress=slv_rinfo(25:36,2))
            call heat_flux_landau(comm_handler, equi, equi_on_cano, equi_on_stag, mesh_cano, mesh_stag, &
                                    map, penalisation_stag, opsinplane_cano, opsinplane_stag, boundaries, &
                                    self%ne_stag, self%te_stag, self%ti_stag, normal_gradpar_logte, &
                                    normal_gradpar_logti, tau, qpar_cond_i_normal, 'ions', &
                                    apar_fluct, apar_fluct_cano, bnd_descrs_nmn_cano, &
                                    iters=slv_info(37:48), ress=slv_rinfo(37:48,1), true_ress=slv_rinfo(37:48,2))
        end select
        ! NOTE(review): no default case — an unrecognised heatflux_model leaves
        ! qpar_cond_* unset here; presumably validated elsewhere, confirm.

        ! Convective parallel heat fluxes
        ! (5/2) T times the species parallel particle flux; electrons use
        ! n*u_par - j_par, ions use n*u_par.
        !$omp parallel do default(none) private(l) &
        !$omp          shared(self, mesh_stag, upar, jpar)
        do l = 1, mesh_stag%get_n_points()
            self%qpar_conv_e(l) = 2.5_GP * self%te_stag(l) * (upar%vals(l) * self%ne_stag(l) - jpar%vals(l))
            self%qpar_conv_i(l) = 2.5_GP * self%ti_stag(l) * upar%vals(l) * self%ne_stag(l)
        end do
        !$omp end parallel do

        ! -------------------------------------------------------------------------------
        ! Continuity equation terms
        !
        ! Term-by-term diagnostics of the continuity (cont_*) and vorticity
        ! (vort_*) equations as actually implemented in the solver: radial
        ! fluxes, ExB advection, flutter, parallel divergence, curvature and
        ! hyperviscosity contributions.

        allocate(self%cont_radflux_exb(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%cont_radflux_dia(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%cont_radflux_mag(mesh_stag%get_n_points()), source=0.0_GP)

        allocate(self%cont_exb_advection(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%cont_flutter(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%cont_div_nupar(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%cont_curv_pot(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%cont_curv_pe(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%cont_hypvisc(mesh_cano%get_n_points()), source=0.0_GP)

        allocate(self%vort_curv_pi(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%vort_div_jpar(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%vort_hypvisc(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%vort_flutter(mesh_cano%get_n_points()), source=0.0_GP)

        !$omp parallel default(none) &
        !$omp          private(l, ki, eradx_cano, erady_cano, btor) &
        !$omp          shared(self, mesh_cano, mesh_stag, map, opsinplane_cano, equi_on_cano, &
        !$omp                 bnd_descrs_nmn_cano, bnd_descrs_nmn_stag, &
        !$omp                 ne, vort, pot, te, ti, upar, jpar, apar_fluct_cano, nupar_stag, beta, delta, &
        !$omp                 numdiss_ne_inner, numdiss_vort_inner, cf_diss_cano, &
        !$omp                 perpdiss_order_ne, perpdiss_coeff_ne, perpdiss_order_vort, perpdiss_coeff_vort)
        ! Fill numdiss_ne_inner / numdiss_vort_inner with the in-plane
        ! hyperviscous dissipation of ne and vort; consumed in the loop below.
        call opsinplane_cano%apply_dissipation_inplane(ne%vals, perpdiss_order_ne, numdiss_ne_inner, cf_diss_cano)
        call opsinplane_cano%apply_dissipation_inplane(vort%vals, perpdiss_order_vort, numdiss_vort_inner, cf_diss_cano)
        !$omp do
        do ki = 1, mesh_cano%get_n_points_inner()
            l = mesh_cano%inner_indices(ki)
            eradx_cano = equi_on_cano%erad(l, 1)
            erady_cano = equi_on_cano%erad(l, 2)
            btor       = equi_on_cano%btor(l)

            self%cont_radflux_exb(l) = ne%vals(l) * self%v_exb_rad(l)
            self%cont_radflux_dia(l) = - 2.0_GP * ne%vals(l) * te%vals(l) * erady_cano / delta

            ! Actually implemented, volumetric finite differences terms
            self%cont_exb_advection(l) = delta / btor * opsinplane_cano%arakawa(pot%vals, ne%vals, l)
            self%cont_flutter(l)  = opsinplane_cano%flutter_div(apar_fluct_cano%vals, self%jpar_cano, l)  &
                                    - opsinplane_cano%flutter_div(apar_fluct_cano%vals, self%nupar_cano, l)
            self%cont_div_nupar(l)= - map%par_divb_single(nupar_stag%vals, nupar_stag%hbwd, l)
            self%cont_curv_pot(l) = ne%vals(l) * opsinplane_cano%curvature(pot%vals, l)
            self%cont_curv_pe(l)  = - ne%vals(l) * opsinplane_cano%curvature(te%vals, l) &
                                    - te%vals(l) * opsinplane_cano%curvature(ne%vals, l)
            self%cont_hypvisc(l)  = perpdiss_coeff_ne * numdiss_ne_inner(ki)

            self%vort_curv_pi(l)  = - ne%vals(l) * opsinplane_cano%curvature(ti%vals, l) &
                                    - ti%vals(l) * opsinplane_cano%curvature(ne%vals, l)
            self%vort_div_jpar(l) = map%par_divb_single(jpar%vals, jpar%hbwd, l)
            self%vort_flutter(l)  = opsinplane_cano%flutter_div(apar_fluct_cano%vals, self%jpar_cano, l)
            self%vort_hypvisc(l)  = perpdiss_coeff_vort * numdiss_vort_inner(ki)
        enddo
        !$omp end do
        !$omp do
        do ki = 1, mesh_stag%get_n_points_inner()
            l = mesh_stag%inner_indices(ki)
            ! Radial magnetic (flutter) particle flux: b_1_rad * n*v_par,e.
            self%cont_radflux_mag(l) = self%b_1_rad(l) * (self%ne_stag(l) * upar%vals(l) - jpar%vals(l))
        enddo
        !$omp end do
        ! Treat boundary and ghost points
        ! NOTE(review): executed by all threads of the parallel region — presumably
        ! internally workshared or idempotent, as in the radial-auxiliaries section.
        call set_perpbnds(mesh_cano, bnd_descrs_nmn_cano, self%cont_radflux_exb)
        call set_perpbnds(mesh_cano, bnd_descrs_nmn_cano, self%cont_radflux_dia)
        call set_perpbnds(mesh_stag, bnd_descrs_nmn_stag, self%cont_radflux_mag)
        call extrapolate_ghost_points(mesh_cano, self%cont_radflux_exb)
        call extrapolate_ghost_points(mesh_cano, self%cont_radflux_dia)
        call extrapolate_ghost_points(mesh_stag, self%cont_radflux_mag)
        !$omp end parallel

        ! Dissipative particle flux
        allocate(self%cont_radflux_visc(mesh_cano%get_n_points()), source=0.0_GP)
        call compute_dissipative_radflux(mesh_cano, equi_on_cano, opsinplane_cano, bnd_descrs_nmn_cano, &
                                         perpdiss_order_ne, cf_diss_cano, cf_buffer_cano, &
                                         perpdiss_coeff_ne, buffer_coeff_ne, &
                                         ne, self%cont_radflux_visc)

        ! -------------------------------------------------------------------------------
        ! Thermal electron energy equation terms

        allocate(self%ethe_ddt(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%ethe_src(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%ethe_radflux_exb(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%ethe_radflux_dia(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%ethe_radflux_dia_real(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%ethe_pe_div_exb(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%ethe_vpar_gradpar_pe(mesh_stag%get_n_points()), source=0.0_GP)
        allocate(self%ethe_eta_jpar2(mesh_stag%get_n_points()), source=0.0_GP)
        allocate(self%ethe_jpar_gradpar_te(mesh_stag%get_n_points()), source=0.0_GP)
        allocate(self%ethe_equipartition(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%ethe_hypvisc(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%ethe_buffer(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%ethe_radflux_mag_qnormal(mesh_stag%get_n_points()), source=0.0_GP)
        allocate(self%ethe_radflux_mag_qtotal(mesh_stag%get_n_points()), source=0.0_GP)

        ! Volume variants of numerical dissipation terms; results are stored per
        ! inner point in numdiss_*_inner and consumed in the loops below
        call opsinplane_cano%apply_dissipation_inplane( &
                            ne%vals, &
                            perpdiss_order_ne, &
                            numdiss_ne_inner, cf_diss_cano)
        call opsinplane_cano%apply_dissipation_inplane( &
                            te%vals, &
                            perpdiss_order_te, &
                            numdiss_te_inner, cf_diss_cano)
        call opsinplane_cano%apply_dissipation_inplane( &
                            pe%vals, &
                            perpdiss_order_pe, &
                            numdiss_pe_inner, cf_diss_cano)

        !$omp parallel default(none) &
        !$omp          private(l, ki, aux1_gp, aux2_gp, aux3_gp, aux4_gp, ddx_te, ddy_te, &
        !$omp                  btor, eradx_cano, erady_cano, q_perp_e_rad, &
        !$omp                  gradpar_ne, flutter_gradpar_ne, gradpar_te, flutter_gradpar_te) &
        !$omp          shared(self, mesh_cano, mesh_stag, map, equi_on_cano, opsinplane_cano, &
        !$omp                 opsinplane_stag, multistep_storage_cano, bnd_descrs_nmn_cano, bnd_descrs_nmn_stag, &
        !$omp                 ne, pot, te, pe, ti, upar, jpar, apar_fluct, neutrals_dens, &
        !$omp                 numdiss_ne_inner, numdiss_te_inner, numdiss_pe_inner, cf_buffer_cano, &
        !$omp                 buffer_coeff_ne, buffer_coeff_te, buffer_coeff_pe, &
        !$omp                 perpdiss_coeff_ne, perpdiss_coeff_te, perpdiss_coeff_pe, &
        !$omp                 qpar_cond_e_normal, etapar0, nu_e0, mass_ratio_ei, beta, delta, dtau)
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            ! Backward-difference d/dt of 1.5*ne*Te from the two most recent
            ! multistep levels (vstore(:,1,:) = current, vstore(:,2,:) = previous)
            aux1_gp = multistep_storage_cano%vstore(l,1,1)
            aux2_gp = multistep_storage_cano%vstore(l,2,1)
            aux3_gp = multistep_storage_cano%vstore(l,1,2)
            aux4_gp = multistep_storage_cano%vstore(l,2,2)
            self%ethe_ddt(l) = ( aux1_gp * aux3_gp - aux2_gp * aux4_gp ) * 1.5_GP / dtau

            ! Electron energy sources: external sources plus
            ! ionisation/recombination and neutral-related contributions
            self%ethe_src(l) = &
                + 1.5_GP * ( te%vals(l) * self%src_ext_ne(l) + ne%vals(l) * self%src_ext_te(l) ) & ! neutrals-independent
                + 1.5_GP * te%vals(l) * (self%src_iz_ne(l) + self%src_rec_ne(l)) &
                + 1.5_GP * ne%vals(l) &
                * ( - 2.0_GP/3.0_GP * (neutrals_dens%vals(l) * self%neut_w_iz(l) + ne%vals(l) * self%neut_w_rec(l)) &
                    - te%vals(l) * (neutrals_dens%vals(l) * self%neut_k_iz(l) - ne%vals(l) * self%neut_k_rec(l)) )
        enddo
        !$omp end do
        !$omp do
        do ki = 1, mesh_cano%get_n_points_inner()
            l = mesh_cano%inner_indices(ki)
            ddx_te     = opsinplane_cano%ddx(te%vals, l)
            ddy_te     = opsinplane_cano%ddy(te%vals, l)
            eradx_cano = equi_on_cano%erad(l,1)
            erady_cano = equi_on_cano%erad(l,2)
            btor       = equi_on_cano%btor(l)

            ! Radial projection of the diamagnetic electron heat flux
            q_perp_e_rad = - 2.5_GP * ne%vals(l) * te%vals(l) / btor &
                            * ( eradx_cano * ddy_te - erady_cano * ddx_te )

            ! Ethe fluxes
            self%ethe_radflux_exb(l) = 1.5_GP * ne%vals(l) * te%vals(l) * self%v_exb_rad(l)
            self%ethe_radflux_dia(l)  = 2.5_GP * ne%vals(l) * te%vals(l) * self%v_dia_e_rad(l) + q_perp_e_rad
            self%ethe_radflux_dia_real(l)  = - 5.0_GP * ne%vals(l) * te%vals(l)**2 * erady_cano / delta
            ! Ethe RHS
            self%ethe_pe_div_exb(l) = ne%vals(l) * te%vals(l) * opsinplane_cano%curvature(pot%vals, l)
            self%ethe_equipartition(l) = - nu_e0 * mass_ratio_ei * 3.0_GP * ne%vals(l)**2 / sqrt(te%vals(l)**3) &
                                    * (te%vals(l) - ti%vals(l) )
            ! Ethe numerical dissipation
            self%ethe_hypvisc(l) = 1.5_GP * (  perpdiss_coeff_ne * numdiss_ne_inner(ki) * te%vals(l) &
                                    + perpdiss_coeff_te * numdiss_te_inner(ki) * ne%vals(l) &
                                    + perpdiss_coeff_pe * numdiss_pe_inner(ki))
            self%ethe_buffer(l) = 1.5_GP * ( &
                + buffer_coeff_ne * opsinplane_cano%nabla_pol_sqrd(ne%vals, l, cf_buffer_cano) * te%vals(l) &
                + buffer_coeff_te * opsinplane_cano%nabla_pol_sqrd(te%vals, l, cf_buffer_cano) * ne%vals(l) &
                + buffer_coeff_pe * opsinplane_cano%nabla_pol_sqrd(pe%vals, l, cf_buffer_cano) )
        enddo
        !$omp end do
        !$omp do
        do ki = 1, mesh_stag%get_n_points_inner()
            l = mesh_stag%inner_indices(ki)

            gradpar_ne = map%par_grad_single(ne%vals, ne%hfwd, l)
            flutter_gradpar_ne = opsinplane_stag%flutter_grad(apar_fluct%vals, self%ne_stag, l)
            gradpar_te = map%par_grad_single(te%vals, te%hfwd, l)
            flutter_gradpar_te = opsinplane_stag%flutter_grad(apar_fluct%vals, self%te_stag, l)

            ! Ethe RHS
            self%ethe_vpar_gradpar_pe(l) = ( upar%vals(l) - jpar%vals(l) / self%ne_stag(l)) &
                                    * ( self%ne_stag(l) * (gradpar_te + flutter_gradpar_te) + &
                                        self%te_stag(l) * (gradpar_ne + flutter_gradpar_ne) )
            self%ethe_eta_jpar2(l) = etapar0 / sqrt(self%te_stag(l)**3) * jpar%vals(l)**2
            ! Thermoelectric term; the 0.71 coefficient now carries the _GP kind
            ! suffix so the product is evaluated entirely in GP precision
            self%ethe_jpar_gradpar_te(l) = - 0.71_GP * jpar%vals(l) * (gradpar_te + flutter_gradpar_te)
            ! Flutter
            self%ethe_radflux_mag_qnormal(l) = self%b_1_rad(l) * qpar_cond_e_normal(l)
            self%ethe_radflux_mag_qtotal(l) = self%b_1_rad(l) * self%qpar_cond_e(l)
        enddo
        !$omp end do
        ! Treat boundary + ghost points
        call set_perpbnds(mesh_cano, bnd_descrs_nmn_cano, self%ethe_radflux_exb)
        call set_perpbnds(mesh_cano, bnd_descrs_nmn_cano, self%ethe_radflux_dia)
        call set_perpbnds(mesh_cano, bnd_descrs_nmn_cano, self%ethe_radflux_dia_real)
        call set_perpbnds(mesh_cano, bnd_descrs_nmn_cano, self%ethe_pe_div_exb)
        call set_perpbnds(mesh_stag, bnd_descrs_nmn_stag, self%ethe_vpar_gradpar_pe)
        call set_perpbnds(mesh_stag, bnd_descrs_nmn_stag, self%ethe_eta_jpar2)
        call set_perpbnds(mesh_stag, bnd_descrs_nmn_stag, self%ethe_jpar_gradpar_te)
        call set_perpbnds(mesh_cano, bnd_descrs_nmn_cano, self%ethe_equipartition)
        call set_perpbnds(mesh_cano, bnd_descrs_nmn_cano, self%ethe_hypvisc)
        call set_perpbnds(mesh_cano, bnd_descrs_nmn_cano, self%ethe_buffer)
        call set_perpbnds(mesh_stag, bnd_descrs_nmn_stag, self%ethe_radflux_mag_qnormal)
        call set_perpbnds(mesh_stag, bnd_descrs_nmn_stag, self%ethe_radflux_mag_qtotal)
        call extrapolate_ghost_points(mesh_cano, self%ethe_radflux_exb)
        call extrapolate_ghost_points(mesh_cano, self%ethe_radflux_dia)
        call extrapolate_ghost_points(mesh_cano, self%ethe_radflux_dia_real)
        call extrapolate_ghost_points(mesh_cano, self%ethe_pe_div_exb)
        call extrapolate_ghost_points(mesh_stag, self%ethe_vpar_gradpar_pe)
        call extrapolate_ghost_points(mesh_stag, self%ethe_eta_jpar2)
        call extrapolate_ghost_points(mesh_stag, self%ethe_jpar_gradpar_te)
        call extrapolate_ghost_points(mesh_cano, self%ethe_equipartition)
        call extrapolate_ghost_points(mesh_cano, self%ethe_hypvisc)
        call extrapolate_ghost_points(mesh_cano, self%ethe_buffer)
        call extrapolate_ghost_points(mesh_stag, self%ethe_radflux_mag_qnormal)
        call extrapolate_ghost_points(mesh_stag, self%ethe_radflux_mag_qtotal)
        !$omp end parallel

        ! Viscous electron thermal energy flux
        allocate(self%ethe_radflux_visc(mesh_cano%get_n_points()), source=0.0_GP)
        call compute_dissipative_radflux(mesh_cano, equi_on_cano, opsinplane_cano, bnd_descrs_nmn_cano, &
                                         perpdiss_order_pe, cf_diss_cano, cf_buffer_cano, &
                                         perpdiss_coeff_pe, buffer_coeff_pe, &
                                         pe, self%ethe_radflux_visc)
        self%ethe_radflux_visc = 1.5_GP * self%ethe_radflux_visc

        ! -------------------------------------------------------------------------------
        ! Thermal ion energy equation terms

        allocate(self%ethi_radflux_exb(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%ethi_radflux_dia(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%ethi_radflux_dia_real(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%ethi_radflux_mag_adv(mesh_stag%get_n_points()), source=0.0_GP)
        allocate(self%ethi_radflux_mag_qnormal(mesh_stag%get_n_points()), source=0.0_GP)
        allocate(self%ethi_radflux_mag_qtotal(mesh_stag%get_n_points()), source=0.0_GP)
        allocate(self%ethi_pi_div_exb(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%ethi_upar_gradpar_pi(mesh_stag%get_n_points()), source=0.0_GP)
        allocate(self%etrans_upar_gradpar_pe(mesh_stag%get_n_points()), source=0.0_GP)
        allocate(self%etrans_jpar_gradpar_pot(mesh_stag%get_n_points()), source=0.0_GP)

        !$omp parallel default(none) &
        !$omp          private(l, ki, ddx_ti, ddy_ti, btor, eradx_cano, erady_cano, q_perp_i_rad, &
        !$omp                  gradpar_ne, flutter_gradpar_ne, gradpar_ti, flutter_gradpar_ti, &
        !$omp                  gradpar_te, flutter_gradpar_te, gradpar_pot, flutter_gradpar_pot) &
        !$omp          shared(self, mesh_cano, mesh_stag, map, equi_on_cano, opsinplane_cano, &
        !$omp                 opsinplane_stag, bnd_descrs_nmn_cano, bnd_descrs_nmn_stag, &
        !$omp                 ne, te, ti, pot, upar, jpar, apar_fluct, qpar_cond_i_normal, delta)
        !$omp do
        do ki = 1, mesh_cano%get_n_points_inner()
            l = mesh_cano%inner_indices(ki)
            ddx_ti     = opsinplane_cano%ddx(ti%vals, l)
            ddy_ti     = opsinplane_cano%ddy(ti%vals, l)
            eradx_cano = equi_on_cano%erad(l,1)
            erady_cano = equi_on_cano%erad(l,2)
            btor       = equi_on_cano%btor(l)

            ! Radial projection of the diamagnetic ion heat flux
            q_perp_i_rad = 2.5_GP * ne%vals(l) * ti%vals(l) / btor &
                            * ( eradx_cano * ddy_ti - erady_cano * ddx_ti )

            ! Ethi fluxes
            self%ethi_radflux_exb(l) = 1.5_GP * ne%vals(l) * ti%vals(l) * self%v_exb_rad(l)
            self%ethi_radflux_dia(l)  = 2.5_GP * ne%vals(l) * ti%vals(l) * self%v_dia_i_rad(l) + q_perp_i_rad
            self%ethi_radflux_dia_real(l)  = 5.0_GP * ne%vals(l) * ti%vals(l)**2 * erady_cano / delta
            ! Transfer terms
            self%ethi_pi_div_exb(l) = ne%vals(l) * ti%vals(l) * opsinplane_cano%curvature(pot%vals, l)
        enddo
        !$omp end do
        !$omp do
        do ki = 1, mesh_stag%get_n_points_inner()
            l = mesh_stag%inner_indices(ki)

            gradpar_ne = map%par_grad_single(ne%vals, ne%hfwd, l)
            flutter_gradpar_ne = opsinplane_stag%flutter_grad(apar_fluct%vals, self%ne_stag, l)

            gradpar_ti = map%par_grad_single(ti%vals, ti%hfwd, l)
            flutter_gradpar_ti = opsinplane_stag%flutter_grad(apar_fluct%vals, self%ti_stag, l)

            gradpar_te = map%par_grad_single(te%vals, te%hfwd, l)
            flutter_gradpar_te = opsinplane_stag%flutter_grad(apar_fluct%vals, self%te_stag, l)

            gradpar_pot = map%par_grad_single(pot%vals, pot%hfwd, l)
            flutter_gradpar_pot = opsinplane_stag%flutter_grad(apar_fluct%vals, self%pot_stag, l)

            ! Flutter fluxes
            self%ethi_radflux_mag_qnormal(l) = self%b_1_rad(l) * qpar_cond_i_normal(l)
            self%ethi_radflux_mag_qtotal(l) = self%b_1_rad(l) * self%qpar_cond_i(l)
            self%ethi_radflux_mag_adv(l) = 1.5_GP * self%b_1_rad(l) * self%ti_stag(l) * self%ne_stag(l) * upar%vals(l)
            ! Transfer terms
            self%ethi_upar_gradpar_pi(l) = ( upar%vals(l) ) &
                                    * ( self%ne_stag(l) * (gradpar_ti + flutter_gradpar_ti) + &
                                        self%ti_stag(l) * (gradpar_ne + flutter_gradpar_ne) )

            self%etrans_upar_gradpar_pe(l) = ( upar%vals(l) ) &
                                        * ( self%ne_stag(l) * (gradpar_te + flutter_gradpar_te) + &
                                            self%te_stag(l) * (gradpar_ne + flutter_gradpar_ne) )

            self%etrans_jpar_gradpar_pot(l) = jpar%vals(l) * (gradpar_pot + flutter_gradpar_pot)
        enddo
        !$omp end do
        ! Treat boundary + ghost points
        call set_perpbnds(mesh_cano, bnd_descrs_nmn_cano, self%ethi_radflux_exb)
        call set_perpbnds(mesh_cano, bnd_descrs_nmn_cano, self%ethi_radflux_dia)
        call set_perpbnds(mesh_cano, bnd_descrs_nmn_cano, self%ethi_radflux_dia_real)
        call set_perpbnds(mesh_stag, bnd_descrs_nmn_stag, self%ethi_radflux_mag_qnormal)
        call set_perpbnds(mesh_stag, bnd_descrs_nmn_stag, self%ethi_radflux_mag_qtotal)
        call set_perpbnds(mesh_stag, bnd_descrs_nmn_stag, self%ethi_radflux_mag_adv)
        call set_perpbnds(mesh_cano, bnd_descrs_nmn_cano, self%ethi_pi_div_exb)
        call set_perpbnds(mesh_stag, bnd_descrs_nmn_stag, self%ethi_upar_gradpar_pi)
        call set_perpbnds(mesh_stag, bnd_descrs_nmn_stag, self%etrans_upar_gradpar_pe)
        ! Fix: etrans_jpar_gradpar_pot is computed in the same inner-point loop as
        ! the two terms above but was the only one missing boundary/ghost treatment
        call set_perpbnds(mesh_stag, bnd_descrs_nmn_stag, self%etrans_jpar_gradpar_pot)
        call extrapolate_ghost_points(mesh_cano, self%ethi_radflux_exb)
        call extrapolate_ghost_points(mesh_cano, self%ethi_radflux_dia)
        call extrapolate_ghost_points(mesh_cano, self%ethi_radflux_dia_real)
        call extrapolate_ghost_points(mesh_stag, self%ethi_radflux_mag_qnormal)
        call extrapolate_ghost_points(mesh_stag, self%ethi_radflux_mag_qtotal)
        call extrapolate_ghost_points(mesh_stag, self%ethi_radflux_mag_adv)
        call extrapolate_ghost_points(mesh_cano, self%ethi_pi_div_exb)
        call extrapolate_ghost_points(mesh_stag, self%ethi_upar_gradpar_pi)
        call extrapolate_ghost_points(mesh_stag, self%etrans_upar_gradpar_pe)
        call extrapolate_ghost_points(mesh_stag, self%etrans_jpar_gradpar_pot)
        !$omp end parallel

        ! Viscous ion thermal energy flux
        allocate(self%ethi_radflux_visc(mesh_cano%get_n_points()), source=0.0_GP)
        call compute_dissipative_radflux(mesh_cano, equi_on_cano, opsinplane_cano, bnd_descrs_nmn_cano, &
                                         perpdiss_order_pi, cf_diss_cano, cf_buffer_cano, &
                                         perpdiss_coeff_pi, buffer_coeff_pi, &
                                         pi, self%ethi_radflux_visc)
        self%ethi_radflux_visc = 1.5_GP * self%ethi_radflux_visc

        ! -----------------------------------------------------------------------------------------
        ! Perpendicular kinetic energy
        ! -----------------------------------------------------------------------------------------

        allocate(self%ekin_perp(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%ekin_perp_ddt(mesh_cano%get_n_points()), source=0.0_GP)

        ! Work arrays: aux1 = ion pressure at the current step, aux2 = product of
        ! the previous-step multistep fields (density * field 3 at t-1)
        allocate(aux1(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(aux2(mesh_cano%get_n_points()), source=0.0_GP)

        !$omp parallel default(none) &
        !$omp private(ki, l, x, y, ddx_pot, ddy_pot, ddx_pi, ddy_pi, aux1_gp, aux2_gp) &
        !$omp shared(self, mesh_cano, equi_on_cano, opsinplane_cano, dtau, multistep_storage_cano, &
        !$omp        aux1, aux2, pot, ne, ti)
        !$omp do
        do l = 1, mesh_cano%get_n_points()
                aux1(l) = ne%vals(l) * ti%vals(l)
                aux2(l) = multistep_storage_cano%vstore(l,2,1) * multistep_storage_cano%vstore(l,2,3)
        enddo
        !$omp end do
        !$omp do
        do ki = 1, mesh_cano%get_n_points_inner()
            l = mesh_cano%inner_indices(ki)

            ! Perpendicular kinetic energy
            ! (the cross-term factor now carries the _GP kind suffix so the whole
            ! expression is evaluated in GP precision; unused x/y lookups removed)
            ddx_pot = opsinplane_cano%ddx(pot%vals, l)
            ddy_pot = opsinplane_cano%ddy(pot%vals, l)
            ddx_pi = opsinplane_cano%ddx(aux1, l)
            ddy_pi = opsinplane_cano%ddy(aux1, l)
            self%ekin_perp(l) = &
                0.5_GP * ne%vals(l) / equi_on_cano%btor(l)**2 &
                * ( + ddx_pot**2 + ddy_pot**2 &
                    + (ddx_pi / ne%vals(l) )**2 &
                    + (ddy_pi / ne%vals(l) )**2 &
                    + 2.0_GP / ne%vals(l) &
                        * ( ddx_pot*ddx_pi + ddy_pot*ddy_pi ) &
                    )

            ! Changerate of perpendicular kinetic energy: same expression
            ! evaluated with the previous-step fields (vstore(:,2,4) = potential at t-1)
            ddx_pot = opsinplane_cano%ddx(multistep_storage_cano%vstore(:,2,4), l)
            ddy_pot = opsinplane_cano%ddy(multistep_storage_cano%vstore(:,2,4), l)
            ddx_pi = opsinplane_cano%ddx(aux2, l)
            ddy_pi = opsinplane_cano%ddy(aux2, l)

            aux1_gp = multistep_storage_cano%vstore(l,2,1) ! Density at t-1
            aux2_gp = &
                0.5_GP * aux1_gp / equi_on_cano%btor(l)**2 &
                * ( + ddx_pot**2 + ddy_pot**2 &
                    + (ddx_pi / aux1_gp )**2 &
                    + (ddy_pi / aux1_gp )**2 &
                    + 2.0_GP / aux1_gp &
                        * ( ddx_pot*ddx_pi + ddy_pot*ddy_pi ) &
                    )

            ! Backward-difference change rate over one timestep
            self%ekin_perp_ddt(l) = (self%ekin_perp(l) - aux2_gp) / dtau

        enddo
        !$omp end do
        !$omp end parallel
        deallocate(aux1)
        deallocate(aux2)

        ! -----------------------------------------------------------------------------------------
        ! Parallel kinetic energy
        ! -----------------------------------------------------------------------------------------

        allocate(self%ekin_par(mesh_stag%get_n_points()), source=0.0_GP)
        allocate(self%ekin_par_ddt(mesh_stag%get_n_points()), source=0.0_GP)

        ! Wrap the previous-step density (vstore(:,2,1), density at t-1) in a
        ! variable so its halos can be exchanged before lifting it onto the
        ! staggered mesh below
        call varaux%init(comm_handler, mesh_cano%get_n_points(), staggered=.false., &
                         init_vals=multistep_storage_cano%vstore(:,2,1))
        call varaux%create_halos(comm_handler, 1, 0)

        call varaux%start_comm_halos(comm_handler)
        call varaux%finalize_comm_halos(comm_handler)
        !$omp parallel default(none), private(l, aux1_gp, aux2_gp) &
        !$omp shared(self, mesh_stag, map, multistep_storage_stag, dtau, varaux, upar)
        !$omp do
        do l = 1, mesh_stag%get_n_points()
            ! Current-step parallel kinetic energy density: 0.5 * n * upar^2
            aux1_gp = 0.5_GP * self%ne_stag(l) * upar%vals(l)**2
            ! Previous-step value: lifted t-1 density times squared t-1 staggered field
            aux2_gp = 0.5_GP * map%lift_cano_to_stag_single(varaux%vals, varaux%hfwd, l) &
                      * multistep_storage_stag%vstore(l,2,1)**2

            self%ekin_par(l) = aux1_gp
            ! Backward-difference change rate over one timestep
            self%ekin_par_ddt(l) = (aux1_gp - aux2_gp) / dtau
        enddo
        !$omp end do
        !$omp end parallel

        ! -----------------------------------------------------------------------------------------
        ! Electromagnetic energy
        ! -----------------------------------------------------------------------------------------
        allocate(self%eem(mesh_stag%get_n_points()), source=0.0_GP)
        allocate(self%eem_ddt(mesh_stag%get_n_points()), source=0.0_GP)
        allocate(self%eem_flux(mesh_stag%get_n_points()), source=0.0_GP)

        !$omp parallel default(none) &
        !$omp private(ki, l, ddx_apar, ddy_apar, ddt_apar, aux1_gp, eradx_stag, erady_stag) &
        !$omp shared(self, mesh_stag, equi_on_stag, opsinplane_stag, multistep_storage_stag, beta, dtau, apar)
        !$omp do
        do ki = 1, mesh_stag%get_n_points_inner()
            l = mesh_stag%inner_indices(ki)

            ! Electromagnetic energy: 0.5 * beta * |grad_perp apar|^2
            ddx_apar = opsinplane_stag%ddx(apar%vals, l)
            ddy_apar = opsinplane_stag%ddy(apar%vals, l)
            self%eem(l) = 0.5_GP * beta * (ddx_apar**2 + ddy_apar**2)

            ! Flux of electromagnetic energy:
            ! backward-difference d(apar)/dt from multistep levels, projected
            ! onto the radial direction via the equilibrium erad components
            ddt_apar   = ( multistep_storage_stag%vstore(l,1,3) - multistep_storage_stag%vstore(l,2,3)) / dtau
            eradx_stag = equi_on_stag%erad(l,1)
            erady_stag = equi_on_stag%erad(l,2)
            self%eem_flux(l) = beta * ddt_apar * (eradx_stag * ddx_apar + erady_stag * ddy_apar)

            ! Changerate of electromagnetic energy: same expression evaluated
            ! with apar at t-1 (vstore(:,2,3)), then backward-differenced
            ddx_apar = opsinplane_stag%ddx(multistep_storage_stag%vstore(:,2,3), l)
            ddy_apar = opsinplane_stag%ddy(multistep_storage_stag%vstore(:,2,3), l)
            aux1_gp = 0.5_GP  * beta * (ddx_apar**2 + ddy_apar**2)
            self%eem_ddt(l) = (self%eem(l) - aux1_gp) / dtau

        enddo
        !$omp end do
        !$omp end parallel

        ! -----------------------------------------------------------------------------------------
        ! Terms related with gyroviscosity
        ! -----------------------------------------------------------------------------------------

        ! Drive the gyroviscous-stress object with the current fields before evaluation
        call gstress%drive(comm_handler, mesh_cano, mesh_stag, equi_on_stag, &
                           ne%vals, ti%vals, upar%vals, self%qpar_cond_i)

        ! Full flow part: curvature + parallel contributions (heat parts disabled)
        allocate(self%gstressv(mesh_cano%get_n_points()), source=0.0_GP)
        call gstress%eval_gstress(mesh_cano, equi_on_cano, opsinplane_cano, map, &
                                  ne%vals, pot%vals, ti%vals, self%aparv_fluct_cano, &
                                  self%gstressv, &
                                  eval_flow_crv=.true., eval_flow_par=.true., &
                                  eval_heat_crv=.false., eval_heat_par=.false.)

        ! Parallel-flow part only, for separating the two contributions
        allocate(self%gstressv_par(mesh_cano%get_n_points()), source=0.0_GP)
        call gstress%eval_gstress(mesh_cano, equi_on_cano, opsinplane_cano, map, &
                                  ne%vals, pot%vals, ti%vals, self%aparv_fluct_cano, &
                                  self%gstressv_par, &
                                  eval_flow_crv=.false., eval_flow_par=.true., &
                                  eval_heat_crv=.false., eval_heat_par=.false.)

        ! -----------------------------------------------------------------------------------------
        ! Terms related with polarisation velocity
        ! -----------------------------------------------------------------------------------------

        allocate(self%ethi_upol_flux(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%etrans_upol_grad_pi(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%ekinperp_upol_flux(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%ekinperp_jpol_poynt_flux(mesh_cano%get_n_points()), source=0.0_GP)

        allocate(aux1(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(aux2(mesh_cano%get_n_points()), source=0.0_GP)

        ! NOTE(review): aux1_inner computed here is not referenced again within
        ! this section — presumably consumed further below; verify
        call gstress%eval_curvature(mesh_cano, equi_on_cano, opsinplane_cano, map, &
                                   ne%vals, pot%vals, ti%vals, self%aparv_fluct_cano, aux1_inner)

        ! Divergence of the polarisation current weighted with co=Ti
        call compute_div_jpol(comm_handler, equi, equi_on_cano, mesh_cano, mesh_stag, map, &
                              penalisation_cano, opsinplane_cano, boundaries, &
                              ne, ti, pot, self%upar_cano, self%aparv_fluct_cano, &
                              nev_prev=multistep_storage_cano%vstore(:,2,1), &
                              tiv_prev=multistep_storage_cano%vstore(:,2,3), &
                              potv_prev=multistep_storage_cano%vstore(:,2,4), &
                              div_jpol=aux2, &
                              co=ti%vals)

        ! Store the Ti-weighted result and prepare aux1 = 1/ne for the next call
        !$omp parallel default(none) private(l) shared(mesh_cano, self, ne, aux1, aux2)
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            self%ethi_upol_flux(l) = 2.5_GP * aux2(l)
            self%etrans_upol_grad_pi(l) = aux2(l)
            aux1(l) = 1.0_GP / ne%vals(l)
        enddo
        !$omp end do
        !$omp end parallel

        ! Divergence of the polarisation current weighted with co=1/ne
        call compute_div_jpol(comm_handler, equi, equi_on_cano, mesh_cano, mesh_stag, map, &
                              penalisation_cano, opsinplane_cano, boundaries, &
                              ne, ti, pot, self%upar_cano, self%aparv_fluct_cano, &
                              nev_prev=multistep_storage_cano%vstore(:,2,1), &
                              tiv_prev=multistep_storage_cano%vstore(:,2,3), &
                              potv_prev=multistep_storage_cano%vstore(:,2,4), &
                              div_jpol=aux2, &
                              co=aux1)

        ! Subtract the pressure-weighted (1/ne) contribution from the transfer term
        !$omp parallel default(none) private(l) shared(self, mesh_cano, ne, ti, aux2)
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            self%etrans_upol_grad_pi(l) = self%etrans_upol_grad_pi(l) - ne%vals(l) * ti%vals(l) * aux2(l)
        enddo
        !$omp end do
        !$omp end parallel

        ! Flux of perpendicular kinetic energy due to polarisation velocity
        !$omp parallel default(none) private(l) shared(mesh_cano, self, ne, aux1)
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            aux1(l) = self%ekin_perp(l) / ne%vals(l)
        enddo
        !$omp end do
        !$omp end parallel

        call compute_div_jpol(comm_handler, equi, equi_on_cano, mesh_cano, mesh_stag, map, &
                              penalisation_cano, opsinplane_cano, boundaries, &
                              ne, ti, pot, self%upar_cano, self%aparv_fluct_cano, &
                              nev_prev=multistep_storage_cano%vstore(:,2,1), &
                              tiv_prev=multistep_storage_cano%vstore(:,2,3), &
                              potv_prev=multistep_storage_cano%vstore(:,2,4), &
                              div_jpol=self%ekinperp_upol_flux, &
                              co=aux1)

        ! Poynting flux part related with polarisation velocity (co = potential)
        call compute_div_jpol(comm_handler, equi, equi_on_cano, mesh_cano, mesh_stag, map, &
                              penalisation_cano, opsinplane_cano, boundaries, &
                              ne, ti, pot, self%upar_cano, self%aparv_fluct_cano, &
                              nev_prev=multistep_storage_cano%vstore(:,2,1), &
                              tiv_prev=multistep_storage_cano%vstore(:,2,3), &
                              potv_prev=multistep_storage_cano%vstore(:,2,4), &
                              div_jpol=self%ekinperp_jpol_poynt_flux, &
                              co=pot%vals)

        deallocate(aux1)
        deallocate(aux2)

        ! -----------------------------------------------------------------------------------------
        ! Terms related with dissipation
        ! -----------------------------------------------------------------------------------------
        ! One compute_dissipation call per evolved field. Only ne, apar (ohm) and
        ! vort receive a parallel dissipation coefficient; te/pe/ti/pi/upar pass
        ! 0.0_GP for the parallel part. Staggered fields (upar, apar) swap the
        ! mesh/operator arguments and set is_stag=.true.
        allocate(self%diss_ne(mesh_cano%get_n_points()), source=0.0_GP)
        call compute_dissipation(comm_handler, mesh_cano, mesh_stag, map, opsinplane_cano, &
                                 perpdiss_order_ne, cf_diss_cano, cf_buffer_cano, &
                                 perpdiss_coeff_ne, buffer_coeff_ne, pardiss_coeff_ne, &
                                 ne, is_stag=.false., dissv=self%diss_ne)

        allocate(self%diss_te(mesh_cano%get_n_points()), source=0.0_GP)
        call compute_dissipation(comm_handler, mesh_cano, mesh_stag, map, opsinplane_cano, &
                                 perpdiss_order_te, cf_diss_cano, cf_buffer_cano, &
                                 perpdiss_coeff_te, buffer_coeff_te, 0.0_GP, &
                                 te, is_stag=.false., dissv=self%diss_te)

        allocate(self%diss_pe(mesh_cano%get_n_points()), source=0.0_GP)
        call compute_dissipation(comm_handler, mesh_cano, mesh_stag, map, opsinplane_cano, &
                                 perpdiss_order_pe, cf_diss_cano, cf_buffer_cano, &
                                 perpdiss_coeff_pe, buffer_coeff_pe, 0.0_GP, &
                                 pe, is_stag=.false., dissv=self%diss_pe)

        allocate(self%diss_ti(mesh_cano%get_n_points()), source=0.0_GP)
        call compute_dissipation(comm_handler, mesh_cano, mesh_stag, map, opsinplane_cano, &
                                 perpdiss_order_ti, cf_diss_cano, cf_buffer_cano, &
                                 perpdiss_coeff_ti, buffer_coeff_ti, 0.0_GP, &
                                 ti, is_stag=.false., dissv=self%diss_ti)

        allocate(self%diss_pi(mesh_cano%get_n_points()), source=0.0_GP)
        call compute_dissipation(comm_handler, mesh_cano, mesh_stag, map, opsinplane_cano, &
                                 perpdiss_order_pi, cf_diss_cano, cf_buffer_cano, &
                                 perpdiss_coeff_pi, buffer_coeff_pi, 0.0_GP, &
                                 pi, is_stag=.false., dissv=self%diss_pi)

        allocate(self%diss_upar(mesh_stag%get_n_points()), source=0.0_GP)
        call compute_dissipation(comm_handler, mesh_stag, mesh_cano, map, opsinplane_stag, &
                                 perpdiss_order_upar, cf_diss_stag, cf_buffer_stag, &
                                 perpdiss_coeff_upar, buffer_coeff_upar, 0.0_GP, &
                                 upar, is_stag=.true., dissv=self%diss_upar)

        allocate(self%diss_apar(mesh_stag%get_n_points()), source=0.0_GP)
        call compute_dissipation(comm_handler, mesh_stag, mesh_cano, map, opsinplane_stag, &
                                 perpdiss_order_ohm, cf_diss_stag, cf_buffer_stag, &
                                 perpdiss_coeff_ohm, buffer_coeff_ohm, pardiss_coeff_ohm, &
                                 apar, is_stag=.true., dissv=self%diss_apar)

        allocate(self%diss_vort(mesh_cano%get_n_points()), source=0.0_GP)
        call compute_dissipation(comm_handler, mesh_cano, mesh_stag, map, opsinplane_cano, &
                                 perpdiss_order_vort, cf_diss_cano, cf_buffer_cano, &
                                 perpdiss_coeff_vort, buffer_coeff_vort, pardiss_coeff_vort, &
                                 vort, is_stag=.false., dissv=self%diss_vort)


        ! -------------------------------------------------------------------------------
        ! Power sources due to external sources, neutrals and impurities

        allocate(self%p_heat_e(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%p_heat_i(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%p_rad_iz(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%p_rad_rec(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%p_rad_imp(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%p_neut_e(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%p_neut_i(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%p_ext_perp(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%p_neut_perp(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%p_ext_par(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%p_neut_par_cano(mesh_cano%get_n_points()), source=0.0_GP)
        allocate(self%p_neut_par_stag(mesh_stag%get_n_points()), source=0.0_GP)

        !$omp parallel default(none) private(l) &
        !$omp          shared(self, equi, mesh_cano, mesh_stag, ne, te, ti, pot, &
        !$omp                 neutrals_dens, l_imp, impy_concentration, upar)
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            ! External heating power into electrons/ions: 1.5 * (Te*Sn + n*ST)
            self%p_heat_e(l) = 3.0_GP/2.0_GP * (  self%src_ext_ne(l) * te%vals(l) &
                                                + self%src_ext_te(l) * ne%vals(l) )
            self%p_heat_i(l) = 3.0_GP/2.0_GP * (  self%src_ext_ne(l) * ti%vals(l) &
                                                + self%src_ext_ti(l) * ne%vals(l) )
            ! Radiated power from ionisation and recombination of neutrals
            self%p_rad_iz(l) = - self%neut_w_iz(l) * neutrals_dens%vals(l) * ne%vals(l)
            self%p_rad_rec(l) = - self%neut_w_rec(l) * ne%vals(l)**2

            ! Impurity radiation only when a finite impurity concentration is set
            ! (l_imp%eval: coronal impurity radiation function, see coronal_impurities_m)
            if (impy_concentration > GP_EPS) then
                self%p_rad_imp(l) = l_imp%eval(te%vals, l) * ne%vals(l)**2 * impy_concentration
            endif
            ! Electron/ion power exchange with neutrals
            self%p_neut_e(l) = 3.0_GP/2.0_GP * ( te%vals(l) * (self%src_iz_ne(l) + self%src_rec_ne(l)) &
                                               + ne%vals(l) * self%src_neut_te(l) )
            self%p_neut_i(l) = 3.0_GP/2.0_GP * ( ti%vals(l) * (self%src_iz_ne(l) + self%src_rec_ne(l)) &
                                               + ne%vals(l) * self%src_neut_ti(l) )

            self%p_ext_perp(l) = self%ekin_perp(l) / ne%vals(l) * self%src_ext_ne(l)
            self%p_neut_perp(l) = self%ekin_perp(l) / ne%vals(l) * (self%src_iz_ne(l) + self%src_rec_ne(l)) &
                                  + pot%vals(l) * self%src_neut_vort(l)
            
            self%p_ext_par(l) = 0.5_GP * self%upar_cano(l)**2 * self%src_ext_ne(l)
            ! NOTE: p_neut_par is split in cano and stag component and added together later
            self%p_neut_par_cano(l) = 0.5_GP * self%upar_cano(l)**2 * (self%src_iz_ne(l) + self%src_rec_ne(l))
        enddo
        !$omp end do
        !$omp do
        do l = 1, mesh_stag%get_n_points()
            self%p_neut_par_stag(l) = self%ne_stag(l) * upar%vals(l) * self%src_neut_upar(l)
        enddo
        !$omp end do
        !$omp end parallel

        call perf_stop('../compute_diagnostics_packet')

    end subroutine

    subroutine destructor(self)
        !! Destructor: releases every diagnostic array owned by the packet.
        !!
        !! Each deallocation is guarded with allocated() so the destructor is
        !! safe on a partially initialised packet (e.g. when optional physics
        !! such as neutrals or impurities never allocated their arrays) and is
        !! idempotent when called more than once. An unguarded deallocate of
        !! an unallocated component would otherwise abort the run.
        type(diagnostics_packet_t), intent(inout) :: self
        !! Instance of type

        ! Neutral rate coefficients and radiation weights
        if (allocated(self%neut_k_iz)) deallocate(self%neut_k_iz)
        if (allocated(self%neut_k_rec)) deallocate(self%neut_k_rec)
        if (allocated(self%neut_k_cx)) deallocate(self%neut_k_cx)
        if (allocated(self%neut_w_iz)) deallocate(self%neut_w_iz)
        if (allocated(self%neut_w_rec)) deallocate(self%neut_w_rec)

        ! Fields interpolated between canonical and staggered meshes
        if (allocated(self%ne_stag)) deallocate(self%ne_stag)
        if (allocated(self%te_stag)) deallocate(self%te_stag)
        if (allocated(self%ti_stag)) deallocate(self%ti_stag)
        if (allocated(self%pot_stag)) deallocate(self%pot_stag)
        if (allocated(self%neutrals_dens_stag)) deallocate(self%neutrals_dens_stag)
        if (allocated(self%upar_cano)) deallocate(self%upar_cano)
        if (allocated(self%neutrals_parmom_cano)) deallocate(self%neutrals_parmom_cano)
        if (allocated(self%nupar_cano)) deallocate(self%nupar_cano)
        if (allocated(self%nvpar_cano)) deallocate(self%nvpar_cano)
        if (allocated(self%jpar_cano)) deallocate(self%jpar_cano)
        if (allocated(self%aparv_fluct_cano)) deallocate(self%aparv_fluct_cano)

        ! Radial gradients and radial drift velocities
        if (allocated(self%radgrad_ne)) deallocate(self%radgrad_ne)
        if (allocated(self%radgrad_te)) deallocate(self%radgrad_te)
        if (allocated(self%radgrad_ti)) deallocate(self%radgrad_ti)
        if (allocated(self%v_exb_rad)) deallocate(self%v_exb_rad)
        if (allocated(self%v_dia_e_rad)) deallocate(self%v_dia_e_rad)
        if (allocated(self%v_dia_i_rad)) deallocate(self%v_dia_i_rad)
        if (allocated(self%b_1_rad)) deallocate(self%b_1_rad)

        ! Parallel fluxes (forward/backward along the field line)
        if (allocated(self%parflux_ne_fwd)) deallocate(self%parflux_ne_fwd)
        if (allocated(self%parflux_ne_bwd)) deallocate(self%parflux_ne_bwd)
        if (allocated(self%parflux_ne_fwd_weighted)) deallocate(self%parflux_ne_fwd_weighted)
        if (allocated(self%parflux_ne_bwd_weighted)) deallocate(self%parflux_ne_bwd_weighted)
        if (allocated(self%parflux_neutrals_fwd)) deallocate(self%parflux_neutrals_fwd)
        if (allocated(self%parflux_neutrals_bwd)) deallocate(self%parflux_neutrals_bwd)
        if (allocated(self%parflux_neutralsdiss_fwd)) deallocate(self%parflux_neutralsdiss_fwd)
        if (allocated(self%parflux_neutralsdiss_bwd)) deallocate(self%parflux_neutralsdiss_bwd)

        ! Perpendicular fluxes
        if (allocated(self%perpflux_ne_exb)) deallocate(self%perpflux_ne_exb)
        if (allocated(self%perpflux_ne_dia)) deallocate(self%perpflux_ne_dia)
        if (allocated(self%perpflux_neutrals_outer)) deallocate(self%perpflux_neutrals_outer)
        if (allocated(self%perpflux_neutrals_volint)) deallocate(self%perpflux_neutrals_volint)

        ! External particle/heat sources
        if (allocated(self%src_ext_ne)) deallocate(self%src_ext_ne)
        if (allocated(self%src_ext_te)) deallocate(self%src_ext_te)
        if (allocated(self%src_ext_ti)) deallocate(self%src_ext_ti)

        ! Neutral-interaction sources
        if (allocated(self%src_iz_ne)) deallocate(self%src_iz_ne)
        if (allocated(self%src_rec_ne)) deallocate(self%src_rec_ne)
        if (allocated(self%src_neut_te)) deallocate(self%src_neut_te)
        if (allocated(self%src_neut_ti)) deallocate(self%src_neut_ti)
        if (allocated(self%src_neut_upar)) deallocate(self%src_neut_upar)
        if (allocated(self%src_neut_vort)) deallocate(self%src_neut_vort)
        if (allocated(self%src_neut_pressure)) deallocate(self%src_neut_pressure)
        if (allocated(self%src_neut_rcy)) deallocate(self%src_neut_rcy)

        ! Floor sources
        if (allocated(self%src_floor_ne)) deallocate(self%src_floor_ne)
        if (allocated(self%src_floor_neutrals_dens)) deallocate(self%src_floor_neutrals_dens)

        ! Power sources (external, radiation, neutrals)
        if (allocated(self%p_heat_e)) deallocate(self%p_heat_e)
        if (allocated(self%p_heat_i)) deallocate(self%p_heat_i)
        if (allocated(self%p_rad_iz)) deallocate(self%p_rad_iz)
        if (allocated(self%p_rad_rec)) deallocate(self%p_rad_rec)
        if (allocated(self%p_rad_imp)) deallocate(self%p_rad_imp)
        if (allocated(self%p_neut_e)) deallocate(self%p_neut_e)
        if (allocated(self%p_neut_i)) deallocate(self%p_neut_i)
        if (allocated(self%p_ext_perp)) deallocate(self%p_ext_perp)
        if (allocated(self%p_neut_perp)) deallocate(self%p_neut_perp)
        if (allocated(self%p_ext_par)) deallocate(self%p_ext_par)
        if (allocated(self%p_neut_par_cano)) deallocate(self%p_neut_par_cano)
        if (allocated(self%p_neut_par_stag)) deallocate(self%p_neut_par_stag)

        ! Thermal energy and time derivatives
        if (allocated(self%eth)) deallocate(self%eth)

        if (allocated(self%ddt_eth)) deallocate(self%ddt_eth)
        if (allocated(self%ddt_ne)) deallocate(self%ddt_ne)
        if (allocated(self%ddt_te)) deallocate(self%ddt_te)
        if (allocated(self%ddt_ti)) deallocate(self%ddt_ti)
        if (allocated(self%ddt_neutrals_dens)) deallocate(self%ddt_neutrals_dens)

        ! Parallel heat fluxes (conductive / convective)
        if (allocated(self%qpar_cond_e)) deallocate(self%qpar_cond_e)
        if (allocated(self%qpar_cond_i)) deallocate(self%qpar_cond_i)
        if (allocated(self%qpar_conv_e)) deallocate(self%qpar_conv_e)
        if (allocated(self%qpar_conv_i)) deallocate(self%qpar_conv_i)

        ! Continuity-equation radial fluxes
        if (allocated(self%cont_radflux_exb)) deallocate(self%cont_radflux_exb)
        if (allocated(self%cont_radflux_dia)) deallocate(self%cont_radflux_dia)
        if (allocated(self%cont_radflux_visc)) deallocate(self%cont_radflux_visc)
        if (allocated(self%cont_radflux_mag)) deallocate(self%cont_radflux_mag)

        ! Continuity-equation term-by-term diagnostics
        if (allocated(self%cont_exb_advection)) deallocate(self%cont_exb_advection)
        if (allocated(self%cont_flutter)) deallocate(self%cont_flutter)
        if (allocated(self%cont_div_nupar)) deallocate(self%cont_div_nupar)
        if (allocated(self%cont_curv_pot)) deallocate(self%cont_curv_pot)
        if (allocated(self%cont_curv_pe)) deallocate(self%cont_curv_pe)
        if (allocated(self%cont_hypvisc)) deallocate(self%cont_hypvisc)

        ! Vorticity-equation term-by-term diagnostics
        if (allocated(self%vort_curv_pi)) deallocate(self%vort_curv_pi)
        if (allocated(self%vort_div_jpar)) deallocate(self%vort_div_jpar)
        if (allocated(self%vort_hypvisc)) deallocate(self%vort_hypvisc)
        if (allocated(self%vort_flutter)) deallocate(self%vort_flutter)

        ! Electron thermal-energy budget terms
        if (allocated(self%ethe_ddt)) deallocate(self%ethe_ddt)
        if (allocated(self%ethe_src)) deallocate(self%ethe_src)
        if (allocated(self%ethe_radflux_exb)) deallocate(self%ethe_radflux_exb)
        if (allocated(self%ethe_radflux_dia)) deallocate(self%ethe_radflux_dia)
        if (allocated(self%ethe_radflux_dia_real)) deallocate(self%ethe_radflux_dia_real)
        if (allocated(self%ethe_pe_div_exb)) deallocate(self%ethe_pe_div_exb)
        if (allocated(self%ethe_vpar_gradpar_pe)) deallocate(self%ethe_vpar_gradpar_pe)
        if (allocated(self%ethe_eta_jpar2)) deallocate(self%ethe_eta_jpar2)
        if (allocated(self%ethe_jpar_gradpar_te)) deallocate(self%ethe_jpar_gradpar_te)
        if (allocated(self%ethe_equipartition)) deallocate(self%ethe_equipartition)
        if (allocated(self%ethe_hypvisc)) deallocate(self%ethe_hypvisc)
        if (allocated(self%ethe_buffer)) deallocate(self%ethe_buffer)
        if (allocated(self%ethe_radflux_mag_qnormal)) deallocate(self%ethe_radflux_mag_qnormal)
        if (allocated(self%ethe_radflux_mag_qtotal)) deallocate(self%ethe_radflux_mag_qtotal)
        if (allocated(self%ethe_radflux_visc)) deallocate(self%ethe_radflux_visc)

        ! Ion thermal-energy budget and transfer terms
        if (allocated(self%ethi_radflux_exb)) deallocate(self%ethi_radflux_exb)
        if (allocated(self%ethi_radflux_dia)) deallocate(self%ethi_radflux_dia)
        if (allocated(self%ethi_radflux_dia_real)) deallocate(self%ethi_radflux_dia_real)
        if (allocated(self%ethi_radflux_mag_adv)) deallocate(self%ethi_radflux_mag_adv)
        if (allocated(self%ethi_radflux_mag_qnormal)) deallocate(self%ethi_radflux_mag_qnormal)
        if (allocated(self%ethi_radflux_mag_qtotal)) deallocate(self%ethi_radflux_mag_qtotal)
        if (allocated(self%ethi_radflux_visc)) deallocate(self%ethi_radflux_visc)
        if (allocated(self%ethi_pi_div_exb)) deallocate(self%ethi_pi_div_exb)
        if (allocated(self%ethi_upar_gradpar_pi)) deallocate(self%ethi_upar_gradpar_pi)
        if (allocated(self%etrans_upar_gradpar_pe)) deallocate(self%etrans_upar_gradpar_pe)
        if (allocated(self%etrans_jpar_gradpar_pot)) deallocate(self%etrans_jpar_gradpar_pot)

        ! Kinetic energies (perpendicular / parallel) and their derivatives
        if (allocated(self%ekin_perp)) deallocate(self%ekin_perp)
        if (allocated(self%ekin_perp_ddt)) deallocate(self%ekin_perp_ddt)

        if (allocated(self%ekin_par)) deallocate(self%ekin_par)
        if (allocated(self%ekin_par_ddt)) deallocate(self%ekin_par_ddt)

        ! Electromagnetic energy
        if (allocated(self%eem)) deallocate(self%eem)
        if (allocated(self%eem_ddt)) deallocate(self%eem_ddt)
        if (allocated(self%eem_flux)) deallocate(self%eem_flux)

        ! Polarisation-velocity fluxes
        if (allocated(self%ethi_upol_flux)) deallocate(self%ethi_upol_flux)
        if (allocated(self%etrans_upol_grad_pi)) deallocate(self%etrans_upol_grad_pi)
        if (allocated(self%ekinperp_upol_flux)) deallocate(self%ekinperp_upol_flux)
        if (allocated(self%ekinperp_jpol_poynt_flux)) deallocate(self%ekinperp_jpol_poynt_flux)

        ! Dissipation diagnostics per evolved field
        if (allocated(self%diss_ne)) deallocate(self%diss_ne)
        if (allocated(self%diss_te)) deallocate(self%diss_te)
        if (allocated(self%diss_pe)) deallocate(self%diss_pe)
        if (allocated(self%diss_ti)) deallocate(self%diss_ti)
        if (allocated(self%diss_pi)) deallocate(self%diss_pi)
        if (allocated(self%diss_upar)) deallocate(self%diss_upar)
        if (allocated(self%diss_apar)) deallocate(self%diss_apar)
        if (allocated(self%diss_vort)) deallocate(self%diss_vort)

        ! Gyroviscous stress functions
        if (allocated(self%gstressv)) deallocate(self%gstressv)
        if (allocated(self%gstressv_par)) deallocate(self%gstressv_par)

    end subroutine destructor

    subroutine compute_div_jpol(comm_handler, equi, equi_storage, mesh_cano, mesh_stag, map, &
                                penalisation, opsinplane, boundaries, &
                                ne, ti, pot, uparv_cano, aparv_fluct_cano, &
                                nev_prev, tiv_prev, potv_prev, div_jpol, co, &
                                gstress, crv_gstress)
        !! Computes divergence of polarisation velocity, i.e.
        !! div_jpol = div(co * ne * upol), where
        !! upol = -1/Btor^2 * [d/dt + (v_ExB + upar* b) * nabla] * [\nabla_\perp pot + 1/n * \nabla_\perp pi]
        !!
        !! The d/dt part is evaluated as a first-order finite difference between
        !! the current fields and the *_prev fields over the timestep dtau.
        !! NOTE: dtau, delta and beta are module-scope parameters (from the
        !! params use-statements at the top of the file) and appear in the
        !! OpenMP shared clauses below.
        type(comm_handler_t), intent(in) :: comm_handler
        !! MPI communicators
        class(equilibrium_t), intent(in) :: equi
        !! Equilibrium
        class(equilibrium_storage_t), intent(in) :: equi_storage
        !! Equilibrium storage
        type(mesh_cart_t), intent(in) :: mesh_cano
        !! Mesh on canonical grid
        type(mesh_cart_t), intent(in) :: mesh_stag
        !! Mesh on staggered grid
        type(parallel_map_t), intent(in) :: map
        !! Parallel map
        type(inplane_operators_t), intent(inout) :: opsinplane
        !! Inplane operators
        type(penalisation_t), intent(in) :: penalisation
        !! Penalisation
        type(boundaries_braginskii_t), intent(in) :: boundaries
        !! Boundary information for the Braginskii model
        type(variable_t), intent(in) :: ne
        !! Density
        type(variable_t), intent(in) :: ti
        !! Ion temperature
        type(variable_t), intent(in) :: pot
        !! Electrostatic potential
        real(GP), dimension(mesh_cano%get_n_points()), intent(in) :: uparv_cano
        !! Values of parallel velocity on canonical mesh
        real(GP), dimension(mesh_cano%get_n_points()), intent(in) :: aparv_fluct_cano
        !! Values of fluctuating parallel electromagnetic potential on canonical mesh
        real(GP), dimension(mesh_cano%get_n_points()), intent(in) :: nev_prev
        !! Values of density at previous timestep
        real(GP), dimension(mesh_cano%get_n_points()), intent(in) :: tiv_prev
        !! Values of ion temperature at previous timestep
        real(GP), dimension(mesh_cano%get_n_points()), intent(in) :: potv_prev
        !! Values of electrostatic potential at previous timestep
        real(GP), dimension(mesh_cano%get_n_points()), intent(out) :: div_jpol
        !! Divergence of (co times) polarisation current
        real(GP), dimension(mesh_cano%get_n_points()), intent(in), optional :: co
        !! Coefficient under divergence, default = 1
        real(GP), dimension(mesh_cano%get_n_points()), intent(in), optional :: gstress
        !! Ion viscous stress function, if not present only the nonviscous part is computed
        real(GP), dimension(mesh_cano%get_n_points_inner()), intent(in), optional :: crv_gstress
        !! Curvature of ion viscous stress function

        integer :: l, ki
        real(GP) :: phi, x, y
        real(GP), dimension(mesh_cano%get_n_points()) :: cov, vortv, vortv_prev, cov_flutter, &
                                                    aparv_fluct_cano_over_btor
        ! logne is a temporary variable_t carrying log(ne) with halos, needed by
        ! parallel_polarisation_advection below
        type(variable_t) :: logne

        phi = mesh_cano%get_phi()

        ! Determine coefficient under divergence:
        ! cov = co * ne * J / |B|^2 (the two branches differ only in whether
        ! the optional co factor is applied; co defaults to 1 when absent)
        if (present(co)) then
            !$omp parallel default(none) private(l, x, y) &
            !$omp shared(equi, equi_storage, mesh_cano, phi, ne, co, cov)
            !$omp do
            do l = 1, mesh_cano%get_n_points()
                x = mesh_cano%get_x(l)
                y = mesh_cano%get_y(l)
                cov(l) = co(l) * ne%vals(l) * equi%jacobian(x, y, phi) / (equi_storage%absb(l)**2)
            enddo
            !$omp end do
            !$omp end parallel
        else
            !$omp parallel default(none) private(l, x, y) &
            !$omp shared(equi, equi_storage, mesh_cano, phi, ne, cov)
            !$omp do
            do l = 1, mesh_cano%get_n_points()
                x = mesh_cano%get_x(l)
                y = mesh_cano%get_y(l)
                cov(l) = ne%vals(l) * equi%jacobian(x, y, phi) / (equi_storage%absb(l)**2)
            enddo
            !$omp end do
            !$omp end parallel
        endif
        ! Coefficient for the magnetic-flutter advection term:
        ! cov_flutter = cov * upar * Btor
        !$omp parallel default(none) private(l) &
        !$omp shared(mesh_cano, equi_storage, cov, uparv_cano, aparv_fluct_cano, &
        !$omp        aparv_fluct_cano_over_btor, cov_flutter)
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            cov_flutter(l) = cov(l) * uparv_cano(l) * equi_storage%btor(l)
            aparv_fluct_cano_over_btor(l) = aparv_fluct_cano(l) / equi_storage%btor(l)
        enddo
        !$omp end do
        !$omp end parallel
        ! NOTE(review): aparv_fluct_cano_over_btor is filled above but never
        ! used afterwards in this subroutine — confirm whether the flutter
        ! polarisation_advection call below should receive it instead of
        ! aparv_fluct_cano, or whether the computation can be removed.

        ! Compute temporal derivative in polarisation: generalised vorticity at
        ! the current and previous timestep (same coefficient cov for both)
        call compute_vorticity(equi, mesh_cano, boundaries, cov, pot%vals, ne%vals, ti%vals, vortv)
        call compute_vorticity(equi, mesh_cano, boundaries, cov, potv_prev, nev_prev, tiv_prev, vortv_prev)

        div_jpol = 0.0_GP

        !$omp parallel default(none) private(ki, l, x, y) &
        !$omp shared(mesh_cano, equi, equi_storage, opsinplane, phi, dtau, delta, beta, &
        !$omp        div_jpol, vortv, vortv_prev, cov, cov_flutter, &
        !$omp        pot, ti, ne, uparv_cano, aparv_fluct_cano)
        !$omp do
        do ki = 1, mesh_cano%get_n_points_inner()
            l = mesh_cano%inner_indices(ki)
            x = mesh_cano%get_x(l)
            y = mesh_cano%get_y(l)
            ! Compute temporal derivative, ExB and flutter advection of polarisation
            div_jpol(l) = - (vortv(l) - vortv_prev(l) ) / dtau &
                + delta / equi%jacobian(x, y, phi) * &
                    ( opsinplane%polarisation_advection( &
                            1.0_GP, cov, pot%vals, pot%vals, ti%vals, ne%vals, l) &
                       - beta * opsinplane%polarisation_advection( &
                            1.0_GP, cov_flutter, aparv_fluct_cano, pot%vals, ti%vals, ne%vals, l) &
                    )
        enddo
        !$omp end do
        !$omp end parallel

        ! Compute parallel advection in polarisation; needs log(ne) with
        ! up-to-date halos (one forward and one backward plane)
        call logne%init(comm_handler, mesh_cano%get_n_points(), staggered=.false.)
        call logne%create_halos(comm_handler, 1, 1)
        !$omp parallel default(none) private(l) shared(mesh_cano, ne, logne)
        !$omp do
        do l = 1, mesh_cano%get_n_points()
            logne%vals(l) = log(ne%vals(l))
        enddo
        !$omp end do
        !$omp end parallel
        call logne%start_comm_halos(comm_handler)
        call logne%finalize_comm_halos(comm_handler)

        ! Adds the parallel advection contribution onto div_jpol in place
        call parallel_polarisation_advection(comm_handler, equi, mesh_cano, mesh_stag, map, penalisation, &
                                             boundaries, cov, &
                                             pot, ti, ne, logne, uparv_cano, div_jpol)

        ! Compute gyroviscous contribution; cov is reused here as a plain
        ! coefficient (co, or 1 when absent) — NOT the cov = co*ne*J/B^2 above
        if (present(gstress)) then
            if (present(co)) then
                cov = co
            else
                cov = 1.0_GP
            endif
            !$omp parallel default(none) private(ki, l) &
            !$omp shared(mesh_cano, opsinplane, equi_storage, delta, cov, gstress, crv_gstress, div_jpol)
            !$omp do
            do ki = 1, mesh_cano%get_n_points_inner()
                l = mesh_cano%inner_indices(ki)

                ! NOTE: crv_gstress is indexed with the inner index ki, the
                ! other arrays with the full mesh index l
                div_jpol(l) = div_jpol(l) - cov(l) / 6.0_GP * crv_gstress(ki) &
                              - 0.5_GP * gstress(l) * opsinplane%curvature(cov, l) &
                              - delta / (3.0_GP * equi_storage%btor(l)) &
                                * opsinplane%arakawa(cov, gstress, l)
            enddo
            !$omp end do
            !$omp end parallel
        endif

    end subroutine

    subroutine compute_dissipation(comm_handler, mesh_var, mesh_dual, map, opsinplane, &
                                   order_diss_perp, cf_diss_perp, cf_buffer, &
                                   nu_perpdiss, nu_buffer, nu_pardiss, &
                                   var, is_stag, dissv)
        !! Computes the total dissipation acting on variable var, i.e. the sum
        !! of perpendicular (hyper)-dissipation, diffusion in buffer zones and
        !! parallel diffusion, each weighted by its coefficient.
        type(comm_handler_t), intent(in) :: comm_handler
        !! MPI communicators
        type(mesh_cart_t), intent(in) :: mesh_var
        !! Mesh on which var lives
        type(mesh_cart_t), intent(in) :: mesh_dual
        !! Mesh dual to mesh_var (used for the intermediate parallel derivative)
        type(parallel_map_t), intent(in) :: map
        !! Parallel map
        type(inplane_operators_t), intent(inout) :: opsinplane
        !! Inplane operators
        integer, intent(in) :: order_diss_perp
        !! Order of the perpendicular dissipation operator
        real(GP), dimension(mesh_var%get_n_points()), intent(in) :: cf_diss_perp
        !! Envelope function for perpendicular dissipation
        real(GP), dimension(mesh_var%get_n_points()), intent(in) :: cf_buffer
        !! Envelope function for buffer diffusion
        real(GP), intent(in) :: nu_perpdiss
        !! Perpendicular (hyper)-viscosity coefficient
        real(GP), intent(in) :: nu_buffer
        !! Buffer diffusion coefficient
        real(GP), intent(in) :: nu_pardiss
        !! Parallel diffusion coefficient
        type(variable_t), intent(in) :: var
        !! Variable the dissipation is computed for
        !! Note: MPI communication over planes must already have been performed
        !! for computation of parallel dissipation
        logical, intent(in) :: is_stag
        !! True if var lives on the staggered mesh
        real(GP), dimension(mesh_var%get_n_points()), intent(out) :: dissv
        !! Dissipation values

        integer :: ip, lp
        real(GP), dimension(mesh_var%get_n_points_inner()) :: wrk_perp
        type(variable_t) :: par_aux

        ! Parallel dissipation div_b(grad_par var): the intermediate parallel
        ! derivative lives on the dual mesh, so the order of the two operators
        ! and the halo layout depend on where var itself lives.
        if (.not. is_stag) then
            ! var on canonical mesh: grad_par maps onto the staggered dual,
            ! div_b maps back
            call par_aux%init(comm_handler, mesh_dual%get_n_points(), staggered=.true.)
            call par_aux%create_halos(comm_handler, 0, 1)
            !$omp parallel default(none) shared(map, var, par_aux)
            call map%par_grad(var%vals, var%hfwd, par_aux%vals)
            !$omp end parallel
            call par_aux%start_comm_halos(comm_handler)
            call par_aux%finalize_comm_halos(comm_handler)
            !$omp parallel default(none) shared(map, par_aux, dissv)
            call map%par_divb(par_aux%vals, par_aux%hbwd, dissv)
            !$omp end parallel
        else
            ! var on staggered mesh: div_b maps onto the canonical dual,
            ! grad_par maps back
            call par_aux%init(comm_handler, mesh_dual%get_n_points(), staggered=.false.)
            call par_aux%create_halos(comm_handler, 1, 0)
            !$omp parallel default(none) shared(map, var, par_aux)
            call map%par_divb(var%vals, var%hbwd, par_aux%vals)
            !$omp end parallel
            call par_aux%start_comm_halos(comm_handler)
            call par_aux%finalize_comm_halos(comm_handler)
            !$omp parallel default(none) shared(map, par_aux, dissv)
            call map%par_grad(par_aux%vals, par_aux%hfwd, dissv)
            !$omp end parallel
        endif

        !$omp parallel default(none) private(ip, lp) &
        !$omp shared(mesh_var, opsinplane, order_diss_perp, cf_diss_perp, nu_perpdiss, &
        !$omp        cf_buffer, nu_buffer, nu_pardiss, var, wrk_perp, dissv)

        ! Perpendicular (hyper)-dissipation on the inner points
        call opsinplane%apply_dissipation_inplane(var%vals, order_diss_perp, wrk_perp, cf_diss_perp)

        ! Weighted combination of parallel, perpendicular and buffer terms
        !$omp do
        do ip = 1, mesh_var%get_n_points_inner()
            lp = mesh_var%inner_indices(ip)
            dissv(lp) = nu_pardiss * dissv(lp) + nu_perpdiss * wrk_perp(ip) &
                        + nu_buffer * opsinplane%nabla_pol_sqrd(var%vals, lp, cf_buffer)
        enddo
        !$omp end do
        !$omp end parallel

    end subroutine compute_dissipation

    subroutine compute_dissipative_radflux(mesh, equi_storage, opsinplane, bnd_descrs_nmn, &
                                           order_diss_perp, cf_diss_perp, cf_buffer, &
                                           nu_perpdiss, nu_buffer, &
                                           var, diss_flux)
        !! Computes radial flux due to dissipation and buffer zones on variable,
        !! including perpendicular (hyper)-dissipation and dissipation in buffer zones.
        !! The hyperdissipative flux is the radial gradient of the hyperdiffusion
        !! of var at one order below order_diss_perp; the buffer flux is the
        !! radial gradient of var itself. Both are scaled by their envelope
        !! functions and coefficients and normalised by the module-scope delta.

        type(mesh_cart_t), intent(in) :: mesh
        !! Mesh on canonical grid
        ! Fix: intent(in) was missing on this dummy (it is only read here)
        type(equilibrium_storage_t), intent(in) :: equi_storage
        !! Equilibrium storage
        type(inplane_operators_t), intent(inout) :: opsinplane
        !! Inplane operators
        integer, dimension(mesh%get_n_points_boundary()), intent(in) :: bnd_descrs_nmn
        !! Boundary descriptor for Neumann boundary conditions everywhere
        integer, intent(in) :: order_diss_perp
        !! Order of perpendicular dissipation operator
        real(GP), dimension(mesh%get_n_points()), intent(in) :: cf_diss_perp
        !! Envelope function for perpendicular dissipation
        real(GP), dimension(mesh%get_n_points()), intent(in) :: cf_buffer
        !! Envelope function for buffer diffusion
        real(GP), intent(in) :: nu_perpdiss
        !! Perpendicular (hyper)-viscosity coefficient
        real(GP), intent(in) :: nu_buffer
        !! Buffer diffusion coefficient
        type(variable_t), intent(in) :: var
        !! Variable that dissipation shall be computed for
        real(GP), dimension(mesh%get_n_points()), intent(out) :: diss_flux
        !! Dissipative radial flux

        integer :: l, ki, order_diss_perp_prev
        real(GP) :: eradx, erady, dvar_drad, dhypviscprev_drad
        real(GP), dimension(mesh%get_n_points()) :: hypvisc_prev
        real(GP), dimension(mesh%get_n_points_inner()) :: wrk

        ! Compute hyperdiffusion of previous order (one order less);
        ! for order 1 the "previous order" is the variable itself
        order_diss_perp_prev = order_diss_perp - 1
        if ( order_diss_perp_prev == 0 ) then
            hypvisc_prev = var%vals
        else
            !$omp parallel default(none) private(l, ki) &
            !$omp          shared(mesh, opsinplane, bnd_descrs_nmn, order_diss_perp_prev, cf_diss_perp, &
            !$omp                 var, wrk, hypvisc_prev)
            call opsinplane%apply_dissipation_inplane( &
                                var%vals, &
                                order_diss_perp_prev, &
                                wrk, cf_diss_perp)
            ! Scatter from the inner-point work array to the full mesh array
            !$omp do
            do ki = 1, mesh%get_n_points_inner()
                l = mesh%inner_indices(ki)
                hypvisc_prev(l) = wrk(ki)
            enddo
            !$omp end do
            ! Neumann boundaries + ghost-point extrapolation so the radial
            ! derivatives below are well-defined near the boundary
            call set_perpbnds(mesh, bnd_descrs_nmn, hypvisc_prev)
            call extrapolate_ghost_points(mesh, hypvisc_prev)
            !$omp end parallel
        endif

        !$omp parallel default(none) &
        !$omp private(ki, l, eradx, erady, dvar_drad, dhypviscprev_drad) &
        !$omp shared(mesh, opsinplane, equi_storage, bnd_descrs_nmn, delta, &
        !$omp        nu_perpdiss, nu_buffer, cf_diss_perp, cf_buffer, var, hypvisc_prev, diss_flux)
        !$omp do
        do ki = 1, mesh%get_n_points_inner()
            l = mesh%inner_indices(ki)
            ! Components of the radial unit vector at point l
            eradx   = equi_storage%erad(l, 1)
            erady   = equi_storage%erad(l, 2)

            ! Radial derivatives of var (buffer flux) and of the lower-order
            ! hyperdiffusion (hyperdissipative flux)
            dvar_drad = eradx * opsinplane%ddx(var%vals, l) + erady * opsinplane%ddy(var%vals, l)
            dhypviscprev_drad = eradx * opsinplane%ddx(hypvisc_prev, l) + erady * opsinplane%ddy(hypvisc_prev, l)

            diss_flux(l) = (nu_perpdiss * cf_diss_perp(l) * dhypviscprev_drad + &
                            nu_buffer * cf_buffer(l) * dvar_drad) / delta
        enddo
        !$omp end do

        call set_perpbnds(mesh, bnd_descrs_nmn, diss_flux)
        call extrapolate_ghost_points(mesh, diss_flux)

        !$omp end parallel

    end subroutine compute_dissipative_radflux

end module