    const DiffusionType difftype, const amrex::Real dt, bool mesh_mapping)
{
    // Weight on the explicit diffusion contribution. The case values below
    // are assumed from the time-integration scheme: fully explicit -> 1,
    // Crank-Nicolson -> 1/2 (the other half is handled implicitly), and
    // fully implicit -> 0.
    amrex::Real factor = 0.0_rt;
    switch (difftype) {
    case DiffusionType::Explicit:
        factor = 1.0_rt;
        break;
    case DiffusionType::Crank_Nicolson:
        factor = 0.5_rt;
        break;
    case DiffusionType::Implicit:
        factor = 0.0_rt;
        break;
    default:
        amrex::Abort("Invalid diffusion type");
    }

    // Field state for the diffusion and convective terms: the Godunov scheme
    // carries a single (new) state, while MOL uses the old state here.
    auto fstate = std::is_same<Scheme, fvm::Godunov>::value
                      ? FieldState::New
                      : FieldState::Old;
    const int nlevels = fields.repo.num_active_levels();
    // For the RHS evaluation the field must be in stretched (physical) space
    auto& field = fields.field;
    if (field.in_uniform_space() && mesh_mapping) {
        field.to_stretched_space();
    }
    auto& field_old = field.state(FieldState::Old);
    if (field_old.in_uniform_space() && mesh_mapping) {
        field_old.to_stretched_space();
    }
    auto& src_term = fields.src_term;
    auto& diff_term = fields.diff_term.state(fstate);
    auto& conv_term = fields.conv_term.state(fstate);
    auto& mask_cell = fields.repo.get_int_field("mask_cell");
    // Cell-centered det(J) of the mesh map (accessor name assumed here);
    // only dereferenced when mesh_mapping is enabled.
    Field const* mesh_detJ =
        mesh_mapping ? &(fields.repo.get_mesh_mapping_detJ(FieldLoc::CELL))
                     : nullptr;
    for (int lev = 0; lev < nlevels; ++lev) {
        const auto& fld_arrs = field(lev).arrays();
        const auto& fld_o_arrs = field_old(lev).const_arrays();
        const auto& rho_o_arrs = den_old(lev).const_arrays();
        const auto& rho_arrs = den_new(lev).const_arrays();
        const auto& src_arrs = src_term(lev).const_arrays();
        const auto& diff_arrs = diff_term(lev).const_arrays();
        const auto& ddt_o_arrs = conv_term(lev).const_arrays();
        const auto& imask_arrs = mask_cell(lev).const_arrays();
        const auto& detJ_arrs =
            mesh_mapping ? ((*mesh_detJ)(lev).const_arrays())
                         : amrex::MultiArray4<amrex::Real const>();
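
        // The kernels below assemble the predictor state. Writing mask for
        // mask_cell and det(J) = 1 when mesh mapping is disabled, the update
        // is effectively
        //
        //   phi* = [rho_old * det(J) * phi_old
        //           + mask * dt * (conv_old + det(J) * src + factor * diff)]
        //          / (rho_new * det(J))
        //
        // The density factors drop out when PDE::multiply_rho is false.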
        if (PDE::multiply_rho) {
            // For equations carrying a density factor, divide it back out so
            // the field is stored in primitive (non-conservative) form.
            amrex::ParallelFor(
                field(lev), amrex::IntVect(0), PDE::ndim,
                [=] AMREX_GPU_DEVICE(
                    int nbx, int i, int j, int k, int n) noexcept {
                    const amrex::Real det_j =
                        mesh_mapping ? (detJ_arrs[nbx](i, j, k)) : 1.0_rt;

                    fld_arrs[nbx](i, j, k, n) =
                        rho_o_arrs[nbx](i, j, k) * det_j *
                            fld_o_arrs[nbx](i, j, k, n) +
                        static_cast<amrex::Real>(imask_arrs[nbx](i, j, k)) *
                            dt *
                            (ddt_o_arrs[nbx](i, j, k, n) +
                             det_j * src_arrs[nbx](i, j, k, n) +
                             factor * diff_arrs[nbx](i, j, k, n));

                    fld_arrs[nbx](i, j, k, n) /= rho_arrs[nbx](i, j, k);

                    if (mesh_mapping) {
                        fld_arrs[nbx](i, j, k, n) /= det_j;
                    }
                });
        } else {
            amrex::ParallelFor(
                field(lev), amrex::IntVect(0), PDE::ndim,
                [=] AMREX_GPU_DEVICE(
                    int nbx, int i, int j, int k, int n) noexcept {
                    const amrex::Real det_j =
                        mesh_mapping ? (detJ_arrs[nbx](i, j, k)) : 1.0_rt;

                    fld_arrs[nbx](i, j, k, n) =
                        det_j * fld_o_arrs[nbx](i, j, k, n) +
                        static_cast<amrex::Real>(imask_arrs[nbx](i, j, k)) *
                            dt *
                            (ddt_o_arrs[nbx](i, j, k, n) +
                             det_j * src_arrs[nbx](i, j, k, n) +
                             factor * diff_arrs[nbx](i, j, k, n));

                    if (mesh_mapping) {
                        fld_arrs[nbx](i, j, k, n) /= det_j;
                    }
                });
        }
    }
    amrex::Gpu::streamSynchronize();
}
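
// Corrector-stage RHS assembly: same structure as the predictor, but the
// convective term is averaged between the old and new states and the
// explicit diffusion contribution is split between the old (ofac) and new
// (nfac) diffusion terms.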
    const DiffusionType difftype, const amrex::Real dt, bool mesh_mapping)
{
    // Weights on the old- and new-time explicit diffusion terms. The case
    // values below are assumed from the time-integration scheme: fully
    // explicit averages both states, Crank-Nicolson keeps only the old half
    // explicitly (the new half is handled implicitly), and fully implicit
    // keeps neither.
    amrex::Real ofac = 0.0_rt;
    amrex::Real nfac = 0.0_rt;
    switch (difftype) {
    case DiffusionType::Explicit:
        ofac = 0.5_rt;
        nfac = 0.5_rt;
        break;
    case DiffusionType::Crank_Nicolson:
        ofac = 0.5_rt;
        nfac = 0.0_rt;
        break;
    case DiffusionType::Implicit:
        ofac = 0.0_rt;
        nfac = 0.0_rt;
        break;
    default:
        amrex::Abort("Invalid diffusion type");
    }
    const int nlevels = fields.repo.num_active_levels();
    // For the RHS evaluation the field must be in stretched (physical) space
    auto& field = fields.field;
    if (field.in_uniform_space() && mesh_mapping) {
        field.to_stretched_space();
    }
    auto& field_old = field.state(FieldState::Old);
    if (field_old.in_uniform_space() && mesh_mapping) {
        field_old.to_stretched_space();
    }
    auto& src_term = fields.src_term;
    auto& diff_term = fields.diff_term;
    auto& conv_term = fields.conv_term;
    auto& diff_term_old = fields.diff_term.state(FieldState::Old);
    auto& conv_term_old = fields.conv_term.state(FieldState::Old);
    auto& mask_cell = fields.repo.get_int_field("mask_cell");
    // Cell-centered det(J) of the mesh map (accessor name assumed here);
    // only dereferenced when mesh_mapping is enabled.
    Field const* mesh_detJ =
        mesh_mapping ? &(fields.repo.get_mesh_mapping_detJ(FieldLoc::CELL))
                     : nullptr;
    for (int lev = 0; lev < nlevels; ++lev) {
        const auto& fld_arrs = field(lev).arrays();
        const auto& fld_o_arrs = field_old(lev).const_arrays();
        const auto& rho_o_arrs = den_old(lev).const_arrays();
        const auto& rho_arrs = den_new(lev).const_arrays();
        const auto& src_arrs = src_term(lev).const_arrays();
        const auto& diff_arrs = diff_term(lev).const_arrays();
        const auto& ddt_arrs = conv_term(lev).const_arrays();
        const auto& diff_o_arrs = diff_term_old(lev).const_arrays();
        const auto& ddt_o_arrs = conv_term_old(lev).const_arrays();
        const auto& imask_arrs = mask_cell(lev).const_arrays();
        const auto& detJ_arrs =
            mesh_mapping ? ((*mesh_detJ)(lev).const_arrays())
                         : amrex::MultiArray4<amrex::Real const>();
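
        // The kernels below assemble the corrected state. With det(J) = 1
        // when mesh mapping is disabled and mask for mask_cell, the update
        // is effectively
        //
        //   phi_new = [rho_old * det(J) * phi_old
        //              + mask * dt * (0.5 * (conv_old + conv_new)
        //                             + ofac * diff_old + nfac * diff_new
        //                             + det(J) * src)]
        //             / (rho_new * det(J))
        //
        // The density factors drop out when PDE::multiply_rho is false.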
        if (PDE::multiply_rho) {
            // For equations carrying a density factor, divide it back out so
            // the field is stored in primitive (non-conservative) form.
            amrex::ParallelFor(
                field(lev), amrex::IntVect(0), PDE::ndim,
                [=] AMREX_GPU_DEVICE(
                    int nbx, int i, int j, int k, int n) noexcept {
                    const amrex::Real det_j =
                        mesh_mapping ? (detJ_arrs[nbx](i, j, k)) : 1.0_rt;

                    fld_arrs[nbx](i, j, k, n) =
                        rho_o_arrs[nbx](i, j, k) * det_j *
                            fld_o_arrs[nbx](i, j, k, n) +
                        static_cast<amrex::Real>(imask_arrs[nbx](i, j, k)) *
                            dt *
                            (0.5_rt * (ddt_o_arrs[nbx](i, j, k, n) +
                                       ddt_arrs[nbx](i, j, k, n)) +
                             ofac * diff_o_arrs[nbx](i, j, k, n) +
                             nfac * diff_arrs[nbx](i, j, k, n) +
                             det_j * src_arrs[nbx](i, j, k, n));

                    fld_arrs[nbx](i, j, k, n) /= rho_arrs[nbx](i, j, k);

                    if (mesh_mapping) {
                        fld_arrs[nbx](i, j, k, n) /= det_j;
                    }
                });
        } else {
            amrex::ParallelFor(
                field(lev), amrex::IntVect(0), PDE::ndim,
                [=] AMREX_GPU_DEVICE(
                    int nbx, int i, int j, int k, int n) noexcept {
                    const amrex::Real det_j =
                        mesh_mapping ? (detJ_arrs[nbx](i, j, k)) : 1.0_rt;

                    fld_arrs[nbx](i, j, k, n) =
                        det_j * fld_o_arrs[nbx](i, j, k, n) +
                        static_cast<amrex::Real>(imask_arrs[nbx](i, j, k)) *
                            dt *
                            (0.5_rt * (ddt_o_arrs[nbx](i, j, k, n) +
                                       ddt_arrs[nbx](i, j, k, n)) +
                             ofac * diff_o_arrs[nbx](i, j, k, n) +
                             nfac * diff_arrs[nbx](i, j, k, n) +
                             det_j * src_arrs[nbx](i, j, k, n));

                    if (mesh_mapping) {
                        fld_arrs[nbx](i, j, k, n) /= det_j;
                    }
                });
        }
    }
    amrex::Gpu::streamSynchronize();
}
    const auto& d_divtau = fields.diff_term;
    const auto& repo = fields.repo;
    const auto& mask_cell = repo.get_int_field("mask_cell");

    const int nlevels = repo.num_active_levels();
    for (int lev = 0; lev < nlevels; ++lev) {
        auto f_arrs = dof(lev).arrays();
        const auto& d_diff_arrs = d_divtau(lev).const_arrays();
        const auto& mask_arrs = mask_cell(lev).const_arrays();
        const auto& rho_arrs = density(lev).const_arrays();
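
        // Add half of the explicit diffusion term back to the degrees of
        // freedom, scaled by the cell mask; when the PDE carries a density
        // factor the contribution is converted to primitive form first:
        //
        //   dof += 0.5 * dt * mask / rho * divtau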
        amrex::ParallelFor(
            dof(lev), amrex::IntVect(0), dof.num_comp(),
            [=] AMREX_GPU_DEVICE(
                int nbx, int i, int j, int k, int n) noexcept {
                amrex::Real factor =
                    0.5_rt * dt *
                    static_cast<amrex::Real>(mask_arrs[nbx](i, j, k));
                if (PDE::multiply_rho) {
                    factor /= rho_arrs[nbx](i, j, k);
                }
                f_arrs[nbx](i, j, k, n) +=
                    factor * d_diff_arrs[nbx](i, j, k, n);
            });
    }