// NOTE(review): garbled extraction of a predictor-style RHS/field update
// (AMReX-based; presumably AMR-Wind — TODO confirm against the repository).
// The integers fused onto many lines ("33", "35", ...) are line numbers from
// the original file; gaps in that numbering mean source lines are MISSING
// here (e.g. the function name/head before "33", the switch on `difftype`
// that sets `factor` between 35 and 50, the `amrex::ParallelFor(` call heads
// before 97/120, and several closing braces). Annotated in place only —
// this fragment does not compile as shown.
33 const DiffusionType difftype,
const amrex::Real dt,
bool mesh_mapping)
// `factor` is presumably the dt-scaled weight applied to the explicit
// diffusion term; the missing switch over `difftype` must set it, with
// unknown types falling through to the Abort below. TODO confirm.
35 amrex::Real factor = 0.0_rt;
50 amrex::Abort(
"Invalid diffusion type");
58 const int nlevels =
fields.repo.num_active_levels();
61 auto& field =
fields.field;
// Mapped-mesh runs: the update below is written in stretched space, so
// convert both new and old states if they are still in uniform space.
62 if (field.in_uniform_space() && mesh_mapping) {
63 field.to_stretched_space();
66 if (field_old.in_uniform_space() && mesh_mapping) {
67 field_old.to_stretched_space();
72 auto& src_term =
fields.src_term;
// diff/conv terms are taken at time state `fstate` (declared outside this
// fragment).
73 auto& diff_term =
fields.diff_term.state(fstate);
74 auto& conv_term =
fields.conv_term.state(fstate);
// Integer mask; cells with mask 0 are excluded from the RHS contribution
// (see the static_cast<Real>(imask) multiplier in the kernels below).
75 auto& mask_cell =
fields.repo.get_int_field(
"mask_cell");
// Initializer missing from the extraction — presumably the mesh-mapping
// detJ field fetched from the repo when mesh_mapping is true. TODO confirm.
76 Field const* mesh_detJ =
// Per-level update: gather MultiArray4 views, then launch a kernel
// (ParallelFor head lines are missing from this extraction).
80 for (
int lev = 0; lev < nlevels; ++lev) {
81 const auto& fld_arrs = field(lev).arrays();
82 const auto& fld_o_arrs = field_old(lev).const_arrays();
83 const auto& rho_o_arrs = den_old(lev).const_arrays();
84 const auto& rho_arrs = den_new(lev).const_arrays();
85 const auto& src_arrs = src_term(lev).const_arrays();
86 const auto& diff_arrs = diff_term(lev).const_arrays();
87 const auto& ddt_o_arrs = conv_term(lev).const_arrays();
88 const auto& imask_arrs = mask_cell(lev).const_arrays();
// detJ views are only valid when mesh_mapping is true; kernels guard every
// access with the same `mesh_mapping ?` test.
89 const auto& detJ_arrs =
90 mesh_mapping ? ((*mesh_detJ)(lev).const_arrays())
91 : amrex::MultiArray4<amrex::Real const>();
// Conservative form: advance rho*phi, then divide by new density.
93 if (PDE::multiply_rho) {
97 field(lev), amrex::IntVect(0), PDE::ndim,
98 [=] AMREX_GPU_DEVICE(
int nbx,
int i,
int j,
int k,
int n) {
// Assignment target missing from extraction — presumably
// `const amrex::Real det_j =`. TODO confirm.
100 mesh_mapping ? (detJ_arrs[nbx](i, j, k)) : 1.0_rt;
// phi^{n+1} = (rho_old*detJ*phi_old + mask*(conv_old + detJ*src
//              + factor*diff)) / rho_new / detJ
// (the mask*(...) group presumably also carries a dt factor on a
// missing line — TODO confirm).
102 fld_arrs[nbx](i, j, k, n) =
103 (rho_o_arrs[nbx](i, j, k) * det_j *
104 fld_o_arrs[nbx](i, j, k, n)) +
105 (
static_cast<amrex::Real
>(
106 imask_arrs[nbx](i, j, k)) *
108 (ddt_o_arrs[nbx](i, j, k, n) +
109 det_j * src_arrs[nbx](i, j, k, n) +
110 factor * diff_arrs[nbx](i, j, k, n)));
112 fld_arrs[nbx](i, j, k, n) /= rho_arrs[nbx](i, j, k);
115 fld_arrs[nbx](i, j, k, n) /= det_j;
// Non-conservative form: same update without the density weighting.
120 field(lev), amrex::IntVect(0), PDE::ndim,
121 [=] AMREX_GPU_DEVICE(
int nbx,
int i,
int j,
int k,
int n) {
// Same missing `det_j` assignment target as above.
123 mesh_mapping ? (detJ_arrs[nbx](i, j, k)) : 1.0_rt;
125 fld_arrs[nbx](i, j, k, n) =
126 (det_j * fld_o_arrs[nbx](i, j, k, n)) +
127 (
static_cast<amrex::Real
>(
128 imask_arrs[nbx](i, j, k)) *
130 (ddt_o_arrs[nbx](i, j, k, n) +
131 det_j * src_arrs[nbx](i, j, k, n) +
132 factor * diff_arrs[nbx](i, j, k, n)));
135 fld_arrs[nbx](i, j, k, n) /= det_j;
// MultiFab::arrays()-style kernels require an explicit stream sync.
140 amrex::Gpu::streamSynchronize();
// NOTE(review): garbled extraction of a corrector-style RHS/field update,
// the companion of the predictor fragment above. Same extraction artifacts:
// fused original line numbers, missing function head before "150", missing
// switch on `difftype` that sets `ofac`/`nfac` (152..171 gap), missing
// `amrex::ParallelFor(` heads and closing braces. Annotated in place only;
// does not compile as shown.
150 const DiffusionType difftype,
const amrex::Real dt,
bool mesh_mapping)
// `ofac`/`nfac` are presumably the dt-scaled weights for the OLD and NEW
// diffusion terms (e.g. 0.5*dt each for Crank-Nicolson); set in the missing
// switch over `difftype`, unknown types hitting the Abort. TODO confirm.
152 amrex::Real ofac = 0.0_rt;
153 amrex::Real nfac = 0.0_rt;
171 amrex::Abort(
"Invalid diffusion type");
174 const int nlevels =
fields.repo.num_active_levels();
177 auto& field =
fields.field;
// Mapped-mesh runs: work in stretched space (mirrors the predictor).
178 if (field.in_uniform_space() && mesh_mapping) {
179 field.to_stretched_space();
182 if (field_old.in_uniform_space() && mesh_mapping) {
183 field_old.to_stretched_space();
188 auto& src_term =
fields.src_term;
189 auto& diff_term =
fields.diff_term;
190 auto& conv_term =
fields.conv_term;
193 auto& mask_cell =
fields.repo.get_int_field(
"mask_cell");
// Initializer missing — presumably the mesh-mapping detJ field, as in the
// predictor fragment. TODO confirm.
194 Field const* mesh_detJ =
198 for (
int lev = 0; lev < nlevels; ++lev) {
199 const auto& fld_arrs = field(lev).arrays();
200 const auto& fld_o_arrs = field_old(lev).const_arrays();
201 const auto& rho_o_arrs = den_old(lev).const_arrays();
202 const auto& rho_arrs = den_new(lev).const_arrays();
203 const auto& src_arrs = src_term(lev).const_arrays();
204 const auto& diff_arrs = diff_term(lev).const_arrays();
205 const auto& ddt_arrs = conv_term(lev).const_arrays();
// Corrector additionally uses OLD diffusion/convection terms so the time
// derivative can be trapezoidally averaged below.
206 const auto& diff_o_arrs = diff_term_old(lev).const_arrays();
207 const auto& ddt_o_arrs = conv_term_old(lev).const_arrays();
208 const auto& imask_arrs = mask_cell(lev).const_arrays();
209 const auto& detJ_arrs =
210 mesh_mapping ? ((*mesh_detJ)(lev).const_arrays())
211 : amrex::MultiArray4<amrex::Real const>();
// Conservative form: advance rho*phi, then divide by new density.
213 if (PDE::multiply_rho) {
217 field(lev), amrex::IntVect(0), PDE::ndim,
218 [=] AMREX_GPU_DEVICE(
int nbx,
int i,
int j,
int k,
int n) {
// Assignment target missing — presumably `const amrex::Real det_j =`.
220 mesh_mapping ? (detJ_arrs[nbx](i, j, k)) : 1.0_rt;
// phi^{n+1} = (rho_old*detJ*phi_old
//              + mask*(0.5*(conv_old + conv_new)
//                      + ofac*diff_old + nfac*diff_new
//                      + detJ*src)) / rho_new / detJ
// (a dt factor on the mask*(...) group is presumably on a missing
// line — TODO confirm).
222 fld_arrs[nbx](i, j, k, n) =
223 (rho_o_arrs[nbx](i, j, k) * det_j *
224 fld_o_arrs[nbx](i, j, k, n)) +
225 (
static_cast<amrex::Real
>(
226 imask_arrs[nbx](i, j, k)) *
228 (0.5_rt * (ddt_o_arrs[nbx](i, j, k, n) +
229 ddt_arrs[nbx](i, j, k, n)) +
230 ofac * diff_o_arrs[nbx](i, j, k, n) +
231 nfac * diff_arrs[nbx](i, j, k, n) +
232 det_j * src_arrs[nbx](i, j, k, n)));
234 fld_arrs[nbx](i, j, k, n) /= rho_arrs[nbx](i, j, k);
237 fld_arrs[nbx](i, j, k, n) /= det_j;
// Non-conservative form: same trapezoidal update without density weighting.
242 field(lev), amrex::IntVect(0), PDE::ndim,
243 [=] AMREX_GPU_DEVICE(
int nbx,
int i,
int j,
int k,
int n) {
// Same missing `det_j` assignment target as above.
245 mesh_mapping ? (detJ_arrs[nbx](i, j, k)) : 1.0_rt;
247 fld_arrs[nbx](i, j, k, n) =
248 (det_j * fld_o_arrs[nbx](i, j, k, n)) +
249 (
static_cast<amrex::Real
>(
250 imask_arrs[nbx](i, j, k)) *
252 (0.5_rt * (ddt_o_arrs[nbx](i, j, k, n) +
253 ddt_arrs[nbx](i, j, k, n)) +
254 ofac * diff_o_arrs[nbx](i, j, k, n) +
255 nfac * diff_arrs[nbx](i, j, k, n) +
256 det_j * src_arrs[nbx](i, j, k, n)));
259 fld_arrs[nbx](i, j, k, n) /= det_j;
// MultiFab::arrays()-style kernels require an explicit stream sync.
264 amrex::Gpu::streamSynchronize();
// NOTE(review): garbled extraction of a post-solve-style helper that adds a
// half-dt-weighted explicit diffusion contribution back into the solution
// field `dof` (consistent with a Crank-Nicolson correction — TODO confirm).
// Same artifacts as the fragments above: fused original line numbers,
// missing function head before "275", missing ParallelFor head before 289;
// the fragment is also cut off after line 297 (lambda/loop never closed).
// Annotated in place only; does not compile as shown.
275 const auto& d_divtau =
fields.diff_term;
278 const auto& repo =
fields.repo;
// Cells with mask 0 receive no correction (factor becomes 0 below).
279 const auto& mask_cell = repo.get_int_field(
"mask_cell");
281 const int nlevels = repo.num_active_levels();
282 for (
int lev = 0; lev < nlevels; ++lev) {
283 auto f_arrs = dof(lev).arrays();
284 const auto& d_diff_arrs = d_divtau(lev).const_arrays();
285 const auto& mask_arrs = mask_cell(lev).const_arrays();
286 const auto& rho_arrs =
density(lev).const_arrays();
// ParallelFor head missing; loops over all components of `dof`.
289 dof(lev), amrex::IntVect(0), dof.num_comp(),
290 [=] AMREX_GPU_DEVICE(
int nbx,
int i,
int j,
int k,
int n) {
// Assignment target missing — presumably `amrex::Real factor =`
// (it is divided and reused below, so it cannot be const).
292 0.5_rt * dt * (amrex::Real)mask_arrs[nbx](i, j, k);
// Conservative equations scale the correction by 1/rho.
293 if (PDE::multiply_rho) {
294 factor /= rho_arrs[nbx](i, j, k);
// dof += 0.5*dt*mask/rho? * diff_term
296 f_arrs[nbx](i, j, k, n) +=
297 factor * d_diff_arrs[nbx](i, j, k, n);