From 8eefb02c1d2cb351f39296d25b53bd5c27c6ae34 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 28 Aug 2024 11:54:01 -0700 Subject: [PATCH 01/86] Adding RZ implementation of external particle field for Ohms Law Hybrid solver. --- .../HybridPICModel/HybridPICModel.H | 4 +++ .../HybridPICModel/HybridPICModel.cpp | 11 +++++++ .../HybridPICSolveE.cpp | 31 +++++++++++++++++-- Source/Initialization/WarpXInitData.cpp | 5 +-- 4 files changed, 46 insertions(+), 5 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index 3a49d5fad4b..23419ff7e11 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -187,6 +187,10 @@ public: std::array< amrex::ParserExecutor<4>, 3> m_J_external; bool m_external_field_has_time_dependence = false; + /** External B field */ + bool m_add_ext_particle_B_field = false; + std::array< amrex::ParserExecutor<4>, 3> m_B_external; + // Declare multifabs specifically needed for the hybrid-PIC model amrex::Vector< std::unique_ptr > rho_fp_temp; amrex::Vector, 3 > > current_fp_temp; diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 70efc04e259..f6863dba891 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -11,6 +11,7 @@ #include "EmbeddedBoundary/Enabled.H" #include "FieldSolver/Fields.H" +#include "Particles/MultiParticleContainer.H" #include "WarpX.H" using namespace amrex; @@ -153,6 +154,16 @@ void HybridPICModel::InitData () } auto & warpx = WarpX::GetInstance(); + const auto& mypc = warpx.GetPartContainer(); + + if ( 
mypc.m_B_ext_particle_s == "parse_b_ext_particle_function") { + constexpr auto num_arguments = 4; //x,y,z,t + m_B_external[0] = mypc.m_Bx_particle_parser->compile(); + m_B_external[1] = mypc.m_By_particle_parser->compile(); + m_B_external[2] = mypc.m_Bz_particle_parser->compile(); + + m_add_ext_particle_B_field = true; + } // Get the grid staggering of the fields involved in calculating E amrex::IntVect Jx_stag = warpx.getField(FieldType::current_fp, 0,0).ixType().toIntVect(); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index baeaf7a6c18..4883a4cadeb 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -419,6 +419,18 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( const bool include_hyper_resistivity_term = (eta_h > 0.0) && solve_for_Faraday; + const bool include_B_ext_part = hybrid_model->m_add_ext_particle_B_field; + const auto Br_part = hybrid_model->m_B_external[0]; + const auto Bt_part = hybrid_model->m_B_external[1]; + const auto Bz_part = hybrid_model->m_B_external[2]; + + auto & warpx = WarpX::GetInstance(); + auto t = warpx.gett_new(lev); + + auto dx_lev = warpx.Geom(lev).CellSizeArray(); + const RealBox& real_box = warpx.Geom(lev).ProbDomain(); + const auto nodal_flag = IntVect::TheNodeVector(); + // Index type required for interpolating fields from their respective // staggering to the Ex, Ey, Ez locations amrex::GpuArray const& Er_stag = hybrid_model->Ex_IndexType; @@ -492,9 +504,22 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( auto const jiz_interp = Interp(Jiz, Jz_stag, nodal, coarsen, i, j, 0, 0); // interpolate the B field to a nodal grid - auto const Br_interp = Interp(Br, Br_stag, nodal, coarsen, i, j, 0, 0); - auto const Bt_interp = Interp(Bt, Bt_stag, nodal, coarsen, i, j, 0, 0); - auto const Bz_interp = Interp(Bz, Bz_stag, nodal, 
coarsen, i, j, 0, 0); + auto Br_interp = Interp(Br, Br_stag, nodal, coarsen, i, j, 0, 0); + auto Bt_interp = Interp(Bt, Bt_stag, nodal, coarsen, i, j, 0, 0); + auto Bz_interp = Interp(Bz, Bz_stag, nodal, coarsen, i, j, 0, 0); + + if (include_B_ext_part) { + // Determine r and z on nodal mesh at i and j + const amrex::Real fac_x = (1._rt - nodal_flag[0]) * dx_lev[0] * 0.5_rt; + const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; + const amrex::Real y = 0._rt; + const amrex::Real fac_z = (1._rt - nodal_flag[1]) * dx_lev[1] * 0.5_rt; + const amrex::Real z = j*dx_lev[1] + real_box.lo(1) + fac_z; + + Br_interp += Br_part(x,y,z,t); + Bt_interp += Bt_part(x,y,z,t); + Bz_interp += Bz_part(x,y,z,t); + } // calculate enE = (J - Ji) x B enE_nodal(i, j, 0, 0) = ( diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 0cf9496e63e..cbbfc26dfd6 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -1338,8 +1338,9 @@ void WarpX::CheckKnownIssues() mypc->m_B_ext_particle_s != "none" || mypc->m_E_ext_particle_s != "none" ); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - !external_particle_field_used, - "The hybrid-PIC algorithm does not work with external fields " + (!external_particle_field_used + || mypc->m_B_ext_particle_s == "parse_b_ext_particle_function"), + "The hybrid-PIC algorithm only works with analytical external B fields " "applied directly to particles." ); } From 786a5c992fc978e20910ae09ab6750d109d4fd87 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 28 Aug 2024 15:36:23 -0700 Subject: [PATCH 02/86] Adding Cartesian implementation. 
--- .../HybridPICSolveE.cpp | 35 ++++++++++++++++--- 1 file changed, 31 insertions(+), 4 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 4883a4cadeb..be03407a0ae 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -1,8 +1,9 @@ -/* Copyright 2023 The WarpX Community +/* Copyright 2023-2024 The WarpX Community * * This file is part of WarpX. * * Authors: Roelof Groenewald (TAE Technologies) + * S. Eric Clark (Helion Energy) * * License: BSD-3-Clause-LBNL */ @@ -753,6 +754,18 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( const bool include_hyper_resistivity_term = (eta_h > 0.) && solve_for_Faraday; + const bool include_B_ext_part = hybrid_model->m_add_ext_particle_B_field; + const auto Bx_part = hybrid_model->m_B_external[0]; + const auto By_part = hybrid_model->m_B_external[1]; + const auto Bz_part = hybrid_model->m_B_external[2]; + + auto & warpx = WarpX::GetInstance(); + auto t = warpx.gett_new(lev); + + auto dx_lev = warpx.Geom(lev).CellSizeArray(); + const RealBox& real_box = warpx.Geom(lev).ProbDomain(); + const auto nodal_flag = IntVect::TheNodeVector(); + // Index type required for interpolating fields from their respective // staggering to the Ex, Ey, Ez locations amrex::GpuArray const& Ex_stag = hybrid_model->Ex_IndexType; @@ -826,9 +839,23 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( auto const jiz_interp = Interp(Jiz, Jz_stag, nodal, coarsen, i, j, k, 0); // interpolate the B field to a nodal grid - auto const Bx_interp = Interp(Bx, Bx_stag, nodal, coarsen, i, j, k, 0); - auto const By_interp = Interp(By, By_stag, nodal, coarsen, i, j, k, 0); - auto const Bz_interp = Interp(Bz, Bz_stag, nodal, coarsen, i, j, k, 0); + auto Bx_interp = Interp(Bx, Bx_stag, nodal, coarsen, i, j, k, 0); + auto By_interp = Interp(By, By_stag, nodal, 
coarsen, i, j, k, 0); + auto Bz_interp = Interp(Bz, Bz_stag, nodal, coarsen, i, j, k, 0); + + if (include_B_ext_part) { + // Determine r and z on nodal mesh at i and j + const amrex::Real fac_x = (1._rt - nodal_flag[0]) * dx_lev[0] * 0.5_rt; + const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; + const amrex::Real fac_y = (1._rt - nodal_flag[1]) * dx_lev[1] * 0.5_rt; + const amrex::Real z = j*dx_lev[1] + real_box.lo(1) + fac_y; + const amrex::Real fac_z = (1._rt - nodal_flag[2]) * dx_lev[2] * 0.5_rt; + const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; + + Br_interp += Bx_part(x,y,z,t); + Bt_interp += By_part(x,y,z,t); + Bz_interp += Bz_part(x,y,z,t); + } // calculate enE = (J - Ji) x B enE_nodal(i, j, k, 0) = ( From cb99d2af8a3a3e9f9a87c0c680ea716701d19956 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Thu, 29 Aug 2024 16:10:57 -0700 Subject: [PATCH 03/86] Adding 3D implementation of analytical particle fields. Signed-off-by: S. 
Eric Clark <25495882+clarkse@users.noreply.github.com> --- .../FiniteDifferenceSolver/HybridPICSolveE.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index be03407a0ae..cfe0b9d1e8d 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -846,15 +846,15 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( if (include_B_ext_part) { // Determine r and z on nodal mesh at i and j const amrex::Real fac_x = (1._rt - nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; + const amrex::Real xx = i*dx_lev[0] + real_box.lo(0) + fac_x; const amrex::Real fac_y = (1._rt - nodal_flag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real z = j*dx_lev[1] + real_box.lo(1) + fac_y; + const amrex::Real yy = j*dx_lev[1] + real_box.lo(1) + fac_y; const amrex::Real fac_z = (1._rt - nodal_flag[2]) * dx_lev[2] * 0.5_rt; - const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; + const amrex::Real zz = k*dx_lev[2] + real_box.lo(2) + fac_z; - Br_interp += Bx_part(x,y,z,t); - Bt_interp += By_part(x,y,z,t); - Bz_interp += Bz_part(x,y,z,t); + Bx_interp += Bx_part(xx,yy,zz,t); + By_interp += By_part(xx,yy,zz,t); + Bz_interp += Bz_part(xx,yy,zz,t); } // calculate enE = (J - Ji) x B From d866bda67f7080e4be3067f43e65a65bdecaa4c7 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Thu, 5 Sep 2024 14:26:23 -0700 Subject: [PATCH 04/86] Fixing up external fields for ohms law hybrid solver in RZ and 3D. 
--- .../HybridPICModel/HybridPICModel.H | 5 +- .../HybridPICModel/HybridPICModel.cpp | 9 +++ .../HybridPICSolveE.cpp | 79 +++++++++++++++++++ Source/Initialization/WarpXInitData.cpp | 5 +- Source/WarpX.cpp | 4 +- 5 files changed, 97 insertions(+), 5 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index 23419ff7e11..0e9c6faae7c 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -187,10 +187,13 @@ public: std::array< amrex::ParserExecutor<4>, 3> m_J_external; bool m_external_field_has_time_dependence = false; - /** External B field */ + /** External E/B fields */ bool m_add_ext_particle_B_field = false; std::array< amrex::ParserExecutor<4>, 3> m_B_external; + bool m_add_ext_particle_E_field = false; + std::array< amrex::ParserExecutor<4>, 3> m_E_external; + // Declare multifabs specifically needed for the hybrid-PIC model amrex::Vector< std::unique_ptr > rho_fp_temp; amrex::Vector, 3 > > current_fp_temp; diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index f6863dba891..07ad9c25256 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -165,6 +165,15 @@ void HybridPICModel::InitData () m_add_ext_particle_B_field = true; } + if ( mypc.m_B_ext_particle_s == "parse_e_ext_particle_function") { + constexpr auto num_arguments = 4; //x,y,z,t + m_E_external[0] = mypc.m_Ex_particle_parser->compile(); + m_E_external[1] = mypc.m_Ey_particle_parser->compile(); + m_E_external[2] = mypc.m_Ez_particle_parser->compile(); + + m_add_ext_particle_E_field = true; + } + // Get the grid staggering of the fields 
involved in calculating E amrex::IntVect Jx_stag = warpx.getField(FieldType::current_fp, 0,0).ixType().toIntVect(); amrex::IntVect Jy_stag = warpx.getField(FieldType::current_fp, 0,1).ixType().toIntVect(); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index cfe0b9d1e8d..52f99178414 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -425,6 +425,11 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( const auto Bt_part = hybrid_model->m_B_external[1]; const auto Bz_part = hybrid_model->m_B_external[2]; + const bool include_E_ext_part = hybrid_model->m_add_ext_particle_E_field; + const auto Er_part = hybrid_model->m_E_external[0]; + const auto Et_part = hybrid_model->m_E_external[1]; + const auto Ez_part = hybrid_model->m_E_external[2]; + auto & warpx = WarpX::GetInstance(); auto t = warpx.gett_new(lev); @@ -632,6 +637,17 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( auto nabla2Jr = T_Algo::Dr_rDr_over_r(Jr, r, dr, coefs_r, n_coefs_r, i, j, 0, 0); Er(i, j, 0) -= eta_h * nabla2Jr; } + + if (include_E_ext_part) { + // Determine r and z on nodal mesh at i and j + const amrex::Real fac_x = (1._rt - Er_stag[0]) * dx_lev[0] * 0.5_rt; + const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; + const amrex::Real y = 0._rt; + const amrex::Real fac_z = (1._rt - Er_stag[1]) * dx_lev[1] * 0.5_rt; + const amrex::Real z = j*dx_lev[1] + real_box.lo(1) + fac_z; + + Er(i, j, 0) -= Er_part(x,y,z,t); + } }, // Et calculation @@ -675,6 +691,17 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( if (solve_for_Faraday) { Et(i, j, 0) += eta(rho_val, jtot_val) * Jt(i, j, 0); } // Note: Hyper-resisitivity should be revisited here when modal decomposition is implemented + + if (include_E_ext_part) { + // Determine r and z on nodal mesh at i and j + const amrex::Real fac_x = 
(1._rt - Et_stag[0]) * dx_lev[0] * 0.5_rt; + const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; + const amrex::Real y = 0._rt; + const amrex::Real fac_z = (1._rt - Et_stag[1]) * dx_lev[1] * 0.5_rt; + const amrex::Real z = j*dx_lev[1] + real_box.lo(1) + fac_z; + + Et(i, j, 0) -= Et_part(x,y,z,t); + } }, // Ez calculation @@ -714,6 +741,17 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( auto nabla2Jz = T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, 0, 0); Ez(i, j, 0) -= eta_h * nabla2Jz; } + + if (include_E_ext_part) { + // Determine r and z on nodal mesh at i and j + const amrex::Real fac_x = (1._rt - Ez_stag[0]) * dx_lev[0] * 0.5_rt; + const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; + const amrex::Real y = 0._rt; + const amrex::Real fac_z = (1._rt - Ez_stag[1]) * dx_lev[1] * 0.5_rt; + const amrex::Real z = j*dx_lev[1] + real_box.lo(1) + fac_z; + + Ez(i, j, 0) -= Ez_part(x,y,z,t); + } } ); @@ -759,6 +797,11 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( const auto By_part = hybrid_model->m_B_external[1]; const auto Bz_part = hybrid_model->m_B_external[2]; + const bool include_E_ext_part = hybrid_model->m_add_ext_particle_E_field; + const auto Ex_part = hybrid_model->m_E_external[0]; + const auto Ey_part = hybrid_model->m_E_external[1]; + const auto Ez_part = hybrid_model->m_E_external[2]; + auto & warpx = WarpX::GetInstance(); auto t = warpx.gett_new(lev); @@ -962,6 +1005,18 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( auto nabla2Jx = T_Algo::Dxx(Jx, coefs_x, n_coefs_x, i, j, k); Ex(i, j, k) -= eta_h * nabla2Jx; } + + if (include_E_ext_part) { + // Determine x, y, and z on nodal mesh at i, j, & k + const amrex::Real fac_x = (1._rt - Ex_stag[0]) * dx_lev[0] * 0.5_rt; + const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; + const amrex::Real fac_y = (1._rt - Ex_stag[1]) * dx_lev[1] * 0.5_rt; + const amrex::Real y = j*dx_lev[1] + real_box.lo(1) + fac_y; + const amrex::Real fac_z = (1._rt - Ex_stag[2]) * 
dx_lev[2] * 0.5_rt; + const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; + + Ex(i, j, k) -= Ex_part(x,y,z,t); + } }, // Ey calculation @@ -1006,6 +1061,18 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( auto nabla2Jy = T_Algo::Dyy(Jy, coefs_y, n_coefs_y, i, j, k); Ey(i, j, k) -= eta_h * nabla2Jy; } + + if (include_E_ext_part) { + // Determine x, y, and z on nodal mesh at i, j, & k + const amrex::Real fac_x = (1._rt - Ey_stag[0]) * dx_lev[0] * 0.5_rt; + const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; + const amrex::Real fac_y = (1._rt - Ey_stag[1]) * dx_lev[1] * 0.5_rt; + const amrex::Real y = j*dx_lev[1] + real_box.lo(1) + fac_y; + const amrex::Real fac_z = (1._rt - Ey_stag[2]) * dx_lev[2] * 0.5_rt; + const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; + + Ey(i, j, k) -= Ey_part(x,y,z,t); + } }, // Ez calculation @@ -1046,6 +1113,18 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( auto nabla2Jz = T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, k); Ez(i, j, k) -= eta_h * nabla2Jz; } + + if (include_E_ext_part) { + // Determine x, y, and z on nodal mesh at i, j, & k + const amrex::Real fac_x = (1._rt - Ez_stag[0]) * dx_lev[0] * 0.5_rt; + const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; + const amrex::Real fac_y = (1._rt - Ez_stag[1]) * dx_lev[1] * 0.5_rt; + const amrex::Real y = j*dx_lev[1] + real_box.lo(1) + fac_y; + const amrex::Real fac_z = (1._rt - Ez_stag[2]) * dx_lev[2] * 0.5_rt; + const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; + + Ez(i, j, k) -= Ez_part(x,y,z,t); + } } ); diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index cbbfc26dfd6..743428fa2b3 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -1339,8 +1339,9 @@ void WarpX::CheckKnownIssues() ); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( (!external_particle_field_used - || mypc->m_B_ext_particle_s == "parse_b_ext_particle_function"), - "The 
hybrid-PIC algorithm only works with analytical external B fields " + || mypc->m_B_ext_particle_s == "parse_b_ext_particle_function" + || mypc->m_E_ext_particle_s == "parse_e_ext_particle_function"), + "The hybrid-PIC algorithm only works with analytical external E/B fields " "applied directly to particles." ); } diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index ef1668de4c0..647b3e0a6b2 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -810,8 +810,8 @@ WarpX::ReadParameters () { // Filter currently not working with FDTD solver in RZ geometry along R // (see https://github.com/ECP-WarpX/WarpX/issues/1943) - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!use_filter || filter_npass_each_dir[0] == 0, - "In RZ geometry with FDTD, filtering can only be apply along z. This can be controlled by setting warpx.filter_npass_each_dir"); + //WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!use_filter || filter_npass_each_dir[0] == 0, + // "In RZ geometry with FDTD, filtering can only be apply along z. This can be controlled by setting warpx.filter_npass_each_dir"); } #endif From f82bb978b33190f50343f9fb4aa32df93856571c Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Thu, 12 Sep 2024 09:41:19 -0700 Subject: [PATCH 05/86] Fixing bug that leads to garbage E fields. 
--- .../HybridPICModel/HybridPICModel.cpp | 2 +- .../FiniteDifferenceSolver/HybridPICSolveE.cpp | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 07ad9c25256..388275109be 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -165,7 +165,7 @@ void HybridPICModel::InitData () m_add_ext_particle_B_field = true; } - if ( mypc.m_B_ext_particle_s == "parse_e_ext_particle_function") { + if ( mypc.m_E_ext_particle_s == "parse_e_ext_particle_function") { constexpr auto num_arguments = 4; //x,y,z,t m_E_external[0] = mypc.m_Ex_particle_parser->compile(); m_E_external[1] = mypc.m_Ey_particle_parser->compile(); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 52f99178414..fb622dee9fb 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -433,6 +433,14 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( auto & warpx = WarpX::GetInstance(); auto t = warpx.gett_new(lev); + // amrex::Print() + // << "Bz = " << Bz_part(0,0,0,t) + // << ", Br = " << Br_part(0,0,0,t) + // << ", Et = " << Et_part(0,0,0,t) + // << ", t=" << t + // << ", include E?: " << include_E_ext_part + // << std::endl; + auto dx_lev = warpx.Geom(lev).CellSizeArray(); const RealBox& real_box = warpx.Geom(lev).ProbDomain(); const auto nodal_flag = IntVect::TheNodeVector(); From 98fd8af0a8cb711fc9f656058a5b0bc3cf3ec396 Mon Sep 17 00:00:00 2001 From: "S. 
Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Sat, 14 Sep 2024 22:55:21 -0700 Subject: [PATCH 06/86] Fixed issue with external parser in HybridPIC model object --- .../HybridPICModel/HybridPICModel.H | 8 +++ .../HybridPICModel/HybridPICModel.cpp | 35 +++++++--- .../HybridPICSolveE.cpp | 68 ++++++++----------- Source/Particles/Gather/GetExternalFields.H | 6 +- 4 files changed, 68 insertions(+), 49 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index 0e9c6faae7c..e17bb554275 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -189,9 +189,17 @@ public: /** External E/B fields */ bool m_add_ext_particle_B_field = false; + std::string m_Bx_ext_part_function = "0.0"; + std::string m_By_ext_part_function = "0.0"; + std::string m_Bz_ext_part_function = "0.0"; + std::array< std::unique_ptr, 3> m_B_external_parser; std::array< amrex::ParserExecutor<4>, 3> m_B_external; bool m_add_ext_particle_E_field = false; + std::string m_Ex_ext_part_function = "0.0"; + std::string m_Ey_ext_part_function = "0.0"; + std::string m_Ez_ext_part_function = "0.0"; + std::array< std::unique_ptr, 3> m_E_external_parser; std::array< amrex::ParserExecutor<4>, 3> m_E_external; // Declare multifabs specifically needed for the hybrid-PIC model diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 388275109be..621765d5a49 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -55,6 +55,15 @@ void HybridPICModel::ReadParameters () pp_hybrid.query("Jx_external_grid_function(x,y,z,t)", m_Jx_ext_grid_function); 
pp_hybrid.query("Jy_external_grid_function(x,y,z,t)", m_Jy_ext_grid_function); pp_hybrid.query("Jz_external_grid_function(x,y,z,t)", m_Jz_ext_grid_function); + + // external fields + const ParmParse pp_part("particles"); + pp_hybrid.query("Bx_external_particle_function(x,y,z,t)", m_Bx_ext_part_function); + pp_hybrid.query("By_external_particle_function(x,y,z,t)", m_By_ext_part_function); + pp_hybrid.query("Bz_external_particle_function(x,y,z,t)", m_Bz_ext_part_function); + pp_hybrid.query("Ex_external_particle_function(x,y,z,t)", m_Ex_ext_part_function); + pp_hybrid.query("Ey_external_particle_function(x,y,z,t)", m_Ey_ext_part_function); + pp_hybrid.query("Ez_external_particle_function(x,y,z,t)", m_Ez_ext_part_function); } void HybridPICModel::AllocateMFs (int nlevs_max) @@ -157,19 +166,29 @@ void HybridPICModel::InitData () const auto& mypc = warpx.GetPartContainer(); if ( mypc.m_B_ext_particle_s == "parse_b_ext_particle_function") { - constexpr auto num_arguments = 4; //x,y,z,t - m_B_external[0] = mypc.m_Bx_particle_parser->compile(); - m_B_external[1] = mypc.m_By_particle_parser->compile(); - m_B_external[2] = mypc.m_Bz_particle_parser->compile(); + m_B_external_parser[0] = std::make_unique( + utils::parser::makeParser(m_Bx_ext_part_function,{"x","y","z","t"})); + m_B_external_parser[1] = std::make_unique( + utils::parser::makeParser(m_By_ext_part_function,{"x","y","z","t"})); + m_B_external_parser[2] = std::make_unique( + utils::parser::makeParser(m_Bz_ext_part_function,{"x","y","z","t"})); + m_B_external[0] = m_B_external_parser[0]->compile<4>(); + m_B_external[1] = m_B_external_parser[1]->compile<4>(); + m_B_external[2] = m_B_external_parser[2]->compile<4>(); m_add_ext_particle_B_field = true; } if ( mypc.m_E_ext_particle_s == "parse_e_ext_particle_function") { - constexpr auto num_arguments = 4; //x,y,z,t - m_E_external[0] = mypc.m_Ex_particle_parser->compile(); - m_E_external[1] = mypc.m_Ey_particle_parser->compile(); - m_E_external[2] = 
mypc.m_Ez_particle_parser->compile(); + m_E_external_parser[0] = std::make_unique( + utils::parser::makeParser(m_Ex_ext_part_function,{"x","y","z","t"})); + m_E_external_parser[1] = std::make_unique( + utils::parser::makeParser(m_Ey_ext_part_function,{"x","y","z","t"})); + m_E_external_parser[2] = std::make_unique( + utils::parser::makeParser(m_Ez_ext_part_function,{"x","y","z","t"})); + m_E_external[0] = m_E_external_parser[0]->compile<4>(); + m_E_external[1] = m_E_external_parser[0]->compile<4>(); + m_E_external[2] = m_E_external_parser[0]->compile<4>(); m_add_ext_particle_E_field = true; } diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index fb622dee9fb..2b36ea4c5f3 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -433,14 +433,6 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( auto & warpx = WarpX::GetInstance(); auto t = warpx.gett_new(lev); - // amrex::Print() - // << "Bz = " << Bz_part(0,0,0,t) - // << ", Br = " << Br_part(0,0,0,t) - // << ", Et = " << Et_part(0,0,0,t) - // << ", t=" << t - // << ", include E?: " << include_E_ext_part - // << std::endl; - auto dx_lev = warpx.Geom(lev).CellSizeArray(); const RealBox& real_box = warpx.Geom(lev).ProbDomain(); const auto nodal_flag = IntVect::TheNodeVector(); @@ -525,14 +517,14 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( if (include_B_ext_part) { // Determine r and z on nodal mesh at i and j const amrex::Real fac_x = (1._rt - nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real y = 0._rt; + const amrex::Real xx = i*dx_lev[0] + real_box.lo(0) + fac_x; + const amrex::Real yy = 0._rt; const amrex::Real fac_z = (1._rt - nodal_flag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real z = j*dx_lev[1] + real_box.lo(1) + fac_z; + const amrex::Real 
zz = j*dx_lev[1] + real_box.lo(1) + fac_z; - Br_interp += Br_part(x,y,z,t); - Bt_interp += Bt_part(x,y,z,t); - Bz_interp += Bz_part(x,y,z,t); + Br_interp += Br_part(xx,yy,zz,t); + Bt_interp += Bt_part(xx,yy,zz,t); + Bz_interp += Bz_part(xx,yy,zz,t); } // calculate enE = (J - Ji) x B @@ -649,12 +641,12 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( if (include_E_ext_part) { // Determine r and z on nodal mesh at i and j const amrex::Real fac_x = (1._rt - Er_stag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real y = 0._rt; + const amrex::Real xx = i*dx_lev[0] + real_box.lo(0) + fac_x; + const amrex::Real yy = 0._rt; const amrex::Real fac_z = (1._rt - Er_stag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real z = j*dx_lev[1] + real_box.lo(1) + fac_z; + const amrex::Real zz = j*dx_lev[1] + real_box.lo(1) + fac_z; - Er(i, j, 0) -= Er_part(x,y,z,t); + Er(i, j, 0) -= Er_part(xx,yy,zz,t); } }, @@ -703,12 +695,12 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( if (include_E_ext_part) { // Determine r and z on nodal mesh at i and j const amrex::Real fac_x = (1._rt - Et_stag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real y = 0._rt; + const amrex::Real xx = i*dx_lev[0] + real_box.lo(0) + fac_x; + const amrex::Real yy = 0._rt; const amrex::Real fac_z = (1._rt - Et_stag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real z = j*dx_lev[1] + real_box.lo(1) + fac_z; + const amrex::Real zz = j*dx_lev[1] + real_box.lo(1) + fac_z; - Et(i, j, 0) -= Et_part(x,y,z,t); + Et(i, j, 0) -= Et_part(xx,yy,zz,t); } }, @@ -753,12 +745,12 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( if (include_E_ext_part) { // Determine r and z on nodal mesh at i and j const amrex::Real fac_x = (1._rt - Ez_stag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real y = 0._rt; + const amrex::Real xx = i*dx_lev[0] + 
real_box.lo(0) + fac_x; + const amrex::Real yy = 0._rt; const amrex::Real fac_z = (1._rt - Ez_stag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real z = j*dx_lev[1] + real_box.lo(1) + fac_z; + const amrex::Real zz = j*dx_lev[1] + real_box.lo(1) + fac_z; - Ez(i, j, 0) -= Ez_part(x,y,z,t); + Ez(i, j, 0) -= Ez_part(xx,yy,zz,t); } } ); @@ -1017,13 +1009,13 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( if (include_E_ext_part) { // Determine x, y, and z on nodal mesh at i, j, & k const amrex::Real fac_x = (1._rt - Ex_stag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; + const amrex::Real xx = i*dx_lev[0] + real_box.lo(0) + fac_x; const amrex::Real fac_y = (1._rt - Ex_stag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real y = j*dx_lev[1] + real_box.lo(1) + fac_y; + const amrex::Real yy = j*dx_lev[1] + real_box.lo(1) + fac_y; const amrex::Real fac_z = (1._rt - Ex_stag[2]) * dx_lev[2] * 0.5_rt; - const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; + const amrex::Real zz = k*dx_lev[2] + real_box.lo(2) + fac_z; - Ex(i, j, k) -= Ex_part(x,y,z,t); + Ex(i, j, k) -= Ex_part(xx,yy,zz,t); } }, @@ -1073,13 +1065,13 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( if (include_E_ext_part) { // Determine x, y, and z on nodal mesh at i, j, & k const amrex::Real fac_x = (1._rt - Ey_stag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; + const amrex::Real xx = i*dx_lev[0] + real_box.lo(0) + fac_x; const amrex::Real fac_y = (1._rt - Ey_stag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real y = j*dx_lev[1] + real_box.lo(1) + fac_y; + const amrex::Real yy = j*dx_lev[1] + real_box.lo(1) + fac_y; const amrex::Real fac_z = (1._rt - Ey_stag[2]) * dx_lev[2] * 0.5_rt; - const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; + const amrex::Real zz = k*dx_lev[2] + real_box.lo(2) + fac_z; - Ey(i, j, k) -= Ey_part(x,y,z,t); + Ey(i, j, k) -= Ey_part(xx,yy,zz,t); } }, @@ -1125,13 +1117,13 @@ void 
FiniteDifferenceSolver::HybridPICSolveECartesian ( if (include_E_ext_part) { // Determine x, y, and z on nodal mesh at i, j, & k const amrex::Real fac_x = (1._rt - Ez_stag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; + const amrex::Real xx = i*dx_lev[0] + real_box.lo(0) + fac_x; const amrex::Real fac_y = (1._rt - Ez_stag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real y = j*dx_lev[1] + real_box.lo(1) + fac_y; + const amrex::Real yy = j*dx_lev[1] + real_box.lo(1) + fac_y; const amrex::Real fac_z = (1._rt - Ez_stag[2]) * dx_lev[2] * 0.5_rt; - const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; + const amrex::Real zz = k*dx_lev[2] + real_box.lo(2) + fac_z; - Ez(i, j, k) -= Ez_part(x,y,z,t); + Ez(i, j, k) -= Ez_part(xx,yy,zz,t); } } ); diff --git a/Source/Particles/Gather/GetExternalFields.H b/Source/Particles/Gather/GetExternalFields.H index 7000d6d7c26..90a61bd25db 100644 --- a/Source/Particles/Gather/GetExternalFields.H +++ b/Source/Particles/Gather/GetExternalFields.H @@ -112,9 +112,9 @@ struct GetExternalEBField lab_time = m_gamma_boost*m_time + m_uz_boost*z*inv_c2; z = m_gamma_boost*z + m_uz_boost*m_time; } - Bx = m_Bxfield_partparser(x, y, z, lab_time); - By = m_Byfield_partparser(x, y, z, lab_time); - Bz = m_Bzfield_partparser(x, y, z, lab_time); + Bx = m_Bxfield_partparser((amrex::ParticleReal) x, (amrex::ParticleReal) y, (amrex::ParticleReal) z, lab_time); + By = m_Byfield_partparser((amrex::ParticleReal) x, (amrex::ParticleReal) y, (amrex::ParticleReal) z, lab_time); + Bz = m_Bzfield_partparser((amrex::ParticleReal) x, (amrex::ParticleReal) y, (amrex::ParticleReal) z, lab_time); } if (m_Etype == RepeatedPlasmaLens || From 5f754286eb544a08bad69bf1bd6e901e7b375cbc Mon Sep 17 00:00:00 2001 From: "S. 
Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Mon, 16 Sep 2024 16:46:18 -0700 Subject: [PATCH 07/86] Rebasing to latest development branch Adding time value for particle field lookup in field substepping. --- .../FiniteDifferenceSolver.H | 3 ++ .../HybridPICModel/HybridPICModel.H | 9 ++++-- .../HybridPICModel/HybridPICModel.cpp | 31 ++++++++++++------- .../HybridPICSolveE.cpp | 9 +++--- .../FieldSolver/WarpXPushFieldsHybridPIC.cpp | 21 +++++++++++-- 5 files changed, 51 insertions(+), 22 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index 0a9f21e6863..90f8c87df48 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -159,6 +159,7 @@ class FiniteDifferenceSolver std::unique_ptr const& rhofield, std::unique_ptr const& Pefield, std::array< std::unique_ptr, 3 > const& edge_lengths, + amrex::Real t, int lev, HybridPICModel const* hybrid_model, bool solve_for_Faraday ); @@ -244,6 +245,7 @@ class FiniteDifferenceSolver std::unique_ptr const& rhofield, std::unique_ptr const& Pefield, std::array< std::unique_ptr, 3 > const& edge_lengths, + amrex::Real t, int lev, HybridPICModel const* hybrid_model, bool solve_for_Faraday ); @@ -349,6 +351,7 @@ class FiniteDifferenceSolver std::unique_ptr const& rhofield, std::unique_ptr const& Pefield, std::array< std::unique_ptr, 3 > const& edge_lengths, + amrex::Real t, int lev, HybridPICModel const* hybrid_model, bool solve_for_Faraday ); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index e17bb554275..ebd71be7e84 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -91,6 +91,7 
@@ public: amrex::Vector, 3>> const& Bfield, amrex::Vector> const& rhofield, amrex::Vector, 3>> const& edge_lengths, + amrex::Real t, bool solve_for_Faraday); void HybridPICSolveE ( @@ -99,6 +100,7 @@ public: std::array< std::unique_ptr, 3> const& Bfield, std::unique_ptr const& rhofield, std::array< std::unique_ptr, 3> const& edge_lengths, + amrex::Real t, int lev, bool solve_for_Faraday); void HybridPICSolveE ( @@ -107,6 +109,7 @@ public: std::array< std::unique_ptr, 3> const& Bfield, std::unique_ptr const& rhofield, std::array< std::unique_ptr, 3> const& edge_lengths, + amrex::Real t, int lev, PatchType patch_type, bool solve_for_Faraday); void BfieldEvolveRK ( @@ -115,7 +118,7 @@ public: amrex::Vector, 3>> const& Jfield, amrex::Vector> const& rhofield, amrex::Vector, 3>> const& edge_lengths, - amrex::Real dt, DtType a_dt_type, + amrex::Real t, amrex::Real dt, DtType a_dt_type, amrex::IntVect ng, std::optional nodal_sync); void BfieldEvolveRK ( @@ -124,7 +127,7 @@ public: amrex::Vector, 3>> const& Jfield, amrex::Vector> const& rhofield, amrex::Vector, 3>> const& edge_lengths, - amrex::Real dt, int lev, DtType dt_type, + amrex::Real t, amrex::Real dt, int lev, DtType dt_type, amrex::IntVect ng, std::optional nodal_sync); void FieldPush ( @@ -133,7 +136,7 @@ public: amrex::Vector, 3>> const& Jfield, amrex::Vector> const& rhofield, amrex::Vector, 3>> const& edge_lengths, - amrex::Real dt, DtType dt_type, + amrex::Real t, amrex::Real dt, DtType dt_type, amrex::IntVect ng, std::optional nodal_sync); /** diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 621765d5a49..236dd708725 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -465,6 +465,7 @@ void HybridPICModel::HybridPICSolveE ( amrex::Vector, 3>> const& Bfield, 
amrex::Vector> const& rhofield, amrex::Vector, 3>> const& edge_lengths, + amrex::Real t, const bool solve_for_Faraday) { auto& warpx = WarpX::GetInstance(); @@ -472,7 +473,7 @@ void HybridPICModel::HybridPICSolveE ( { HybridPICSolveE( Efield[lev], Jfield[lev], Bfield[lev], rhofield[lev], - edge_lengths[lev], lev, solve_for_Faraday + edge_lengths[lev], t, lev, solve_for_Faraday ); } } @@ -483,12 +484,13 @@ void HybridPICModel::HybridPICSolveE ( std::array< std::unique_ptr, 3> const& Bfield, std::unique_ptr const& rhofield, std::array< std::unique_ptr, 3> const& edge_lengths, + amrex::Real t, const int lev, const bool solve_for_Faraday) { WARPX_PROFILE("WarpX::HybridPICSolveE()"); HybridPICSolveE( - Efield, Jfield, Bfield, rhofield, edge_lengths, lev, + Efield, Jfield, Bfield, rhofield, edge_lengths, t, lev, PatchType::fine, solve_for_Faraday ); if (lev > 0) @@ -504,6 +506,7 @@ void HybridPICModel::HybridPICSolveE ( std::array< std::unique_ptr, 3> const& Bfield, std::unique_ptr const& rhofield, std::array< std::unique_ptr, 3> const& edge_lengths, + amrex::Real t, const int lev, PatchType patch_type, const bool solve_for_Faraday) { @@ -514,7 +517,7 @@ void HybridPICModel::HybridPICSolveE ( Efield, current_fp_ampere[lev], Jfield, current_fp_external[lev], Bfield, rhofield, electron_pressure_fp[lev], - edge_lengths, lev, this, solve_for_Faraday + edge_lengths, t, lev, this, solve_for_Faraday ); warpx.ApplyEfieldBoundary(lev, patch_type); } @@ -576,14 +579,14 @@ void HybridPICModel::BfieldEvolveRK ( amrex::Vector, 3>> const& Jfield, amrex::Vector> const& rhofield, amrex::Vector, 3>> const& edge_lengths, - amrex::Real dt, DtType dt_type, + amrex::Real t, amrex::Real dt, DtType dt_type, IntVect ng, std::optional nodal_sync ) { auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { BfieldEvolveRK( - Bfield, Efield, Jfield, rhofield, edge_lengths, dt, lev, dt_type, + Bfield, Efield, Jfield, rhofield, edge_lengths, t, dt, lev, dt_type, ng, 
nodal_sync ); } @@ -595,7 +598,7 @@ void HybridPICModel::BfieldEvolveRK ( amrex::Vector, 3>> const& Jfield, amrex::Vector> const& rhofield, amrex::Vector, 3>> const& edge_lengths, - amrex::Real dt, int lev, DtType dt_type, + amrex::Real t, amrex::Real dt, int lev, DtType dt_type, IntVect ng, std::optional nodal_sync ) { // Make copies of the B-field multifabs at t = n and create multifabs for @@ -618,11 +621,13 @@ void HybridPICModel::BfieldEvolveRK ( K[ii].setVal(0.0); } + amrex::Real t_eval = t; + // The Runge-Kutta scheme begins here. // Step 1: FieldPush( Bfield, Efield, Jfield, rhofield, edge_lengths, - 0.5_rt*dt, dt_type, ng, nodal_sync + t_eval, 0.5_rt*dt, dt_type, ng, nodal_sync ); // The Bfield is now given by: @@ -636,9 +641,10 @@ void HybridPICModel::BfieldEvolveRK ( } // Step 2: + t_eval = t+0.5_rt*dt; FieldPush( Bfield, Efield, Jfield, rhofield, edge_lengths, - 0.5_rt*dt, dt_type, ng, nodal_sync + t_eval, 0.5_rt*dt, dt_type, ng, nodal_sync ); // The Bfield is now given by: @@ -658,7 +664,7 @@ void HybridPICModel::BfieldEvolveRK ( // Step 3: FieldPush( Bfield, Efield, Jfield, rhofield, edge_lengths, - dt, dt_type, ng, nodal_sync + t_eval, dt, dt_type, ng, nodal_sync ); // The Bfield is now given by: @@ -672,9 +678,10 @@ void HybridPICModel::BfieldEvolveRK ( } // Step 4: + t_eval = t + dt; FieldPush( Bfield, Efield, Jfield, rhofield, edge_lengths, - 0.5_rt*dt, dt_type, ng, nodal_sync + t_eval, 0.5_rt*dt, dt_type, ng, nodal_sync ); // The Bfield is now given by: @@ -708,7 +715,7 @@ void HybridPICModel::FieldPush ( amrex::Vector, 3>> const& Jfield, amrex::Vector> const& rhofield, amrex::Vector, 3>> const& edge_lengths, - amrex::Real dt, DtType dt_type, + amrex::Real t, amrex::Real dt, DtType dt_type, IntVect ng, std::optional nodal_sync ) { auto& warpx = WarpX::GetInstance(); @@ -716,7 +723,7 @@ void HybridPICModel::FieldPush ( // Calculate J = curl x B / mu0 CalculateCurrentAmpere(Bfield, edge_lengths); // Calculate the E-field from Ohm's law - 
HybridPICSolveE(Efield, Jfield, Bfield, rhofield, edge_lengths, true); + HybridPICSolveE(Efield, Jfield, Bfield, rhofield, edge_lengths, t, true); warpx.FillBoundaryE(ng, nodal_sync); // Push forward the B-field using Faraday's law warpx.EvolveB(dt, dt_type); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 2b36ea4c5f3..ed47fee8433 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -360,6 +360,7 @@ void FiniteDifferenceSolver::HybridPICSolveE ( std::unique_ptr const& rhofield, std::unique_ptr const& Pefield, std::array< std::unique_ptr, 3 > const& edge_lengths, + amrex::Real t, int lev, HybridPICModel const* hybrid_model, const bool solve_for_Faraday) { @@ -370,14 +371,14 @@ void FiniteDifferenceSolver::HybridPICSolveE ( HybridPICSolveECylindrical ( Efield, Jfield, Jifield, Jextfield, Bfield, rhofield, Pefield, - edge_lengths, lev, hybrid_model, solve_for_Faraday + edge_lengths, t, lev, hybrid_model, solve_for_Faraday ); #else HybridPICSolveECartesian ( Efield, Jfield, Jifield, Jextfield, Bfield, rhofield, Pefield, - edge_lengths, lev, hybrid_model, solve_for_Faraday + edge_lengths, t, lev, hybrid_model, solve_for_Faraday ); #endif @@ -398,6 +399,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( std::unique_ptr const& rhofield, std::unique_ptr const& Pefield, std::array< std::unique_ptr, 3 > const& edge_lengths, + amrex::Real t, int lev, HybridPICModel const* hybrid_model, const bool solve_for_Faraday ) { @@ -431,7 +433,6 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( const auto Ez_part = hybrid_model->m_E_external[2]; auto & warpx = WarpX::GetInstance(); - auto t = warpx.gett_new(lev); auto dx_lev = warpx.Geom(lev).CellSizeArray(); const RealBox& real_box = warpx.Geom(lev).ProbDomain(); @@ -776,6 +777,7 @@ void 
FiniteDifferenceSolver::HybridPICSolveECartesian ( std::unique_ptr const& rhofield, std::unique_ptr const& Pefield, std::array< std::unique_ptr, 3 > const& edge_lengths, + amrex::Real t, int lev, HybridPICModel const* hybrid_model, const bool solve_for_Faraday ) { @@ -803,7 +805,6 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( const auto Ez_part = hybrid_model->m_E_external[2]; auto & warpx = WarpX::GetInstance(); - auto t = warpx.gett_new(lev); auto dx_lev = warpx.Geom(lev).CellSizeArray(); const RealBox& real_box = warpx.Geom(lev).ProbDomain(); diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index c16f0193b8d..2f711f31456 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -88,6 +88,12 @@ void WarpX::HybridPICEvolveFields () } } + // Calculate the electron pressure at t=n using rho^n + m_hybrid_pic_model->CalculateElectronPressure(DtType::FirstHalf); + + amrex::Real t_start = gett_old(0); + amrex::Real sub_dt = 0.5_rt/sub_steps*dt[0]; + // Push the B field from t=n to t=n+1/2 using the current and density // at t=n, while updating the E field along with B using the electron // momentum equation @@ -95,7 +101,9 @@ void WarpX::HybridPICEvolveFields () { m_hybrid_pic_model->BfieldEvolveRK( Bfield_fp, Efield_fp, current_fp_temp, rho_fp_temp, - m_edge_lengths, 0.5_rt/sub_steps*dt[0], + m_edge_lengths, + t_start + static_cast(sub_step)*sub_dt, + sub_dt, DtType::FirstHalf, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points ); @@ -113,12 +121,19 @@ void WarpX::HybridPICEvolveFields () ); } + // Calculate the electron pressure at t=n+1/2 + m_hybrid_pic_model->CalculateElectronPressure(DtType::SecondHalf); + + t_start += 0.5_rt*dt[0]; + // Now push the B field from t=n+1/2 to t=n+1 using the n+1/2 quantities for (int sub_step = 0; sub_step < sub_steps; sub_step++) { m_hybrid_pic_model->BfieldEvolveRK( Bfield_fp, Efield_fp, 
current_fp, rho_fp_temp, - m_edge_lengths, 0.5_rt/sub_steps*dt[0], + m_edge_lengths, + t_start + static_cast(sub_step)*sub_dt, + sub_dt, DtType::SecondHalf, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points ); @@ -148,7 +163,7 @@ void WarpX::HybridPICEvolveFields () // Update the E field to t=n+1 using the extrapolated J_i^n+1 value m_hybrid_pic_model->CalculateCurrentAmpere(Bfield_fp, m_edge_lengths); m_hybrid_pic_model->HybridPICSolveE( - Efield_fp, current_fp_temp, Bfield_fp, rho_fp, m_edge_lengths, false + Efield_fp, current_fp_temp, Bfield_fp, rho_fp, m_edge_lengths, gett_new(0), false ); FillBoundaryE(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); From 36caed125b95c27fe5c940b2315e7c087e583f28 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 18 Sep 2024 21:52:38 +0000 Subject: [PATCH 08/86] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H | 6 +++--- Source/Initialization/WarpXInitData.cpp | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index ebd71be7e84..584125ac683 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -91,7 +91,7 @@ public: amrex::Vector, 3>> const& Bfield, amrex::Vector> const& rhofield, amrex::Vector, 3>> const& edge_lengths, - amrex::Real t, + amrex::Real t, bool solve_for_Faraday); void HybridPICSolveE ( @@ -100,7 +100,7 @@ public: std::array< std::unique_ptr, 3> const& Bfield, std::unique_ptr const& rhofield, std::array< std::unique_ptr, 3> const& edge_lengths, - amrex::Real t, + amrex::Real t, int lev, bool solve_for_Faraday); void HybridPICSolveE ( @@ -109,7 
+109,7 @@ public: std::array< std::unique_ptr, 3> const& Bfield, std::unique_ptr const& rhofield, std::array< std::unique_ptr, 3> const& edge_lengths, - amrex::Real t, + amrex::Real t, int lev, PatchType patch_type, bool solve_for_Faraday); void BfieldEvolveRK ( diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 743428fa2b3..a191ddf6e2d 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -1338,7 +1338,7 @@ void WarpX::CheckKnownIssues() mypc->m_B_ext_particle_s != "none" || mypc->m_E_ext_particle_s != "none" ); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - (!external_particle_field_used + (!external_particle_field_used || mypc->m_B_ext_particle_s == "parse_b_ext_particle_function" || mypc->m_E_ext_particle_s == "parse_e_ext_particle_function"), "The hybrid-PIC algorithm only works with analytical external E/B fields " From f0ad73a72b9f3cef3c4d163e75ed30cb40b46105 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 18 Sep 2024 15:27:15 -0700 Subject: [PATCH 09/86] Removing electron pressure calculation. Fixing merge error. Signed-off-by: S. 
Eric Clark <25495882+clarkse@users.noreply.github.com> --- Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp | 6 ------ 1 file changed, 6 deletions(-) diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index 2f711f31456..0195c7a3356 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -88,9 +88,6 @@ void WarpX::HybridPICEvolveFields () } } - // Calculate the electron pressure at t=n using rho^n - m_hybrid_pic_model->CalculateElectronPressure(DtType::FirstHalf); - amrex::Real t_start = gett_old(0); amrex::Real sub_dt = 0.5_rt/sub_steps*dt[0]; @@ -121,9 +118,6 @@ void WarpX::HybridPICEvolveFields () ); } - // Calculate the electron pressure at t=n+1/2 - m_hybrid_pic_model->CalculateElectronPressure(DtType::SecondHalf); - t_start += 0.5_rt*dt[0]; // Now push the B field from t=n+1/2 to t=n+1 using the n+1/2 quantities From 1482f8bcf163aa2263d8c17354c77eefd8089b82 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Tue, 24 Sep 2024 12:48:32 -0700 Subject: [PATCH 10/86] Adding grid based split field advance for Hybrid solver to improve performance. 
--- Python/pywarpx/fields.py | 34 ++++ Python/pywarpx/picmi.py | 64 +++++++ .../HybridPICModel/HybridPICModel.H | 78 ++++++-- .../HybridPICModel/HybridPICModel.cpp | 177 ++++++++++++------ .../HybridPICSolveE.cpp | 152 ++++----------- .../FieldSolver/WarpXPushFieldsHybridPIC.cpp | 67 ++++++- Source/WarpX.cpp | 5 +- 7 files changed, 388 insertions(+), 189 deletions(-) diff --git a/Python/pywarpx/fields.py b/Python/pywarpx/fields.py index cbdd8d4517a..518c13e9bb9 100644 --- a/Python/pywarpx/fields.py +++ b/Python/pywarpx/fields.py @@ -696,6 +696,40 @@ def BzFPExternalWrapper(level=0, include_ghosts=False): mf_name="Bfield_fp_external[z]", level=level, include_ghosts=include_ghosts ) +def ExHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="Efield_hyb_external[x]", level=level, include_ghosts=include_ghosts + ) + + +def EyHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="Efield_hyb_external[y]", level=level, include_ghosts=include_ghosts + ) + + +def EzHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="Efield_hyb_external[z]", level=level, include_ghosts=include_ghosts + ) + + +def BxHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="Bfield_hyb_external[x]", level=level, include_ghosts=include_ghosts + ) + + +def ByHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="Bfield_hyb_external[y]", level=level, include_ghosts=include_ghosts + ) + + +def BzHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="Bfield_hyb_external[z]", level=level, include_ghosts=include_ghosts + ) def JxFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 0d51a8723b4..c8c4c0433d3 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1783,6 +1783,9 @@ class 
HybridPICSolver(picmistandard.base._ClassWithInit): Jx/y/z_external_function: str Function of space and time specifying external (non-plasma) currents. + + Ex/y/z_external_function: str + Function of space and time specifying external (non-plasma) E-fields. """ def __init__( @@ -1798,6 +1801,12 @@ def __init__( Jx_external_function=None, Jy_external_function=None, Jz_external_function=None, + Ex_expression=None, + Ey_expression=None, + Ez_expression=None, + Bx_expression=None, + By_expression=None, + Bz_expression=None, **kw, ): self.grid = grid @@ -1816,6 +1825,24 @@ def __init__( self.Jy_external_function = Jy_external_function self.Jz_external_function = Jz_external_function + self.add_external_fields = None + + self.Ex_external_function = Ex_expression + self.Ey_external_function = Ey_expression + self.Ez_external_function = Ez_expression + + self.Bx_external_function = Bx_expression + self.By_external_function = By_expression + self.Bz_external_function = Bz_expression + + if (Ex_expression is not None + or Ey_expression is not None + or Ez_expression is not None + or Bx_expression is not None + or By_expression is not None + or Bz_expression is not None): + self.add_external_fields = True + # Handle keyword arguments used in expressions self.user_defined_kw = {} for k in list(kw.keys()): @@ -1864,6 +1891,43 @@ def solver_initialize_inputs(self): self.Jz_external_function, self.mangle_dict ), ) + pywarpx.hybridpicmodel.add_external_fields = self.add_external_fields + pywarpx.hybridpicmodel.__setattr__( + "Bx_external_grid_function(x,y,z,t)", + pywarpx.my_constants.mangle_expression( + self.Bx_external_function, self.mangle_dict + ), + ) + pywarpx.hybridpicmodel.__setattr__( + "By_external_grid_function(x,y,z,t)", + pywarpx.my_constants.mangle_expression( + self.By_external_function, self.mangle_dict + ), + ) + pywarpx.hybridpicmodel.__setattr__( + "Bz_external_grid_function(x,y,z,t)", + pywarpx.my_constants.mangle_expression( + self.Bz_external_function, 
self.mangle_dict + ), + ) + pywarpx.hybridpicmodel.__setattr__( + "Ex_external_grid_function(x,y,z,t)", + pywarpx.my_constants.mangle_expression( + self.Ex_external_function, self.mangle_dict + ), + ) + pywarpx.hybridpicmodel.__setattr__( + "Ey_external_grid_function(x,y,z,t)", + pywarpx.my_constants.mangle_expression( + self.Ey_external_function, self.mangle_dict + ), + ) + pywarpx.hybridpicmodel.__setattr__( + "Ez_external_grid_function(x,y,z,t)", + pywarpx.my_constants.mangle_expression( + self.Ez_external_function, self.mangle_dict + ), + ) class ElectrostaticSolver(picmistandard.PICMI_ElectrostaticSolver): diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index 584125ac683..eaadf3e7a95 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -21,6 +21,9 @@ #include #include +#include +#include +#include #include @@ -38,10 +41,26 @@ public: /** Allocate hybrid-PIC specific multifabs. Called in constructor. 
*/ void AllocateMFs (int nlevs_max); - void AllocateLevelMFs (int lev, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm, - int ncomps, const amrex::IntVect& ngJ, const amrex::IntVect& ngRho, - const amrex::IntVect& jx_nodal_flag, const amrex::IntVect& jy_nodal_flag, - const amrex::IntVect& jz_nodal_flag, const amrex::IntVect& rho_nodal_flag); + void AllocateLevelMFs ( + int lev, + const amrex::BoxArray& ba, + const amrex::DistributionMapping& dm, + const int ncomps, + const amrex::IntVect& ngJ, + const amrex::IntVect& ngRho, + const amrex::IntVect& ngE, + const amrex::IntVect& ngB, + const amrex::IntVect& jx_nodal_flag, + const amrex::IntVect& jy_nodal_flag, + const amrex::IntVect& jz_nodal_flag, + const amrex::IntVect& rho_nodal_flag, + const amrex::IntVect& Ex_nodal_flag, + const amrex::IntVect& Ey_nodal_flag, + const amrex::IntVect& Ez_nodal_flag, + const amrex::IntVect& Bx_nodal_flag, + const amrex::IntVect& By_nodal_flag, + const amrex::IntVect& Bz_nodal_flag + ); /** Helper function to clear values from hybrid-PIC specific multifabs. 
*/ void ClearLevel (int lev); @@ -57,11 +76,39 @@ public: void GetCurrentExternal ( amrex::Vector, 3>> const& edge_lengths ); + void GetCurrentExternal ( std::array< std::unique_ptr, 3> const& edge_lengths, int lev ); + void GetFieldsExternal ( + amrex::Vector, 3>> const& edge_lengths + ); + + void GetFieldsExternal ( + amrex::Vector, 3>> const& edge_lengths, + amrex::Real t); + + void GetFieldsExternal ( + std::array< std::unique_ptr, 3> const& edge_lengths, + int lev + ); + + void GetExternalFieldFromExpression ( + std::array< std::unique_ptr, 3> const& field, + std::array< amrex::ParserExecutor<4>, 3> const& expression, + std::array< std::unique_ptr, 3> const& edge_lengths, + int lev + ); + + void GetExternalFieldFromExpression ( + std::array< std::unique_ptr, 3> const& field, + std::array< amrex::ParserExecutor<4>, 3> const& expression, + std::array< std::unique_ptr, 3> const& edge_lengths, + int lev, amrex::Real t + ); + /** * \brief * Function to calculate the total current based on Ampere's law while @@ -152,7 +199,7 @@ public: * charge density (and assumption of quasi-neutrality) using the user * specified electron equation of state. 
* - * \param[out] Pe_filed scalar electron pressure MultiFab at a given level + * \param[out] Pe_field scalar electron pressure MultiFab at a given level * \param[in] rho_field scalar ion charge density Multifab at a given level */ void FillElectronPressureMF ( @@ -191,17 +238,17 @@ public: bool m_external_field_has_time_dependence = false; /** External E/B fields */ - bool m_add_ext_particle_B_field = false; - std::string m_Bx_ext_part_function = "0.0"; - std::string m_By_ext_part_function = "0.0"; - std::string m_Bz_ext_part_function = "0.0"; + bool m_add_external_fields = false; + + std::string m_Bx_ext_grid_function = "0.0"; + std::string m_By_ext_grid_function = "0.0"; + std::string m_Bz_ext_grid_function = "0.0"; std::array< std::unique_ptr, 3> m_B_external_parser; std::array< amrex::ParserExecutor<4>, 3> m_B_external; - bool m_add_ext_particle_E_field = false; - std::string m_Ex_ext_part_function = "0.0"; - std::string m_Ey_ext_part_function = "0.0"; - std::string m_Ez_ext_part_function = "0.0"; + std::string m_Ex_ext_grid_function = "0.0"; + std::string m_Ey_ext_grid_function = "0.0"; + std::string m_Ez_ext_grid_function = "0.0"; std::array< std::unique_ptr, 3> m_E_external_parser; std::array< amrex::ParserExecutor<4>, 3> m_E_external; @@ -212,6 +259,11 @@ public: amrex::Vector, 3 > > current_fp_external; amrex::Vector< std::unique_ptr > electron_pressure_fp; + amrex::Vector, 3 > > Bfield_hyb_external; + amrex::Vector, 3 > > Efield_hyb_external; + // amrex::Vector, 3 > > Bfield_hyb_self; + // amrex::Vector, 3 > > Efield_hyb_self; + // Helper functions to retrieve hybrid-PIC multifabs [[nodiscard]] amrex::MultiFab* get_pointer_current_fp_ampere (int lev, int direction) const diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 236dd708725..64e2ab0d445 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp 
+++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -57,13 +57,16 @@ void HybridPICModel::ReadParameters () pp_hybrid.query("Jz_external_grid_function(x,y,z,t)", m_Jz_ext_grid_function); // external fields - const ParmParse pp_part("particles"); - pp_hybrid.query("Bx_external_particle_function(x,y,z,t)", m_Bx_ext_part_function); - pp_hybrid.query("By_external_particle_function(x,y,z,t)", m_By_ext_part_function); - pp_hybrid.query("Bz_external_particle_function(x,y,z,t)", m_Bz_ext_part_function); - pp_hybrid.query("Ex_external_particle_function(x,y,z,t)", m_Ex_ext_part_function); - pp_hybrid.query("Ey_external_particle_function(x,y,z,t)", m_Ey_ext_part_function); - pp_hybrid.query("Ez_external_particle_function(x,y,z,t)", m_Ez_ext_part_function); + pp_hybrid.query("add_external_fields", m_add_external_fields); + + if (m_add_external_fields) { + pp_hybrid.query("Bx_external_grid_function(x,y,z,t)", m_Bx_ext_grid_function); + pp_hybrid.query("By_external_grid_function(x,y,z,t)", m_By_ext_grid_function); + pp_hybrid.query("Bz_external_grid_function(x,y,z,t)", m_Bz_ext_grid_function); + pp_hybrid.query("Ex_external_grid_function(x,y,z,t)", m_Ex_ext_grid_function); + pp_hybrid.query("Ey_external_grid_function(x,y,z,t)", m_Ey_ext_grid_function); + pp_hybrid.query("Ez_external_grid_function(x,y,z,t)", m_Ez_ext_grid_function); + } } void HybridPICModel::AllocateMFs (int nlevs_max) @@ -73,14 +76,30 @@ void HybridPICModel::AllocateMFs (int nlevs_max) current_fp_temp.resize(nlevs_max); current_fp_ampere.resize(nlevs_max); current_fp_external.resize(nlevs_max); + + if (m_add_external_fields) { + Bfield_hyb_external.resize(nlevs_max); + Efield_hyb_external.resize(nlevs_max); + // Bfield_hyb_self.resize(nlevs_max); + // Efield_hyb_self.resize(nlevs_max); + } } -void HybridPICModel::AllocateLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm, - const int ncomps, const IntVect& ngJ, const IntVect& ngRho, - const IntVect& 
jx_nodal_flag, - const IntVect& jy_nodal_flag, - const IntVect& jz_nodal_flag, - const IntVect& rho_nodal_flag) +void HybridPICModel::AllocateLevelMFs ( + int lev, const BoxArray& ba, const DistributionMapping& dm, + const int ncomps, + const IntVect& ngJ, const IntVect& ngRho, + const IntVect& ngE, const IntVect& ngB, + const IntVect& jx_nodal_flag, + const IntVect& jy_nodal_flag, + const IntVect& jz_nodal_flag, + const IntVect& rho_nodal_flag, + const IntVect& Ex_nodal_flag, + const IntVect& Ey_nodal_flag, + const IntVect& Ez_nodal_flag, + const IntVect& Bx_nodal_flag, + const IntVect& By_nodal_flag, + const IntVect& Bz_nodal_flag) { // The "electron_pressure_fp" multifab stores the electron pressure calculated // from the specified equation of state. @@ -120,6 +139,22 @@ void HybridPICModel::AllocateLevelMFs (int lev, const BoxArray& ba, const Distri WarpX::AllocInitMultiFab(current_fp_external[lev][2], amrex::convert(ba, IntVect(AMREX_D_DECL(1,1,1))), dm, ncomps, IntVect(AMREX_D_DECL(0,0,0)), lev, "current_fp_external[z]", 0.0_rt); + if (m_add_external_fields) { + // These are nodal to match when B-field is added in evaluation of Ohm's law + WarpX::AllocInitMultiFab(Bfield_hyb_external[lev][0], amrex::convert(ba, Bx_nodal_flag), + dm, ncomps, ngB, lev, "Bfield_hyb_external[x]", 0.0_rt); + WarpX::AllocInitMultiFab(Bfield_hyb_external[lev][1], amrex::convert(ba, By_nodal_flag), + dm, ncomps, ngB, lev, "Bfield_hyb_external[y]", 0.0_rt); + WarpX::AllocInitMultiFab(Bfield_hyb_external[lev][2], amrex::convert(ba, Bz_nodal_flag), + dm, ncomps, ngB, lev, "Bfield_hyb_external[z]", 0.0_rt); + WarpX::AllocInitMultiFab(Efield_hyb_external[lev][0], amrex::convert(ba, Ex_nodal_flag), + dm, ncomps, ngE, lev, "Efield_hyb_external[x]", 0.0_rt); + WarpX::AllocInitMultiFab(Efield_hyb_external[lev][1], amrex::convert(ba, Ey_nodal_flag), + dm, ncomps, ngE, lev, "Efield_hyb_external[y]", 0.0_rt); + WarpX::AllocInitMultiFab(Efield_hyb_external[lev][2], amrex::convert(ba, 
Ez_nodal_flag), + dm, ncomps, ngE, lev, "Efield_hyb_external[z]", 0.0_rt); + } + #ifdef WARPX_DIM_RZ WARPX_ALWAYS_ASSERT_WITH_MESSAGE( (ncomps == 1), @@ -135,6 +170,10 @@ void HybridPICModel::ClearLevel (int lev) current_fp_temp[lev][i].reset(); current_fp_ampere[lev][i].reset(); current_fp_external[lev][i].reset(); + if (m_add_external_fields) { + Bfield_hyb_external[lev][i].reset(); + Efield_hyb_external[lev][i].reset(); + } } } @@ -165,33 +204,25 @@ void HybridPICModel::InitData () auto & warpx = WarpX::GetInstance(); const auto& mypc = warpx.GetPartContainer(); - if ( mypc.m_B_ext_particle_s == "parse_b_ext_particle_function") { - m_B_external_parser[0] = std::make_unique( - utils::parser::makeParser(m_Bx_ext_part_function,{"x","y","z","t"})); - m_B_external_parser[1] = std::make_unique( - utils::parser::makeParser(m_By_ext_part_function,{"x","y","z","t"})); - m_B_external_parser[2] = std::make_unique( - utils::parser::makeParser(m_Bz_ext_part_function,{"x","y","z","t"})); - m_B_external[0] = m_B_external_parser[0]->compile<4>(); - m_B_external[1] = m_B_external_parser[1]->compile<4>(); - m_B_external[2] = m_B_external_parser[2]->compile<4>(); - - m_add_ext_particle_B_field = true; - } - - if ( mypc.m_E_ext_particle_s == "parse_e_ext_particle_function") { - m_E_external_parser[0] = std::make_unique( - utils::parser::makeParser(m_Ex_ext_part_function,{"x","y","z","t"})); - m_E_external_parser[1] = std::make_unique( - utils::parser::makeParser(m_Ey_ext_part_function,{"x","y","z","t"})); - m_E_external_parser[2] = std::make_unique( - utils::parser::makeParser(m_Ez_ext_part_function,{"x","y","z","t"})); - m_E_external[0] = m_E_external_parser[0]->compile<4>(); - m_E_external[1] = m_E_external_parser[0]->compile<4>(); - m_E_external[2] = m_E_external_parser[0]->compile<4>(); - - m_add_ext_particle_E_field = true; - } + m_B_external_parser[0] = std::make_unique( + utils::parser::makeParser(m_Bx_ext_grid_function,{"x","y","z","t"})); + m_B_external_parser[1] = 
std::make_unique( + utils::parser::makeParser(m_By_ext_grid_function,{"x","y","z","t"})); + m_B_external_parser[2] = std::make_unique( + utils::parser::makeParser(m_Bz_ext_grid_function,{"x","y","z","t"})); + m_B_external[0] = m_B_external_parser[0]->compile<4>(); + m_B_external[1] = m_B_external_parser[1]->compile<4>(); + m_B_external[2] = m_B_external_parser[2]->compile<4>(); + + m_E_external_parser[0] = std::make_unique( + utils::parser::makeParser(m_Ex_ext_grid_function,{"x","y","z","t"})); + m_E_external_parser[1] = std::make_unique( + utils::parser::makeParser(m_Ey_ext_grid_function,{"x","y","z","t"})); + m_E_external_parser[2] = std::make_unique( + utils::parser::makeParser(m_Ez_ext_grid_function,{"x","y","z","t"})); + m_E_external[0] = m_E_external_parser[0]->compile<4>(); + m_E_external[1] = m_E_external_parser[1]->compile<4>(); + m_E_external[2] = m_E_external_parser[2]->compile<4>(); // Get the grid staggering of the fields involved in calculating E amrex::IntVect Jx_stag = warpx.getField(FieldType::current_fp, 0,0).ixType().toIntVect(); @@ -285,6 +316,7 @@ void HybridPICModel::InitData () } #endif GetCurrentExternal(edge_lengths, lev); + GetFieldsExternal(edge_lengths, lev); } } @@ -296,36 +328,73 @@ void HybridPICModel::GetCurrentExternal ( auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { - GetCurrentExternal(edge_lengths[lev], lev); + GetExternalFieldFromExpression(current_fp_external[lev], m_J_external, edge_lengths[lev], lev); } } - void HybridPICModel::GetCurrentExternal ( std::array< std::unique_ptr, 3> const& edge_lengths, int lev) { - // This logic matches closely to WarpX::InitializeExternalFieldsOnGridUsingParser - // except that the parsers include time dependence. 
- auto & warpx = WarpX::GetInstance(); + if (!m_external_field_has_time_dependence) { return; } + GetExternalFieldFromExpression(current_fp_external[lev], m_J_external, edge_lengths, lev); +} +void HybridPICModel::GetFieldsExternal ( + amrex::Vector, 3>> const& edge_lengths, + amrex::Real t) +{ + auto& warpx = WarpX::GetInstance(); + for (int lev = 0; lev <= warpx.finestLevel(); ++lev) + { + GetExternalFieldFromExpression(Bfield_hyb_external[lev], m_B_external, edge_lengths[lev], lev, t); + GetExternalFieldFromExpression(Efield_hyb_external[lev], m_E_external, edge_lengths[lev], lev, t); + } +} + +void HybridPICModel::GetFieldsExternal ( + std::array< std::unique_ptr, 3> const& edge_lengths, + int lev) +{ + GetExternalFieldFromExpression(Bfield_hyb_external[lev], m_B_external, edge_lengths, lev); + GetExternalFieldFromExpression(Efield_hyb_external[lev], m_E_external, edge_lengths, lev); +} + +void HybridPICModel::GetExternalFieldFromExpression ( + std::array< std::unique_ptr, 3> const& field, + std::array< amrex::ParserExecutor<4>, 3> const& expression, + std::array< std::unique_ptr, 3> const& edge_lengths, + int lev) +{ + auto & warpx = WarpX::GetInstance(); auto t = warpx.gett_new(lev); + GetExternalFieldFromExpression(field, expression, edge_lengths, lev, t); +} +void HybridPICModel::GetExternalFieldFromExpression ( + std::array< std::unique_ptr, 3> const& field, + std::array< amrex::ParserExecutor<4>, 3> const& expression, + std::array< std::unique_ptr, 3> const& edge_lengths, + int lev, amrex::Real t) +{ + // This logic matches closely to WarpX::InitializeExternalFieldsOnGridUsingParser + // except that the parsers include time dependence. 
+ auto & warpx = WarpX::GetInstance(); auto dx_lev = warpx.Geom(lev).CellSizeArray(); const RealBox& real_box = warpx.Geom(lev).ProbDomain(); - auto& mfx = current_fp_external[lev][0]; - auto& mfy = current_fp_external[lev][1]; - auto& mfz = current_fp_external[lev][2]; + auto& mfx = field[0]; + auto& mfy = field[1]; + auto& mfz = field[2]; const amrex::IntVect x_nodal_flag = mfx->ixType().toIntVect(); const amrex::IntVect y_nodal_flag = mfy->ixType().toIntVect(); const amrex::IntVect z_nodal_flag = mfz->ixType().toIntVect(); // avoid implicit lambda capture - auto Jx_external = m_J_external[0]; - auto Jy_external = m_J_external[1]; - auto Jz_external = m_J_external[2]; + auto x_external = expression[0]; + auto y_external = expression[1]; + auto z_external = expression[2]; for ( MFIter mfi(*mfx, TilingIfNotGPU()); mfi.isValid(); ++mfi) { @@ -371,7 +440,7 @@ void HybridPICModel::GetCurrentExternal ( const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; #endif // Initialize the x-component of the field. - mfxfab(i,j,k) = Jx_external(x,y,z,t); + mfxfab(i,j,k) = x_external(x,y,z,t); }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { // skip if node is covered by an embedded boundary @@ -397,7 +466,7 @@ void HybridPICModel::GetCurrentExternal ( const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; #endif // Initialize the y-component of the field. - mfyfab(i,j,k) = Jy_external(x,y,z,t); + mfyfab(i,j,k) = y_external(x,y,z,t); }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { // skip if node is covered by an embedded boundary @@ -423,7 +492,7 @@ void HybridPICModel::GetCurrentExternal ( const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; #endif // Initialize the z-component of the field. 
- mfzfab(i,j,k) = Jz_external(x,y,z,t); + mfzfab(i,j,k) = z_external(x,y,z,t); } ); } diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index ed47fee8433..924c3a17a88 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -422,21 +422,9 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( const bool include_hyper_resistivity_term = (eta_h > 0.0) && solve_for_Faraday; - const bool include_B_ext_part = hybrid_model->m_add_ext_particle_B_field; - const auto Br_part = hybrid_model->m_B_external[0]; - const auto Bt_part = hybrid_model->m_B_external[1]; - const auto Bz_part = hybrid_model->m_B_external[2]; - - const bool include_E_ext_part = hybrid_model->m_add_ext_particle_E_field; - const auto Er_part = hybrid_model->m_E_external[0]; - const auto Et_part = hybrid_model->m_E_external[1]; - const auto Ez_part = hybrid_model->m_E_external[2]; - - auto & warpx = WarpX::GetInstance(); - - auto dx_lev = warpx.Geom(lev).CellSizeArray(); - const RealBox& real_box = warpx.Geom(lev).ProbDomain(); - const auto nodal_flag = IntVect::TheNodeVector(); + const bool include_external_fields = hybrid_model->m_add_external_fields; + auto const& Bfield_external = hybrid_model->Bfield_hyb_external[0]; // lev=0 + auto const& Efield_external = hybrid_model->Efield_hyb_external[0]; // lev=0 // Index type required for interpolating fields from their respective // staggering to the Ex, Ey, Ez locations @@ -497,6 +485,10 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Array4 const& Bt = Bfield[1]->const_array(mfi); Array4 const& Bz = Bfield[2]->const_array(mfi); + Array4 const& Br_ext = Bfield_external[0]->const_array(mfi); + Array4 const& Bt_ext = Bfield_external[1]->const_array(mfi); + Array4 const& Bz_ext = Bfield_external[2]->const_array(mfi); + // Loop over the cells and update the nodal E 
field amrex::ParallelFor(mfi.tilebox(), [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ @@ -515,17 +507,10 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( auto Bt_interp = Interp(Bt, Bt_stag, nodal, coarsen, i, j, 0, 0); auto Bz_interp = Interp(Bz, Bz_stag, nodal, coarsen, i, j, 0, 0); - if (include_B_ext_part) { - // Determine r and z on nodal mesh at i and j - const amrex::Real fac_x = (1._rt - nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real xx = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real yy = 0._rt; - const amrex::Real fac_z = (1._rt - nodal_flag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real zz = j*dx_lev[1] + real_box.lo(1) + fac_z; - - Br_interp += Br_part(xx,yy,zz,t); - Bt_interp += Bt_part(xx,yy,zz,t); - Bz_interp += Bz_part(xx,yy,zz,t); + if (include_external_fields) { + Br_interp += Interp(Br_ext, Br_stag, nodal, coarsen, i, j, 0, 0); + Bt_interp += Interp(Bt_ext, Bt_stag, nodal, coarsen, i, j, 0, 0); + Bz_interp += Interp(Bz_ext, Bz_stag, nodal, coarsen, i, j, 0, 0); } // calculate enE = (J - Ji) x B @@ -567,6 +552,9 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Array4 const& Er = Efield[0]->array(mfi); Array4 const& Et = Efield[1]->array(mfi); Array4 const& Ez = Efield[2]->array(mfi); + Array4 const& Er_ext = Efield_external[0]->const_array(mfi); + Array4 const& Et_ext = Efield_external[1]->const_array(mfi); + Array4 const& Ez_ext = Efield_external[2]->const_array(mfi); Array4 const& Jr = Jfield[0]->const_array(mfi); Array4 const& Jt = Jfield[1]->const_array(mfi); Array4 const& Jz = Jfield[2]->const_array(mfi); @@ -639,15 +627,8 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Er(i, j, 0) -= eta_h * nabla2Jr; } - if (include_E_ext_part) { - // Determine r and z on nodal mesh at i and j - const amrex::Real fac_x = (1._rt - Er_stag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real xx = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real yy = 0._rt; - const amrex::Real fac_z = (1._rt - 
Er_stag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real zz = j*dx_lev[1] + real_box.lo(1) + fac_z; - - Er(i, j, 0) -= Er_part(xx,yy,zz,t); + if (include_external_fields) { + Er(i, j, 0) -= Er_ext(i, j, 0); } }, @@ -693,15 +674,8 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Note: Hyper-resisitivity should be revisited here when modal decomposition is implemented - if (include_E_ext_part) { - // Determine r and z on nodal mesh at i and j - const amrex::Real fac_x = (1._rt - Et_stag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real xx = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real yy = 0._rt; - const amrex::Real fac_z = (1._rt - Et_stag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real zz = j*dx_lev[1] + real_box.lo(1) + fac_z; - - Et(i, j, 0) -= Et_part(xx,yy,zz,t); + if (include_external_fields) { + Et(i, j, 0) -= Et_ext(i, j, 0); } }, @@ -738,20 +712,13 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Ez(i, j, 0) += eta(rho_val, jtot_val) * Jz(i, j, 0); } - if (include_hyper_resistivity_term) { + if (include_hyper_resistivity_term && solve_for_Faraday) { auto nabla2Jz = T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, 0, 0); Ez(i, j, 0) -= eta_h * nabla2Jz; } - if (include_E_ext_part) { - // Determine r and z on nodal mesh at i and j - const amrex::Real fac_x = (1._rt - Ez_stag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real xx = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real yy = 0._rt; - const amrex::Real fac_z = (1._rt - Ez_stag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real zz = j*dx_lev[1] + real_box.lo(1) + fac_z; - - Ez(i, j, 0) -= Ez_part(xx,yy,zz,t); + if (include_external_fields) { + Ez(i, j, 0) -= Ez_ext(i, j, 0); } } ); @@ -794,21 +761,9 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( const bool include_hyper_resistivity_term = (eta_h > 0.) 
&& solve_for_Faraday; - const bool include_B_ext_part = hybrid_model->m_add_ext_particle_B_field; - const auto Bx_part = hybrid_model->m_B_external[0]; - const auto By_part = hybrid_model->m_B_external[1]; - const auto Bz_part = hybrid_model->m_B_external[2]; - - const bool include_E_ext_part = hybrid_model->m_add_ext_particle_E_field; - const auto Ex_part = hybrid_model->m_E_external[0]; - const auto Ey_part = hybrid_model->m_E_external[1]; - const auto Ez_part = hybrid_model->m_E_external[2]; - - auto & warpx = WarpX::GetInstance(); - - auto dx_lev = warpx.Geom(lev).CellSizeArray(); - const RealBox& real_box = warpx.Geom(lev).ProbDomain(); - const auto nodal_flag = IntVect::TheNodeVector(); + const bool include_external_fields = hybrid_model->m_add_external_fields; + auto const& Bfield_external = hybrid_model->Bfield_hyb_external[0]; // lev=0 + auto const& Efield_external = hybrid_model->Efield_hyb_external[0]; // lev=0 // Index type required for interpolating fields from their respective // staggering to the Ex, Ey, Ez locations @@ -869,6 +824,10 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Array4 const& By = Bfield[1]->const_array(mfi); Array4 const& Bz = Bfield[2]->const_array(mfi); + Array4 const& Bx_ext = Bfield_external[0]->const_array(mfi); + Array4 const& By_ext = Bfield_external[1]->const_array(mfi); + Array4 const& Bz_ext = Bfield_external[2]->const_array(mfi); + // Loop over the cells and update the nodal E field amrex::ParallelFor(mfi.tilebox(), [=] AMREX_GPU_DEVICE (int i, int j, int k){ @@ -887,18 +846,10 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( auto By_interp = Interp(By, By_stag, nodal, coarsen, i, j, k, 0); auto Bz_interp = Interp(Bz, Bz_stag, nodal, coarsen, i, j, k, 0); - if (include_B_ext_part) { - // Determine r and z on nodal mesh at i and j - const amrex::Real fac_x = (1._rt - nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real xx = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real fac_y = (1._rt 
- nodal_flag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real yy = j*dx_lev[1] + real_box.lo(1) + fac_y; - const amrex::Real fac_z = (1._rt - nodal_flag[2]) * dx_lev[2] * 0.5_rt; - const amrex::Real zz = k*dx_lev[2] + real_box.lo(2) + fac_z; - - Bx_interp += Bx_part(xx,yy,zz,t); - By_interp += By_part(xx,yy,zz,t); - Bz_interp += Bz_part(xx,yy,zz,t); + if (include_external_fields) { + Bx_interp += Interp(Bx_ext, Bx_stag, nodal, coarsen, i, j, k, 0); + By_interp += Interp(By_ext, By_stag, nodal, coarsen, i, j, k, 0); + Bz_interp += Interp(Bz_ext, Bz_stag, nodal, coarsen, i, j, k, 0); } // calculate enE = (J - Ji) x B @@ -940,6 +891,9 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Array4 const& Ex = Efield[0]->array(mfi); Array4 const& Ey = Efield[1]->array(mfi); Array4 const& Ez = Efield[2]->array(mfi); + Array4 const& Ex_ext = Efield_external[0]->const_array(mfi); + Array4 const& Ey_ext = Efield_external[1]->const_array(mfi); + Array4 const& Ez_ext = Efield_external[2]->const_array(mfi); Array4 const& Jx = Jfield[0]->const_array(mfi); Array4 const& Jy = Jfield[1]->const_array(mfi); Array4 const& Jz = Jfield[2]->const_array(mfi); @@ -1007,16 +961,8 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Ex(i, j, k) -= eta_h * nabla2Jx; } - if (include_E_ext_part) { - // Determine x, y, and z on nodal mesh at i, j, & k - const amrex::Real fac_x = (1._rt - Ex_stag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real xx = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real fac_y = (1._rt - Ex_stag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real yy = j*dx_lev[1] + real_box.lo(1) + fac_y; - const amrex::Real fac_z = (1._rt - Ex_stag[2]) * dx_lev[2] * 0.5_rt; - const amrex::Real zz = k*dx_lev[2] + real_box.lo(2) + fac_z; - - Ex(i, j, k) -= Ex_part(xx,yy,zz,t); + if (include_external_fields) { + Ex(i, j, k) -= Ex_ext(i, j, k); } }, @@ -1063,16 +1009,8 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Ey(i, j, k) -= eta_h * nabla2Jy; } - if 
(include_E_ext_part) { - // Determine x, y, and z on nodal mesh at i, j, & k - const amrex::Real fac_x = (1._rt - Ey_stag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real xx = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real fac_y = (1._rt - Ey_stag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real yy = j*dx_lev[1] + real_box.lo(1) + fac_y; - const amrex::Real fac_z = (1._rt - Ey_stag[2]) * dx_lev[2] * 0.5_rt; - const amrex::Real zz = k*dx_lev[2] + real_box.lo(2) + fac_z; - - Ey(i, j, k) -= Ey_part(xx,yy,zz,t); + if (include_external_fields) { + Ey(i, j, k) -= Ey_ext(i, j, k); } }, @@ -1115,16 +1053,8 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Ez(i, j, k) -= eta_h * nabla2Jz; } - if (include_E_ext_part) { - // Determine x, y, and z on nodal mesh at i, j, & k - const amrex::Real fac_x = (1._rt - Ez_stag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real xx = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real fac_y = (1._rt - Ez_stag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real yy = j*dx_lev[1] + real_box.lo(1) + fac_y; - const amrex::Real fac_z = (1._rt - Ez_stag[2]) * dx_lev[2] * 0.5_rt; - const amrex::Real zz = k*dx_lev[2] + real_box.lo(2) + fac_z; - - Ez(i, j, k) -= Ez_part(xx,yy,zz,t); + if (include_external_fields) { + Ez(i, j, k) -= Ez_ext(i, j, k); } } ); diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index 0195c7a3356..97736b45de9 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -55,6 +55,31 @@ void WarpX::HybridPICEvolveFields () // Get requested number of substeps to use const int sub_steps = m_hybrid_pic_model->m_substeps; + + amrex::Real t_eval = gett_old(0); + amrex::Real sub_dt = 0.5_rt*dt[0]/sub_steps; + + const bool add_external_fields = m_hybrid_pic_model->m_add_external_fields; + auto const& Bfield_hyb_external = m_hybrid_pic_model->Bfield_hyb_external; + auto const& Efield_hyb_external = 
m_hybrid_pic_model->Efield_hyb_external; + + // Handle field splitting for Hybrid field push + if (add_external_fields) { + // Get the external fields + m_hybrid_pic_model->GetFieldsExternal(m_edge_lengths, t_eval); + + // If using split fields, subtract the external field at the old time + for (int lev = 0; lev <= finest_level; ++lev) { + for (int idim = 0; idim < 3; ++idim) { + MultiFab::Subtract( + *Bfield_fp[lev][idim], + *Bfield_hyb_external[lev][idim], + 0, 0, 1, + Bfield_hyb_external[lev][idim]->nGrowVect()); + } + } + FillBoundaryB(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); + } // Get the external current m_hybrid_pic_model->GetCurrentExternal(m_edge_lengths); @@ -88,9 +113,6 @@ void WarpX::HybridPICEvolveFields () } } - amrex::Real t_start = gett_old(0); - amrex::Real sub_dt = 0.5_rt/sub_steps*dt[0]; - // Push the B field from t=n to t=n+1/2 using the current and density // at t=n, while updating the E field along with B using the electron // momentum equation @@ -99,8 +121,7 @@ void WarpX::HybridPICEvolveFields () m_hybrid_pic_model->BfieldEvolveRK( Bfield_fp, Efield_fp, current_fp_temp, rho_fp_temp, m_edge_lengths, - t_start + static_cast(sub_step)*sub_dt, - sub_dt, + t_eval, sub_dt, DtType::FirstHalf, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points ); @@ -118,7 +139,12 @@ void WarpX::HybridPICEvolveFields () ); } - t_start += 0.5_rt*dt[0]; + t_eval += 0.5_rt*dt[0]; + + if (add_external_fields) { + // Get the external fields + m_hybrid_pic_model->GetFieldsExternal(m_edge_lengths, t_eval); + } // Now push the B field from t=n+1/2 to t=n+1 using the n+1/2 quantities for (int sub_step = 0; sub_step < sub_steps; sub_step++) @@ -126,8 +152,7 @@ void WarpX::HybridPICEvolveFields () m_hybrid_pic_model->BfieldEvolveRK( Bfield_fp, Efield_fp, current_fp, rho_fp_temp, m_edge_lengths, - t_start + static_cast(sub_step)*sub_dt, - sub_dt, + t_eval, sub_dt, DtType::SecondHalf, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points ); @@ -154,11 +179,35 @@ 
void WarpX::HybridPICEvolveFields () // Calculate the electron pressure at t=n+1 m_hybrid_pic_model->CalculateElectronPressure(); + t_eval = gett_new(0); + // Update the E field to t=n+1 using the extrapolated J_i^n+1 value m_hybrid_pic_model->CalculateCurrentAmpere(Bfield_fp, m_edge_lengths); m_hybrid_pic_model->HybridPICSolveE( - Efield_fp, current_fp_temp, Bfield_fp, rho_fp, m_edge_lengths, gett_new(0), false + Efield_fp, current_fp_temp, Bfield_fp, rho_fp, m_edge_lengths, t_eval, false ); + + // Handle field splitting for Hybrid field push + if (add_external_fields) { + m_hybrid_pic_model->GetFieldsExternal(m_edge_lengths, t_eval); + + // If using split fields, add the external field at the new time + for (int lev = 0; lev <= finest_level; ++lev) { + for (int idim = 0; idim < 3; ++idim) { + MultiFab::Add( + *Bfield_fp[lev][idim], + *Bfield_hyb_external[lev][idim], + 0, 0, 1, + Bfield_hyb_external[lev][idim]->nGrowVect()); + MultiFab::Add( + *Efield_fp[lev][idim], + *Efield_hyb_external[lev][idim], + 0, 0, 1, + Efield_hyb_external[lev][idim]->nGrowVect()); + } + } + FillBoundaryB(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); + } FillBoundaryE(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); // Copy the rho^{n+1} values to rho_fp_temp and the J_i^{n+1/2} values to diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 647b3e0a6b2..c78a5037212 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -2357,8 +2357,9 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC) { m_hybrid_pic_model->AllocateLevelMFs( - lev, ba, dm, ncomps, ngJ, ngRho, jx_nodal_flag, jy_nodal_flag, - jz_nodal_flag, rho_nodal_flag + lev, ba, dm, ncomps, ngJ, ngRho, ngEB, ngEB, jx_nodal_flag, jy_nodal_flag, + jz_nodal_flag, rho_nodal_flag, Ex_nodal_flag, Ey_nodal_flag, Ez_nodal_flag, + Bx_nodal_flag, By_nodal_flag, Bz_nodal_flag ); } From 
344a3dde69ec6028560c1e5d33dc5873e94e023f Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Tue, 24 Sep 2024 17:08:42 -0700 Subject: [PATCH 11/86] Merged and refactored external fields to use new MultiFab register in Cylindrical and Cartesian. --- .../FiniteDifferenceSolver.H | 5 +- .../HybridPICModel/HybridPICModel.H | 97 +++++------ .../HybridPICModel/HybridPICModel.cpp | 156 +++++------------- .../HybridPICSolveE.cpp | 13 +- .../FieldSolver/WarpXPushFieldsHybridPIC.cpp | 34 ++-- Source/Fields.H | 4 + 6 files changed, 117 insertions(+), 192 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index 8d68f7310b1..488c73d84a0 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -1,7 +1,10 @@ -/* Copyright 2020 Remi Lehe +/* Copyright 2020-2024 The WarpX Community * * This file is part of WarpX. * + * Authors: Remi Lehe (LBNL) + * S. Eric Clark (Helion Energy) + * * License: BSD-3-Clause-LBNL */ diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index 4fda1e74b6e..1b5ccd1bb74 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -1,8 +1,9 @@ -/* Copyright 2023 The WarpX Community +/* Copyright 2023-2024 The WarpX Community * * This file is part of WarpX. * * Authors: Roelof Groenewald (TAE Technologies) + * S. 
Eric Clark (Helion Energy) * * License: BSD-3-Clause-LBNL */ @@ -12,6 +13,8 @@ #include "HybridPICModel_fwd.H" +#include "Fields.H" + #include "Utils/WarpXAlgorithmSelection.H" #include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H" @@ -42,8 +45,8 @@ public: void ReadParameters (); /** Allocate hybrid-PIC specific multifabs. Called in constructor. */ - void AllocateLevelMFs (ablastr::fields::MultiFabRegister & fields, - + void AllocateLevelMFs ( + ablastr::fields::MultiFabRegister & fields, int lev, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm, @@ -72,41 +75,19 @@ public: * external current multifab. Note the external current can be a function * of time and therefore this should be re-evaluated at every step. */ - void GetCurrentExternal ( - ablastr::fields::MultiLevelVectorField const& edge_lengths - ); + void GetCurrentExternal (bool skip_check = false); - void GetCurrentExternal ( - ablastr::fields::VectorField const& edge_lengths, - int lev - ); - - void GetFieldsExternal ( - amrex::Vector, 3>> const& edge_lengths - ); - - void GetFieldsExternal ( - amrex::Vector, 3>> const& edge_lengths, - amrex::Real t); - - void GetFieldsExternal ( - std::array< std::unique_ptr, 3> const& edge_lengths, - int lev - ); + void GetFieldsExternal (amrex::Real t); void GetExternalFieldFromExpression ( - std::array< std::unique_ptr, 3> const& field, + warpx::fields::FieldType field_type, std::array< amrex::ParserExecutor<4>, 3> const& expression, - std::array< std::unique_ptr, 3> const& edge_lengths, - int lev - ); - + int lev); + void GetExternalFieldFromExpression ( - std::array< std::unique_ptr, 3> const& field, + warpx::fields::FieldType field_type, std::array< amrex::ParserExecutor<4>, 3> const& expression, - std::array< std::unique_ptr, 3> const& edge_lengths, - int lev, amrex::Real t - ); + int lev, amrex::Real t); /** * \brief @@ -252,35 +233,35 @@ public: std::array< amrex::ParserExecutor<4>, 3> m_E_external; // Declare multifabs 
specifically needed for the hybrid-PIC model - amrex::Vector< std::unique_ptr > rho_fp_temp; - amrex::Vector, 3 > > current_fp_temp; - amrex::Vector, 3 > > current_fp_ampere; - amrex::Vector, 3 > > current_fp_external; - amrex::Vector< std::unique_ptr > electron_pressure_fp; - - amrex::Vector, 3 > > Bfield_hyb_external; - amrex::Vector, 3 > > Efield_hyb_external; + // amrex::Vector< std::unique_ptr > rho_fp_temp; + // amrex::Vector, 3 > > current_fp_temp; + // amrex::Vector, 3 > > current_fp_ampere; + // amrex::Vector, 3 > > current_fp_external; + // amrex::Vector< std::unique_ptr > electron_pressure_fp; + + // amrex::Vector, 3 > > Bfield_hyb_external; + // amrex::Vector, 3 > > Efield_hyb_external; // amrex::Vector, 3 > > Bfield_hyb_self; // amrex::Vector, 3 > > Efield_hyb_self; - // Helper functions to retrieve hybrid-PIC multifabs - [[nodiscard]] amrex::MultiFab* - get_pointer_current_fp_ampere (int lev, int direction) const - { - return current_fp_ampere[lev][direction].get(); - } - - [[nodiscard]] amrex::MultiFab* - get_pointer_current_fp_external (int lev, int direction) const - { - return current_fp_external[lev][direction].get(); - } - - [[nodiscard]] amrex::MultiFab* - get_pointer_electron_pressure_fp (int lev) const - { - return electron_pressure_fp[lev].get(); - } + // // Helper functions to retrieve hybrid-PIC multifabs + // [[nodiscard]] amrex::MultiFab* + // get_pointer_current_fp_ampere (int lev, int direction) const + // { + // return current_fp_ampere[lev][direction].get(); + // } + + // [[nodiscard]] amrex::MultiFab* + // get_pointer_current_fp_external (int lev, int direction) const + // { + // return current_fp_external[lev][direction].get(); + // } + + // [[nodiscard]] amrex::MultiFab* + // get_pointer_electron_pressure_fp (int lev) const + // { + // return electron_pressure_fp[lev].get(); + // } /** Gpu Vector with index type of the Jx multifab */ amrex::GpuArray Jx_IndexType; diff --git 
a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index a7faa79ca60..9373ff2bdf2 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -1,8 +1,9 @@ -/* Copyright 2023 The WarpX Community +/* Copyright 2023-2024 The WarpX Community * * This file is part of WarpX. * * Authors: Roelof Groenewald (TAE Technologies) + * S. Eric Clark (Helion Energy) * * License: BSD-3-Clause-LBNL */ @@ -10,7 +11,7 @@ #include "HybridPICModel.H" #include "EmbeddedBoundary/Enabled.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Particles/MultiParticleContainer.H" #include "WarpX.H" @@ -68,30 +69,8 @@ void HybridPICModel::ReadParameters () } } -void HybridPICModel::AllocateLevelMFs (ablastr::fields::MultiFabRegister & fields, - int lev, const BoxArray& ba, const DistributionMapping& dm, - const int ncomps, const IntVect& ngJ, const IntVect& ngRho, - const IntVect& jx_nodal_flag, - const IntVect& jy_nodal_flag, - const IntVect& jz_nodal_flag, - const IntVect& rho_nodal_flag) -void HybridPICModel::AllocateMFs (int nlevs_max) -{ - electron_pressure_fp.resize(nlevs_max); - rho_fp_temp.resize(nlevs_max); - current_fp_temp.resize(nlevs_max); - current_fp_ampere.resize(nlevs_max); - current_fp_external.resize(nlevs_max); - - if (m_add_external_fields) { - Bfield_hyb_external.resize(nlevs_max); - Efield_hyb_external.resize(nlevs_max); - // Bfield_hyb_self.resize(nlevs_max); - // Efield_hyb_self.resize(nlevs_max); - } -} - void HybridPICModel::AllocateLevelMFs ( + ablastr::fields::MultiFabRegister & fields, int lev, const BoxArray& ba, const DistributionMapping& dm, const int ncomps, const IntVect& ngJ, const IntVect& ngRho, @@ -158,18 +137,24 @@ void HybridPICModel::AllocateLevelMFs ( if (m_add_external_fields) { // These are nodal to match when B-field is 
added in evaluation of Ohm's law - WarpX::AllocInitMultiFab(Bfield_hyb_external[lev][0], amrex::convert(ba, Bx_nodal_flag), - dm, ncomps, ngB, lev, "Bfield_hyb_external[x]", 0.0_rt); - WarpX::AllocInitMultiFab(Bfield_hyb_external[lev][1], amrex::convert(ba, By_nodal_flag), - dm, ncomps, ngB, lev, "Bfield_hyb_external[y]", 0.0_rt); - WarpX::AllocInitMultiFab(Bfield_hyb_external[lev][2], amrex::convert(ba, Bz_nodal_flag), - dm, ncomps, ngB, lev, "Bfield_hyb_external[z]", 0.0_rt); - WarpX::AllocInitMultiFab(Efield_hyb_external[lev][0], amrex::convert(ba, Ex_nodal_flag), - dm, ncomps, ngE, lev, "Efield_hyb_external[x]", 0.0_rt); - WarpX::AllocInitMultiFab(Efield_hyb_external[lev][1], amrex::convert(ba, Ey_nodal_flag), - dm, ncomps, ngE, lev, "Efield_hyb_external[y]", 0.0_rt); - WarpX::AllocInitMultiFab(Efield_hyb_external[lev][2], amrex::convert(ba, Ez_nodal_flag), - dm, ncomps, ngE, lev, "Efield_hyb_external[z]", 0.0_rt); + fields.alloc_init(FieldType::hybrid_B_fp_external, Direction{0}, + lev, amrex::convert(ba, Bx_nodal_flag), + dm, ncomps, ngB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_B_fp_external, Direction{1}, + lev, amrex::convert(ba, By_nodal_flag), + dm, ncomps, ngB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_B_fp_external, Direction{2}, + lev, amrex::convert(ba, Bz_nodal_flag), + dm, ncomps, ngB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_E_fp_external, Direction{0}, + lev, amrex::convert(ba, Ex_nodal_flag), + dm, ncomps, ngE, 0.0_rt); + fields.alloc_init(FieldType::hybrid_E_fp_external, Direction{1}, + lev, amrex::convert(ba, Ey_nodal_flag), + dm, ncomps, ngE, 0.0_rt); + fields.alloc_init(FieldType::hybrid_E_fp_external, Direction{2}, + lev, amrex::convert(ba, Ez_nodal_flag), + dm, ncomps, ngE, 0.0_rt); } #ifdef WARPX_DIM_RZ @@ -179,21 +164,6 @@ void HybridPICModel::AllocateLevelMFs ( #endif } -void HybridPICModel::ClearLevel (int lev) -{ - electron_pressure_fp[lev].reset(); - rho_fp_temp[lev].reset(); - for (int i = 0; i < 3; ++i) { - 
current_fp_temp[lev][i].reset(); - current_fp_ampere[lev][i].reset(); - current_fp_external[lev][i].reset(); - if (m_add_external_fields) { - Bfield_hyb_external[lev][i].reset(); - Efield_hyb_external[lev][i].reset(); - } - } -} - void HybridPICModel::InitData () { m_resistivity_parser = std::make_unique( @@ -315,96 +285,60 @@ void HybridPICModel::InitData () // Initialize external current - note that this approach skips the check // if the current is time dependent which is what needs to be done to // write time independent fields on the first step. - for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { - auto edge_lengths = std::array, 3>(); -#ifdef AMREX_USE_EB - if (EB::enabled()) { - using ablastr::fields::Direction; - auto const & edge_lengths_x = *warpx.m_fields.get(FieldType::edge_lengths, Direction{0}, lev); - auto const & edge_lengths_y = *warpx.m_fields.get(FieldType::edge_lengths, Direction{1}, lev); - auto const & edge_lengths_z = *warpx.m_fields.get(FieldType::edge_lengths, Direction{2}, lev); - - edge_lengths = std::array< std::unique_ptr, 3 >{ - std::make_unique( - edge_lengths_x, amrex::make_alias, 0, edge_lengths_x.nComp()), - std::make_unique( - edge_lengths_y, amrex::make_alias, 0, edge_lengths_y.nComp()), - std::make_unique( - edge_lengths_z, amrex::make_alias, 0, edge_lengths_z.nComp()) - }; - } -#endif - GetCurrentExternal(ablastr::fields::a2m(edge_lengths), lev); - GetFieldsExternal(ablastr::fields::a2m(edge_lengths), lev); - } + GetCurrentExternal(true); + GetFieldsExternal(warpx.gett_new(0)); } -void HybridPICModel::GetCurrentExternal ( - ablastr::fields::MultiLevelVectorField const& edge_lengths) +void HybridPICModel::GetCurrentExternal (bool skip_check /*=false*/) { - if (!m_external_field_has_time_dependence) { return; } + if (!skip_check && !m_external_field_has_time_dependence) { return; } auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { - 
GetExternalFieldFromExpression(current_fp_external[lev], m_J_external, edge_lengths[lev], lev); + GetExternalFieldFromExpression(FieldType::hybrid_current_fp_external, m_J_external, lev); } } -void HybridPICModel::GetCurrentExternal ( - ablastr::fields::VectorField const& edge_lengths, - int lev) -{ - if (!m_external_field_has_time_dependence) { return; } - GetExternalFieldFromExpression(current_fp_external[lev], m_J_external, edge_lengths, lev); -} - -void HybridPICModel::GetFieldsExternal ( - amrex::Vector, 3>> const& edge_lengths, - amrex::Real t) +void HybridPICModel::GetFieldsExternal (amrex::Real t) { auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { - GetExternalFieldFromExpression(Bfield_hyb_external[lev], m_B_external, edge_lengths[lev], lev, t); - GetExternalFieldFromExpression(Efield_hyb_external[lev], m_E_external, edge_lengths[lev], lev, t); + GetExternalFieldFromExpression( + FieldType::hybrid_B_fp_external, + m_B_external, lev, t); + GetExternalFieldFromExpression( + FieldType::hybrid_E_fp_external, + m_E_external, lev, t); } } -void HybridPICModel::GetFieldsExternal ( - std::array< std::unique_ptr, 3> const& edge_lengths, - int lev) -{ - GetExternalFieldFromExpression(Bfield_hyb_external[lev], m_B_external, edge_lengths, lev); - GetExternalFieldFromExpression(Efield_hyb_external[lev], m_E_external, edge_lengths, lev); -} - void HybridPICModel::GetExternalFieldFromExpression ( - std::array< std::unique_ptr, 3> const& field, + FieldType field_type, std::array< amrex::ParserExecutor<4>, 3> const& expression, - std::array< std::unique_ptr, 3> const& edge_lengths, int lev) { auto & warpx = WarpX::GetInstance(); - auto t = warpx.gett_new(lev); - GetExternalFieldFromExpression(field, expression, edge_lengths, lev, t); + GetExternalFieldFromExpression(field_type, expression, lev, warpx.gett_new(lev)); } void HybridPICModel::GetExternalFieldFromExpression ( - std::array< std::unique_ptr, 3> const& field, + FieldType 
field_type, std::array< amrex::ParserExecutor<4>, 3> const& expression, - std::array< std::unique_ptr, 3> const& edge_lengths, int lev, amrex::Real t) { + using ablastr::fields::Direction; + // This logic matches closely to WarpX::InitializeExternalFieldsOnGridUsingParser // except that the parsers include time dependence. auto & warpx = WarpX::GetInstance(); auto dx_lev = warpx.Geom(lev).CellSizeArray(); const RealBox& real_box = warpx.Geom(lev).ProbDomain(); - auto& mfx = field[0]; - auto& mfy = field[1]; - auto& mfz = field[2]; + auto const& mfx = warpx.m_fields.get(field_type, Direction{0}, lev); + auto const& mfy = warpx.m_fields.get(field_type, Direction{1}, lev); + auto const& mfz = warpx.m_fields.get(field_type, Direction{2}, lev); const amrex::IntVect x_nodal_flag = mfx->ixType().toIntVect(); const amrex::IntVect y_nodal_flag = mfy->ixType().toIntVect(); @@ -427,9 +361,9 @@ void HybridPICModel::GetExternalFieldFromExpression ( amrex::Array4 lx, ly, lz; if (EB::enabled()) { - lx = edge_lengths[0]->array(mfi); - ly = edge_lengths[1]->array(mfi); - lz = edge_lengths[2]->array(mfi); + lx = warpx.m_fields.get(FieldType::edge_lengths, Direction{0}, lev)->array(mfi); + ly = warpx.m_fields.get(FieldType::edge_lengths, Direction{1}, lev)->array(mfi); + lz = warpx.m_fields.get(FieldType::edge_lengths, Direction{2}, lev)->array(mfi); } amrex::ParallelFor (tbx, tby, tbz, diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 9f82d6c965d..97f436bfe76 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -23,6 +23,7 @@ #include using namespace amrex; +using warpx::fields::FieldType; void FiniteDifferenceSolver::CalculateCurrentAmpere ( ablastr::fields::VectorField & Jfield, @@ -423,8 +424,10 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( const bool 
include_hyper_resistivity_term = (eta_h > 0.0) && solve_for_Faraday; const bool include_external_fields = hybrid_model->m_add_external_fields; - auto const& Bfield_external = hybrid_model->Bfield_hyb_external[0]; // lev=0 - auto const& Efield_external = hybrid_model->Efield_hyb_external[0]; // lev=0 + + auto const& warpx = WarpX::GetInstance(); + ablastr::fields::ConstVectorField Bfield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_B_fp_external, 0); // lev=0 + ablastr::fields::ConstVectorField Efield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_E_fp_external, 0); // lev=0 // Index type required for interpolating fields from their respective // staggering to the Ex, Ey, Ez locations @@ -762,8 +765,10 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( const bool include_hyper_resistivity_term = (eta_h > 0.) && solve_for_Faraday; const bool include_external_fields = hybrid_model->m_add_external_fields; - auto const& Bfield_external = hybrid_model->Bfield_hyb_external[0]; // lev=0 - auto const& Efield_external = hybrid_model->Efield_hyb_external[0]; // lev=0 + + auto const& warpx = WarpX::GetInstance(); + ablastr::fields::ConstVectorField Bfield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_B_fp_external, 0); // lev=0 + ablastr::fields::ConstVectorField Efield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_E_fp_external, 0); // lev=0 // Index type required for interpolating fields from their respective // staggering to the Ex, Ey, Ez locations diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index e8605c25b2f..15f94c30f81 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -1,8 +1,9 @@ -/* Copyright 2023 The WarpX Community +/* Copyright 2023-2024 The WarpX Community * * This file is part of WarpX. * * Authors: Roelof Groenewald (TAE Technologies) + * S. 
Eric Clark (Helion Energy) * * License: BSD-3-Clause-LBNL */ @@ -71,30 +72,27 @@ void WarpX::HybridPICEvolveFields () amrex::Real sub_dt = 0.5_rt*dt[0]/sub_steps; const bool add_external_fields = m_hybrid_pic_model->m_add_external_fields; - auto const& Bfield_hyb_external = m_hybrid_pic_model->Bfield_hyb_external; - auto const& Efield_hyb_external = m_hybrid_pic_model->Efield_hyb_external; // Handle field splitting for Hybrid field push if (add_external_fields) { // Get the external fields - m_hybrid_pic_model->GetFieldsExternal(m_edge_lengths, t_eval); + m_hybrid_pic_model->GetFieldsExternal(t_eval); // If using split fields, subtract the external field at the old time for (int lev = 0; lev <= finest_level; ++lev) { for (int idim = 0; idim < 3; ++idim) { MultiFab::Subtract( - *Bfield_fp[lev][idim], - *Bfield_hyb_external[lev][idim], + *m_fields.get(FieldType::Bfield_fp, Direction{idim}, lev), + *m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev), 0, 0, 1, - Bfield_hyb_external[lev][idim]->nGrowVect()); + m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev)->nGrowVect()); } } FillBoundaryB(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); } // Get the external current - m_hybrid_pic_model->GetCurrentExternal( - m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level)); + m_hybrid_pic_model->GetCurrentExternal(); // Reference hybrid-PIC multifabs ablastr::fields::MultiLevelScalarField rho_fp_temp = m_fields.get_mr_levels(FieldType::hybrid_rho_fp_temp, finest_level); @@ -157,7 +155,7 @@ void WarpX::HybridPICEvolveFields () if (add_external_fields) { // Get the external fields - m_hybrid_pic_model->GetFieldsExternal(m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level), t_eval); + m_hybrid_pic_model->GetFieldsExternal(t_eval); } // Now push the B field from t=n+1/2 to t=n+1 using the n+1/2 quantities @@ -212,21 +210,21 @@ void WarpX::HybridPICEvolveFields () // Handle field splitting for Hybrid field 
push if (add_external_fields) { - m_hybrid_pic_model->GetFieldsExternal(m_edge_lengths, t_eval); + m_hybrid_pic_model->GetFieldsExternal(t_eval); // If using split fields, add the external field at the new time for (int lev = 0; lev <= finest_level; ++lev) { for (int idim = 0; idim < 3; ++idim) { MultiFab::Add( - *Bfield_fp[lev][idim], - *Bfield_hyb_external[lev][idim], + *m_fields.get(FieldType::Bfield_fp, Direction{idim}, lev), + *m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev), 0, 0, 1, - Bfield_hyb_external[lev][idim]->nGrowVect()); + m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev)->nGrowVect()); MultiFab::Add( - *Efield_fp[lev][idim], - *Efield_hyb_external[lev][idim], - 0, 0, 1, - Efield_hyb_external[lev][idim]->nGrowVect()); + *m_fields.get(FieldType::Efield_fp, Direction{idim}, lev), + *m_fields.get(FieldType::hybrid_E_fp_external, Direction{idim}, lev), + 0, 0, 1, + m_fields.get(FieldType::hybrid_E_fp_external, Direction{idim}, lev)->nGrowVect()); } } FillBoundaryB(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); diff --git a/Source/Fields.H b/Source/Fields.H index 0aa3cbdd0c0..8b753bc1008 100644 --- a/Source/Fields.H +++ b/Source/Fields.H @@ -45,6 +45,8 @@ namespace warpx::fields hybrid_current_fp_temp, hybrid_current_fp_ampere, hybrid_current_fp_external, + hybrid_B_fp_external, + hybrid_E_fp_external, Efield_cp, //!< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level Bfield_cp, //!< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level current_cp, //!< Only used with MR. 
The current that is used as a source for the field solver, on the coarse patch of each level @@ -97,6 +99,8 @@ namespace warpx::fields FieldType::hybrid_current_fp_temp, FieldType::hybrid_current_fp_ampere, FieldType::hybrid_current_fp_external, + FieldType::hybrid_B_fp_external, + FieldType::hybrid_E_fp_external, FieldType::Efield_cp, FieldType::Bfield_cp, FieldType::current_cp, From 18ae509da902b294672c6777ec22618b9f703ac2 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 25 Sep 2024 00:08:52 +0000 Subject: [PATCH 12/86] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- Python/pywarpx/fields.py | 2 ++ Python/pywarpx/picmi.py | 6 ++++-- .../FiniteDifferenceSolver.H | 2 +- .../HybridPICModel/HybridPICModel.H | 4 ++-- Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp | 14 +++++++------- 5 files changed, 16 insertions(+), 12 deletions(-) diff --git a/Python/pywarpx/fields.py b/Python/pywarpx/fields.py index 31dde3d8a4c..33b6329c9ca 100644 --- a/Python/pywarpx/fields.py +++ b/Python/pywarpx/fields.py @@ -703,6 +703,7 @@ def BzFPExternalWrapper(level=0, include_ghosts=False): mf_name="Bfield_fp_external", idir=2, level=level, include_ghosts=include_ghosts ) + def ExHybridExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( mf_name="Efield_hyb_external[x]", level=level, include_ghosts=include_ghosts @@ -738,6 +739,7 @@ def BzHybridExternalWrapper(level=0, include_ghosts=False): mf_name="Bfield_hyb_external[z]", level=level, include_ghosts=include_ghosts ) + def JxFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( mf_name="current_fp", idir=0, level=level, include_ghosts=include_ghosts diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 390523cfd1e..fb9662510ce 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1834,12 +1834,14 @@ def __init__( self.By_external_function = 
By_expression self.Bz_external_function = Bz_expression - if (Ex_expression is not None + if ( + Ex_expression is not None or Ey_expression is not None or Ez_expression is not None or Bx_expression is not None or By_expression is not None - or Bz_expression is not None): + or Bz_expression is not None + ): self.add_external_fields = True # Handle keyword arguments used in expressions diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index 488c73d84a0..11abeedf90a 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -4,7 +4,7 @@ * * Authors: Remi Lehe (LBNL) * S. Eric Clark (Helion Energy) - * + * * License: BSD-3-Clause-LBNL */ diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index 1b5ccd1bb74..3d8f1448612 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -53,7 +53,7 @@ public: const int ncomps, const amrex::IntVect& ngJ, const amrex::IntVect& ngRho, - const amrex::IntVect& ngE, + const amrex::IntVect& ngE, const amrex::IntVect& ngB, const amrex::IntVect& jx_nodal_flag, const amrex::IntVect& jy_nodal_flag, @@ -83,7 +83,7 @@ public: warpx::fields::FieldType field_type, std::array< amrex::ParserExecutor<4>, 3> const& expression, int lev); - + void GetExternalFieldFromExpression ( warpx::fields::FieldType field_type, std::array< amrex::ParserExecutor<4>, 3> const& expression, diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index 15f94c30f81..29d8bebda40 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -67,7 
+67,7 @@ void WarpX::HybridPICEvolveFields () // Get requested number of substeps to use const int sub_steps = m_hybrid_pic_model->m_substeps; - + amrex::Real t_eval = gett_old(0); amrex::Real sub_dt = 0.5_rt*dt[0]/sub_steps; @@ -83,8 +83,8 @@ void WarpX::HybridPICEvolveFields () for (int idim = 0; idim < 3; ++idim) { MultiFab::Subtract( *m_fields.get(FieldType::Bfield_fp, Direction{idim}, lev), - *m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev), - 0, 0, 1, + *m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev), + 0, 0, 1, m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev)->nGrowVect()); } } @@ -217,13 +217,13 @@ void WarpX::HybridPICEvolveFields () for (int idim = 0; idim < 3; ++idim) { MultiFab::Add( *m_fields.get(FieldType::Bfield_fp, Direction{idim}, lev), - *m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev), - 0, 0, 1, + *m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev), + 0, 0, 1, m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev)->nGrowVect()); MultiFab::Add( *m_fields.get(FieldType::Efield_fp, Direction{idim}, lev), - *m_fields.get(FieldType::hybrid_E_fp_external, Direction{idim}, lev), - 0, 0, 1, + *m_fields.get(FieldType::hybrid_E_fp_external, Direction{idim}, lev), + 0, 0, 1, m_fields.get(FieldType::hybrid_E_fp_external, Direction{idim}, lev)->nGrowVect()); } } From 71116c719afe39f7e21b166a0f1fb25580cae9f1 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 25 Sep 2024 10:31:54 -0700 Subject: [PATCH 13/86] Fixing hybrid multifab wrapper sin PICMI. Signed-off-by: S. 
Eric Clark <25495882+clarkse@users.noreply.github.com> --- Python/pywarpx/fields.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Python/pywarpx/fields.py b/Python/pywarpx/fields.py index 31dde3d8a4c..bcd71a9f11a 100644 --- a/Python/pywarpx/fields.py +++ b/Python/pywarpx/fields.py @@ -705,37 +705,37 @@ def BzFPExternalWrapper(level=0, include_ghosts=False): def ExHybridExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_hyb_external[x]", level=level, include_ghosts=include_ghosts + mf_name="hybrid_E_fp_external", idir=0, level=level, include_ghosts=include_ghosts ) def EyHybridExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_hyb_external[y]", level=level, include_ghosts=include_ghosts + mf_name="hybrid_E_fp_external", idir=1, level=level, include_ghosts=include_ghosts ) def EzHybridExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_hyb_external[z]", level=level, include_ghosts=include_ghosts + mf_name="hybrid_E_fp_external", idir=2, level=level, include_ghosts=include_ghosts ) def BxHybridExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_hyb_external[x]", level=level, include_ghosts=include_ghosts + mf_name="hybrid_B_fp_external", idir=0, level=level, include_ghosts=include_ghosts ) def ByHybridExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_hyb_external[y]", level=level, include_ghosts=include_ghosts + mf_name="hybrid_B_fp_external", idir=1, level=level, include_ghosts=include_ghosts ) def BzHybridExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_hyb_external[z]", level=level, include_ghosts=include_ghosts + mf_name="hybrid_B_fp_external", idir=2, level=level, include_ghosts=include_ghosts ) def JxFPWrapper(level=0, include_ghosts=False): From bc127a81f38d4200176e6ebe79b1bfbd6f3da444 Mon 
Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 25 Sep 2024 17:32:32 +0000 Subject: [PATCH 14/86] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- Python/pywarpx/fields.py | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/Python/pywarpx/fields.py b/Python/pywarpx/fields.py index eefc213ced7..ffcd09fb4a3 100644 --- a/Python/pywarpx/fields.py +++ b/Python/pywarpx/fields.py @@ -706,37 +706,55 @@ def BzFPExternalWrapper(level=0, include_ghosts=False): def ExHybridExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="hybrid_E_fp_external", idir=0, level=level, include_ghosts=include_ghosts + mf_name="hybrid_E_fp_external", + idir=0, + level=level, + include_ghosts=include_ghosts, ) def EyHybridExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="hybrid_E_fp_external", idir=1, level=level, include_ghosts=include_ghosts + mf_name="hybrid_E_fp_external", + idir=1, + level=level, + include_ghosts=include_ghosts, ) def EzHybridExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="hybrid_E_fp_external", idir=2, level=level, include_ghosts=include_ghosts + mf_name="hybrid_E_fp_external", + idir=2, + level=level, + include_ghosts=include_ghosts, ) def BxHybridExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="hybrid_B_fp_external", idir=0, level=level, include_ghosts=include_ghosts + mf_name="hybrid_B_fp_external", + idir=0, + level=level, + include_ghosts=include_ghosts, ) def ByHybridExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="hybrid_B_fp_external", idir=1, level=level, include_ghosts=include_ghosts + mf_name="hybrid_B_fp_external", + idir=1, + level=level, + include_ghosts=include_ghosts, ) def BzHybridExternalWrapper(level=0, 
include_ghosts=False): return _MultiFABWrapper( - mf_name="hybrid_B_fp_external", idir=2, level=level, include_ghosts=include_ghosts + mf_name="hybrid_B_fp_external", + idir=2, + level=level, + include_ghosts=include_ghosts, ) From 8d39c5218863b9be40e22df0b7e9bafbeed7a530 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 25 Sep 2024 16:43:07 -0700 Subject: [PATCH 15/86] Fixing some BC issues. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- .../HybridPICModel/HybridPICModel.cpp | 18 ++++++++++++++---- .../FiniteDifferenceSolver/HybridPICSolveE.cpp | 2 ++ .../FieldSolver/WarpXPushFieldsHybridPIC.cpp | 5 ++--- 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 9373ff2bdf2..53d9494c67f 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -189,7 +189,6 @@ void HybridPICModel::InitData () } auto & warpx = WarpX::GetInstance(); - const auto& mypc = warpx.GetPartContainer(); m_B_external_parser[0] = std::make_unique( utils::parser::makeParser(m_Bx_ext_grid_function,{"x","y","z","t"})); @@ -302,7 +301,9 @@ void HybridPICModel::GetCurrentExternal (bool skip_check /*=false*/) void HybridPICModel::GetFieldsExternal (amrex::Real t) { + using ablastr::fields::Direction; auto& warpx = WarpX::GetInstance(); + for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { GetExternalFieldFromExpression( @@ -311,6 +312,12 @@ void HybridPICModel::GetFieldsExternal (amrex::Real t) GetExternalFieldFromExpression( FieldType::hybrid_E_fp_external, m_E_external, lev, t); + for (int idim=0; idim < 3; idim++) { + auto mf_Bext = warpx.m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev); + 
mf_Bext->FillBoundary(warpx.Geom(lev).periodicity()); + auto mf_Eext = warpx.m_fields.get(FieldType::hybrid_E_fp_external, Direction{idim}, lev); + mf_Eext->FillBoundary(warpx.Geom(lev).periodicity()); + } } } @@ -336,9 +343,11 @@ void HybridPICModel::GetExternalFieldFromExpression ( auto dx_lev = warpx.Geom(lev).CellSizeArray(); const RealBox& real_box = warpx.Geom(lev).ProbDomain(); - auto const& mfx = warpx.m_fields.get(field_type, Direction{0}, lev); - auto const& mfy = warpx.m_fields.get(field_type, Direction{1}, lev); - auto const& mfz = warpx.m_fields.get(field_type, Direction{2}, lev); + ablastr::fields::VectorField field = warpx.m_fields.get_alldirs(field_type, lev); + + amrex::MultiFab* mfx = field[0]; + amrex::MultiFab* mfy = field[1]; + amrex::MultiFab* mfz = field[2]; const amrex::IntVect x_nodal_flag = mfx->ixType().toIntVect(); const amrex::IntVect y_nodal_flag = mfy->ixType().toIntVect(); @@ -449,6 +458,7 @@ void HybridPICModel::GetExternalFieldFromExpression ( } ); } + amrex::Gpu::streamSynchronize(); } void HybridPICModel::CalculateCurrentAmpere ( diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 97f436bfe76..e26398ac873 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -387,6 +387,8 @@ void FiniteDifferenceSolver::HybridPICSolveE ( amrex::Abort(Utils::TextMsg::Err( "HybridSolveE: The hybrid-PIC electromagnetic solver algorithm must be used")); } + auto& warpx = WarpX::GetInstance(); + warpx.ApplyEfieldBoundary(lev, PatchType::fine); } #ifdef WARPX_DIM_RZ diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index 29d8bebda40..97afb756d1d 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -88,7 +88,6 @@ void WarpX::HybridPICEvolveFields 
() m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev)->nGrowVect()); } } - FillBoundaryB(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); } // Get the external current @@ -208,6 +207,8 @@ void WarpX::HybridPICEvolveFields () m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level), t_eval, false ); + FillBoundaryE(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); + // Handle field splitting for Hybrid field push if (add_external_fields) { m_hybrid_pic_model->GetFieldsExternal(t_eval); @@ -227,9 +228,7 @@ void WarpX::HybridPICEvolveFields () m_fields.get(FieldType::hybrid_E_fp_external, Direction{idim}, lev)->nGrowVect()); } } - FillBoundaryB(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); } - FillBoundaryE(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); // Copy the rho^{n+1} values to rho_fp_temp and the J_i^{n+1/2} values to // current_fp_temp since at the next step those values will be needed as From a546023bfa9431739697c27de260bc2c09f2725b Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Thu, 26 Sep 2024 11:21:12 -0700 Subject: [PATCH 16/86] Fixed segfault when accessing edge_lengths MF during loading of analytical field values into ghost cells. The E/B fields and edge_lengths have different numbers of ghost cells. Signed-off-by: S. 
Eric Clark <25495882+clarkse@users.noreply.github.com> --- .../HybridPICModel/HybridPICModel.H | 3 +- .../HybridPICModel/HybridPICModel.cpp | 34 +++++++------ .../FieldSolver/WarpXPushFieldsHybridPIC.cpp | 50 +++++++++---------- Source/WarpX.cpp | 2 +- 4 files changed, 45 insertions(+), 44 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index 3d8f1448612..ee75c695c43 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -53,8 +53,7 @@ public: const int ncomps, const amrex::IntVect& ngJ, const amrex::IntVect& ngRho, - const amrex::IntVect& ngE, - const amrex::IntVect& ngB, + const amrex::IntVect& ngEB, const amrex::IntVect& jx_nodal_flag, const amrex::IntVect& jy_nodal_flag, const amrex::IntVect& jz_nodal_flag, diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 53d9494c67f..b9fef6bb195 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -74,7 +74,7 @@ void HybridPICModel::AllocateLevelMFs ( int lev, const BoxArray& ba, const DistributionMapping& dm, const int ncomps, const IntVect& ngJ, const IntVect& ngRho, - const IntVect& ngE, const IntVect& ngB, + const IntVect& ngEB, const IntVect& jx_nodal_flag, const IntVect& jy_nodal_flag, const IntVect& jz_nodal_flag, @@ -139,22 +139,22 @@ void HybridPICModel::AllocateLevelMFs ( // These are nodal to match when B-field is added in evaluation of Ohm's law fields.alloc_init(FieldType::hybrid_B_fp_external, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag), - dm, ncomps, ngB, 0.0_rt); + dm, ncomps, ngEB, 0.0_rt); 
fields.alloc_init(FieldType::hybrid_B_fp_external, Direction{1}, lev, amrex::convert(ba, By_nodal_flag), - dm, ncomps, ngB, 0.0_rt); + dm, ncomps, ngEB, 0.0_rt); fields.alloc_init(FieldType::hybrid_B_fp_external, Direction{2}, lev, amrex::convert(ba, Bz_nodal_flag), - dm, ncomps, ngB, 0.0_rt); + dm, ncomps, ngEB, 0.0_rt); fields.alloc_init(FieldType::hybrid_E_fp_external, Direction{0}, lev, amrex::convert(ba, Ex_nodal_flag), - dm, ncomps, ngE, 0.0_rt); + dm, ncomps, ngEB, 0.0_rt); fields.alloc_init(FieldType::hybrid_E_fp_external, Direction{1}, lev, amrex::convert(ba, Ey_nodal_flag), - dm, ncomps, ngE, 0.0_rt); + dm, ncomps, ngEB, 0.0_rt); fields.alloc_init(FieldType::hybrid_E_fp_external, Direction{2}, lev, amrex::convert(ba, Ez_nodal_flag), - dm, ncomps, ngE, 0.0_rt); + dm, ncomps, ngEB, 0.0_rt); } #ifdef WARPX_DIM_RZ @@ -368,17 +368,19 @@ void HybridPICModel::GetExternalFieldFromExpression ( auto const& mfyfab = mfy->array(mfi); auto const& mfzfab = mfz->array(mfi); - amrex::Array4 lx, ly, lz; - if (EB::enabled()) { - lx = warpx.m_fields.get(FieldType::edge_lengths, Direction{0}, lev)->array(mfi); - ly = warpx.m_fields.get(FieldType::edge_lengths, Direction{1}, lev)->array(mfi); - lz = warpx.m_fields.get(FieldType::edge_lengths, Direction{2}, lev)->array(mfi); - } + // I am not sure this is needed. There are some guard cell matching issues with how the edges are computed. + // Since I want to fill in entire box + guard cells I can skip these checks. 
+ // amrex::Array4 lx, ly, lz; + // if (EB::enabled()) { + // lx = warpx.m_fields.get(FieldType::edge_lengths, Direction{0}, lev)->array(mfi); + // ly = warpx.m_fields.get(FieldType::edge_lengths, Direction{1}, lev)->array(mfi); + // lz = warpx.m_fields.get(FieldType::edge_lengths, Direction{2}, lev)->array(mfi); + // } amrex::ParallelFor (tbx, tby, tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k) { // skip if node is covered by an embedded boundary - if (lx && lx(i, j, k) <= 0) { return; } + //if (EB::enabled() && lx(i, j, k) <= 0) { return; } // Shift required in the x-, y-, or z- position // depending on the index type of the multifab @@ -406,7 +408,7 @@ void HybridPICModel::GetExternalFieldFromExpression ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { // skip if node is covered by an embedded boundary - if (ly && ly(i, j, k) <= 0) { return; } + // if (ly && ly(i, j, k) <= 0) { return; } #if defined(WARPX_DIM_1D_Z) const amrex::Real x = 0._rt; @@ -432,7 +434,7 @@ void HybridPICModel::GetExternalFieldFromExpression ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { // skip if node is covered by an embedded boundary - if (lz && lz(i, j, k) <= 0) { return; } + // if (lz && lz(i, j, k) <= 0) { return; } #if defined(WARPX_DIM_1D_Z) const amrex::Real x = 0._rt; diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index 97afb756d1d..0c7d758fc35 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -34,6 +34,31 @@ void WarpX::HybridPICEvolveFields () finest_level == 0, "Ohm's law E-solve only works with a single level."); + // Get requested number of substeps to use + const int sub_steps = m_hybrid_pic_model->m_substeps; + + amrex::Real t_eval = gett_old(0); + amrex::Real sub_dt = 0.5_rt*dt[0]/sub_steps; + + const bool add_external_fields = m_hybrid_pic_model->m_add_external_fields; + + // Handle field splitting for Hybrid field push + if 
(add_external_fields) { + // Get the external fields + m_hybrid_pic_model->GetFieldsExternal(t_eval); + + // If using split fields, subtract the external field at the old time + for (int lev = 0; lev <= finest_level; ++lev) { + for (int idim = 0; idim < 3; ++idim) { + MultiFab::Subtract( + *m_fields.get(FieldType::Bfield_fp, Direction{idim}, lev), + *m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev), + 0, 0, 1, + m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev)->nGrowVect()); + } + } + } + // The particles have now been pushed to their t_{n+1} positions. // Perform charge deposition in component 0 of rho_fp at t_{n+1}. mypc->DepositCharge(m_fields.get_mr_levels(FieldType::rho_fp, finest_level), 0._rt); @@ -65,31 +90,6 @@ void WarpX::HybridPICEvolveFields () } } - // Get requested number of substeps to use - const int sub_steps = m_hybrid_pic_model->m_substeps; - - amrex::Real t_eval = gett_old(0); - amrex::Real sub_dt = 0.5_rt*dt[0]/sub_steps; - - const bool add_external_fields = m_hybrid_pic_model->m_add_external_fields; - - // Handle field splitting for Hybrid field push - if (add_external_fields) { - // Get the external fields - m_hybrid_pic_model->GetFieldsExternal(t_eval); - - // If using split fields, subtract the external field at the old time - for (int lev = 0; lev <= finest_level; ++lev) { - for (int idim = 0; idim < 3; ++idim) { - MultiFab::Subtract( - *m_fields.get(FieldType::Bfield_fp, Direction{idim}, lev), - *m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev), - 0, 0, 1, - m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev)->nGrowVect()); - } - } - } - // Get the external current m_hybrid_pic_model->GetCurrentExternal(); diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index ea58d6226b0..88b6e1d28c1 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -2243,7 +2243,7 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm { 
m_hybrid_pic_model->AllocateLevelMFs( m_fields, - lev, ba, dm, ncomps, ngJ, ngRho, ngEB, ngEB, jx_nodal_flag, jy_nodal_flag, + lev, ba, dm, ncomps, ngJ, ngRho, ngEB, jx_nodal_flag, jy_nodal_flag, jz_nodal_flag, rho_nodal_flag, Ex_nodal_flag, Ey_nodal_flag, Ez_nodal_flag, Bx_nodal_flag, By_nodal_flag, Bz_nodal_flag ); From e31b6397d74e8ab662a1a9968134bcbf6954954c Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Thu, 26 Sep 2024 15:51:58 -0700 Subject: [PATCH 17/86] Adding checks to make sure Hybrid external field loading is skipped when not enabled. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- .../FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index b9fef6bb195..8360dda6331 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -285,7 +285,8 @@ void HybridPICModel::InitData () // if the current is time dependent which is what needs to be done to // write time independent fields on the first step. GetCurrentExternal(true); - GetFieldsExternal(warpx.gett_new(0)); + if (m_add_external_fields) + GetFieldsExternal(warpx.gett_new(0)); } void HybridPICModel::GetCurrentExternal (bool skip_check /*=false*/) @@ -301,6 +302,8 @@ void HybridPICModel::GetCurrentExternal (bool skip_check /*=false*/) void HybridPICModel::GetFieldsExternal (amrex::Real t) { + if (!m_add_external_fields) return; + using ablastr::fields::Direction; auto& warpx = WarpX::GetInstance(); From 2ca719037c8b6090bed2d87e224a81b264c2fe28 Mon Sep 17 00:00:00 2001 From: "S. 
Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Mon, 30 Sep 2024 12:11:04 -0700 Subject: [PATCH 18/86] Adding appropriate flags to mask embedded boundary properly during external data loading. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- .../HybridPICModel/HybridPICModel.cpp | 36 +++++++++++-------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 8360dda6331..f5d1277f584 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -371,19 +371,27 @@ void HybridPICModel::GetExternalFieldFromExpression ( auto const& mfyfab = mfy->array(mfi); auto const& mfzfab = mfz->array(mfi); - // I am not sure this is needed. There are some guard cell matching issues with how the edges are computed. - // Since I want to fill in entire box + guard cells I can skip these checks. 
- // amrex::Array4 lx, ly, lz; - // if (EB::enabled()) { - // lx = warpx.m_fields.get(FieldType::edge_lengths, Direction{0}, lev)->array(mfi); - // ly = warpx.m_fields.get(FieldType::edge_lengths, Direction{1}, lev)->array(mfi); - // lz = warpx.m_fields.get(FieldType::edge_lengths, Direction{2}, lev)->array(mfi); - // } + amrex::Box lxb, lyb, lzb; + amrex::Array4 lx, ly, lz; + if (EB::enabled()) { + + auto const& mf_lx = *warpx.m_fields.get(FieldType::edge_lengths, Direction{0}, lev); + auto const& mf_ly = *warpx.m_fields.get(FieldType::edge_lengths, Direction{1}, lev); + auto const& mf_lz = *warpx.m_fields.get(FieldType::edge_lengths, Direction{2}, lev); + + lxb = mfi.growntilebox(mf_lx.nGrowVect()); + lyb = mfi.growntilebox(mf_ly.nGrowVect()); + lzb = mfi.growntilebox(mf_lz.nGrowVect()); + + lx = mf_lx.array(mfi); + ly = mf_ly.array(mfi); + lz = mf_lz.array(mfi); + } amrex::ParallelFor (tbx, tby, tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - // skip if node is covered by an embedded boundary - //if (EB::enabled() && lx(i, j, k) <= 0) { return; } + // skip if node is covered by an embedded boundary or outside of lx box array + if (lx && (!lxb.contains({i, j, k}) || lx(i, j, k) <= 0)) { return; } // Shift required in the x-, y-, or z- position // depending on the index type of the multifab @@ -410,8 +418,8 @@ void HybridPICModel::GetExternalFieldFromExpression ( mfxfab(i,j,k) = x_external(x,y,z,t); }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - // skip if node is covered by an embedded boundary - // if (ly && ly(i, j, k) <= 0) { return; } + // skip if node is covered by an embedded boundary or outside of ly box array + if (ly && (!lyb.contains({i, j, k}) || ly(i, j, k) <= 0)) { return; } #if defined(WARPX_DIM_1D_Z) const amrex::Real x = 0._rt; @@ -436,8 +444,8 @@ void HybridPICModel::GetExternalFieldFromExpression ( mfyfab(i,j,k) = y_external(x,y,z,t); }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - // skip if node is covered by an embedded boundary - // 
if (lz && lz(i, j, k) <= 0) { return; } + // skip if node is covered by an embedded boundary or outside of lz box array + if (lz && (!lzb.contains({i, j, k}) || lz(i, j, k) <= 0)) { return; } #if defined(WARPX_DIM_1D_Z) const amrex::Real x = 0._rt; From f1bcd35a745d1bea63fc6e063c46eb8671c50683 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Tue, 22 Oct 2024 10:45:51 -0700 Subject: [PATCH 19/86] Updating so time varying vector potential A is initialized, then B is computed by curlA and E is computed by computing numerical derivative of seperable time component. --- Python/pywarpx/HybridPICModel.py | 1 + Python/pywarpx/WarpX.py | 3 +- Python/pywarpx/__init__.py | 2 +- Python/pywarpx/fields.py | 26 ++++++ Python/pywarpx/picmi.py | 68 ++++++-------- .../FiniteDifferenceSolver/CMakeLists.txt | 1 + .../FiniteDifferenceSolver.H | 31 +++++++ .../HybridPICModel/CMakeLists.txt | 1 + .../HybridPICModel/HybridPICModel.H | 47 +--------- .../HybridPICModel/HybridPICModel.cpp | 90 ++++--------------- .../HybridPICModel/Make.package | 1 + .../HybridPICSolveE.cpp | 13 ++- .../FiniteDifferenceSolver/Make.package | 1 + .../FieldSolver/WarpXPushFieldsHybridPIC.cpp | 6 +- Source/Fields.H | 1 + 15 files changed, 118 insertions(+), 174 deletions(-) diff --git a/Python/pywarpx/HybridPICModel.py b/Python/pywarpx/HybridPICModel.py index 7bd8c961950..c0de692ef71 100644 --- a/Python/pywarpx/HybridPICModel.py +++ b/Python/pywarpx/HybridPICModel.py @@ -9,3 +9,4 @@ from .Bucket import Bucket hybridpicmodel = Bucket("hybrid_pic_model") +external_vector_potential = Bucket("external_vector_potential") \ No newline at end of file diff --git a/Python/pywarpx/WarpX.py b/Python/pywarpx/WarpX.py index 9ef7019cda9..ed40919945c 100644 --- a/Python/pywarpx/WarpX.py +++ b/Python/pywarpx/WarpX.py @@ -20,7 +20,7 @@ from .Diagnostics import diagnostics, reduced_diagnostics from .EB2 import eb2 from .Geometry import geometry -from .HybridPICModel import 
hybridpicmodel +from .HybridPICModel import hybridpicmodel, external_vector_potential from .Interpolation import interpolation from .Lasers import lasers, lasers_list from .Particles import particles, particles_list @@ -46,6 +46,7 @@ def create_argv_list(self, **kw): argv += amrex.attrlist() argv += geometry.attrlist() argv += hybridpicmodel.attrlist() + argv += external_vector_potential.attrlist() argv += boundary.attrlist() argv += algo.attrlist() argv += interpolation.attrlist() diff --git a/Python/pywarpx/__init__.py b/Python/pywarpx/__init__.py index 054ca451756..b8e025342dd 100644 --- a/Python/pywarpx/__init__.py +++ b/Python/pywarpx/__init__.py @@ -33,7 +33,7 @@ from .Diagnostics import diagnostics, reduced_diagnostics # noqa from .EB2 import eb2 # noqa from .Geometry import geometry # noqa -from .HybridPICModel import hybridpicmodel # noqa +from .HybridPICModel import hybridpicmodel, external_vector_potential # noqa from .Interpolation import interpolation # noqa from .Lasers import lasers # noqa from .LoadThirdParty import load_cupy # noqa diff --git a/Python/pywarpx/fields.py b/Python/pywarpx/fields.py index ffcd09fb4a3..0bc8ad10a27 100644 --- a/Python/pywarpx/fields.py +++ b/Python/pywarpx/fields.py @@ -704,6 +704,32 @@ def BzFPExternalWrapper(level=0, include_ghosts=False): ) +def AxHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="hybrid_A_fp_external", + idir=0, + level=level, + include_ghosts=include_ghosts, + ) + + +def AyHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="hybrid_A_fp_external", + idir=1, + level=level, + include_ghosts=include_ghosts, + ) + + +def AzHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="hybrid_A_fp_external", + idir=2, + level=level, + include_ghosts=include_ghosts, + ) + def ExHybridExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( mf_name="hybrid_E_fp_external", diff --git 
a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index fb9662510ce..67f051aa96e 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1800,12 +1800,10 @@ def __init__( Jx_external_function=None, Jy_external_function=None, Jz_external_function=None, - Ex_expression=None, - Ey_expression=None, - Ez_expression=None, - Bx_expression=None, - By_expression=None, - Bz_expression=None, + Ax_external_function=None, + Ay_external_function=None, + Az_external_function=None, + A_time_external_function=None, **kw, ): self.grid = grid @@ -1826,24 +1824,19 @@ def __init__( self.add_external_fields = None - self.Ex_external_function = Ex_expression - self.Ey_external_function = Ey_expression - self.Ez_external_function = Ez_expression - - self.Bx_external_function = Bx_expression - self.By_external_function = By_expression - self.Bz_external_function = Bz_expression + self.Ax_external_function = Ax_external_function + self.Ay_external_function = Ay_external_function + self.Az_external_function = Az_external_function if ( - Ex_expression is not None - or Ey_expression is not None - or Ez_expression is not None - or Bx_expression is not None - or By_expression is not None - or Bz_expression is not None + Ax_external_function is not None + or Ay_external_function is not None + or Az_external_function is not None ): self.add_external_fields = True + self.A_time_external_function = A_time_external_function + # Handle keyword arguments used in expressions self.user_defined_kw = {} for k in list(kw.keys()): @@ -1893,44 +1886,33 @@ def solver_initialize_inputs(self): ), ) pywarpx.hybridpicmodel.add_external_fields = self.add_external_fields - pywarpx.hybridpicmodel.__setattr__( - "Bx_external_grid_function(x,y,z,t)", + pywarpx.external_vector_potential.__setattr__( + "Ax_external_grid_function(x,y,z)", pywarpx.my_constants.mangle_expression( - self.Bx_external_function, self.mangle_dict + self.Ax_external_function, self.mangle_dict ), ) - 
pywarpx.hybridpicmodel.__setattr__( - "By_external_grid_function(x,y,z,t)", + pywarpx.external_vector_potential.__setattr__( + "Ay_external_grid_function(x,y,z)", pywarpx.my_constants.mangle_expression( - self.By_external_function, self.mangle_dict + self.Ay_external_function, self.mangle_dict ), ) - pywarpx.hybridpicmodel.__setattr__( - "Bz_external_grid_function(x,y,z,t)", + pywarpx.external_vector_potential.__setattr__( + "Az_external_grid_function(x,y,z)", pywarpx.my_constants.mangle_expression( - self.Bz_external_function, self.mangle_dict + self.Az_external_function, self.mangle_dict ), ) - pywarpx.hybridpicmodel.__setattr__( - "Ex_external_grid_function(x,y,z,t)", + pywarpx.external_vector_potential.__setattr__( + "A_time_external_function(t)", pywarpx.my_constants.mangle_expression( - self.Ex_external_function, self.mangle_dict - ), - ) - pywarpx.hybridpicmodel.__setattr__( - "Ey_external_grid_function(x,y,z,t)", - pywarpx.my_constants.mangle_expression( - self.Ey_external_function, self.mangle_dict - ), - ) - pywarpx.hybridpicmodel.__setattr__( - "Ez_external_grid_function(x,y,z,t)", - pywarpx.my_constants.mangle_expression( - self.Ez_external_function, self.mangle_dict + self.A_time_external_function, self.mangle_dict ), ) + class ElectrostaticSolver(picmistandard.PICMI_ElectrostaticSolver): """ See `Input Parameters `__ for more information. 
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/CMakeLists.txt b/Source/FieldSolver/FiniteDifferenceSolver/CMakeLists.txt index 19c2092d1a6..7539d706632 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/CMakeLists.txt +++ b/Source/FieldSolver/FiniteDifferenceSolver/CMakeLists.txt @@ -3,6 +3,7 @@ foreach(D IN LISTS WarpX_DIMS) target_sources(lib_${SD} PRIVATE ComputeDivE.cpp + ComputeCurlA.cpp EvolveB.cpp EvolveBPML.cpp EvolveE.cpp diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index 11abeedf90a..0e762d3593d 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -182,6 +182,21 @@ class FiniteDifferenceSolver ablastr::fields::VectorField const& edge_lengths, int lev ); + /** + * \brief Calculation of B field from the vector potential A + * B = (curl x A) / mu0. + * + * \param[out] Bfield vector of current MultiFabs at a given level + * \param[in] Afield vector of magnetic field MultiFabs at a given level + * \param[in] edge_lengths length of edges along embedded boundaries + * \param[in] lev level number for the calculation + */ + void ComputeCurlA ( + ablastr::fields::VectorField& Bfield, + ablastr::fields::VectorField const& Afield, + ablastr::fields::VectorField const& edge_lengths, + int lev ); + private: ElectromagneticSolverAlgo m_fdtd_algo; @@ -262,6 +277,14 @@ class FiniteDifferenceSolver int lev ); + template + void ComputeCurlACylindrical ( + ablastr::fields::VectorField& Bfield, + ablastr::fields::VectorField const& Afield, + ablastr::fields::VectorField const& edge_lengths, + int lev + ); + #else template< typename T_Algo > void EvolveBCartesian ( @@ -367,6 +390,14 @@ class FiniteDifferenceSolver ablastr::fields::VectorField const& edge_lengths, int lev ); + + template + void ComputeCurlACartesian ( + ablastr::fields::VectorField & Bfield, + 
ablastr::fields::VectorField const& Afield, + ablastr::fields::VectorField const& edge_lengths, + int lev + ); #endif }; diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/CMakeLists.txt b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/CMakeLists.txt index 1367578b0aa..bb29baefcb9 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/CMakeLists.txt +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/CMakeLists.txt @@ -3,5 +3,6 @@ foreach(D IN LISTS WarpX_DIMS) target_sources(lib_${SD} PRIVATE HybridPICModel.cpp + ExternalVectorPotential.cpp ) endforeach() diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index ee75c695c43..e54df07f869 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -15,6 +15,7 @@ #include "Fields.H" +#include "ExternalVectorPotential.H" #include "Utils/WarpXAlgorithmSelection.H" #include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H" @@ -214,53 +215,11 @@ public: std::string m_Jz_ext_grid_function = "0.0"; std::array< std::unique_ptr, 3> m_J_external_parser; std::array< amrex::ParserExecutor<4>, 3> m_J_external; - bool m_external_field_has_time_dependence = false; + bool m_external_current_has_time_dependence = false; /** External E/B fields */ bool m_add_external_fields = false; - - std::string m_Bx_ext_grid_function = "0.0"; - std::string m_By_ext_grid_function = "0.0"; - std::string m_Bz_ext_grid_function = "0.0"; - std::array< std::unique_ptr, 3> m_B_external_parser; - std::array< amrex::ParserExecutor<4>, 3> m_B_external; - - std::string m_Ex_ext_grid_function = "0.0"; - std::string m_Ey_ext_grid_function = "0.0"; - std::string m_Ez_ext_grid_function = "0.0"; - std::array< std::unique_ptr, 3> m_E_external_parser; - std::array< 
amrex::ParserExecutor<4>, 3> m_E_external; - - // Declare multifabs specifically needed for the hybrid-PIC model - // amrex::Vector< std::unique_ptr > rho_fp_temp; - // amrex::Vector, 3 > > current_fp_temp; - // amrex::Vector, 3 > > current_fp_ampere; - // amrex::Vector, 3 > > current_fp_external; - // amrex::Vector< std::unique_ptr > electron_pressure_fp; - - // amrex::Vector, 3 > > Bfield_hyb_external; - // amrex::Vector, 3 > > Efield_hyb_external; - // amrex::Vector, 3 > > Bfield_hyb_self; - // amrex::Vector, 3 > > Efield_hyb_self; - - // // Helper functions to retrieve hybrid-PIC multifabs - // [[nodiscard]] amrex::MultiFab* - // get_pointer_current_fp_ampere (int lev, int direction) const - // { - // return current_fp_ampere[lev][direction].get(); - // } - - // [[nodiscard]] amrex::MultiFab* - // get_pointer_current_fp_external (int lev, int direction) const - // { - // return current_fp_external[lev][direction].get(); - // } - - // [[nodiscard]] amrex::MultiFab* - // get_pointer_electron_pressure_fp (int lev) const - // { - // return electron_pressure_fp[lev].get(); - // } + std::unique_ptr m_external_EB; /** Gpu Vector with index type of the Jx multifab */ amrex::GpuArray Jx_IndexType; diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 8360dda6331..0b71cd3d306 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -13,6 +13,7 @@ #include "EmbeddedBoundary/Enabled.H" #include "Fields.H" #include "Particles/MultiParticleContainer.H" +#include "ExternalVectorPotential.H" #include "WarpX.H" using namespace amrex; @@ -60,12 +61,7 @@ void HybridPICModel::ReadParameters () pp_hybrid.query("add_external_fields", m_add_external_fields); if (m_add_external_fields) { - pp_hybrid.query("Bx_external_grid_function(x,y,z,t)", 
m_Bx_ext_grid_function); - pp_hybrid.query("By_external_grid_function(x,y,z,t)", m_By_ext_grid_function); - pp_hybrid.query("Bz_external_grid_function(x,y,z,t)", m_Bz_ext_grid_function); - pp_hybrid.query("Ex_external_grid_function(x,y,z,t)", m_Ex_ext_grid_function); - pp_hybrid.query("Ey_external_grid_function(x,y,z,t)", m_Ey_ext_grid_function); - pp_hybrid.query("Ez_external_grid_function(x,y,z,t)", m_Ez_ext_grid_function); + m_external_EB = std::make_unique(); } } @@ -136,25 +132,13 @@ void HybridPICModel::AllocateLevelMFs ( dm, ncomps, IntVect(AMREX_D_DECL(0,0,0)), 0.0_rt); if (m_add_external_fields) { - // These are nodal to match when B-field is added in evaluation of Ohm's law - fields.alloc_init(FieldType::hybrid_B_fp_external, Direction{0}, - lev, amrex::convert(ba, Bx_nodal_flag), - dm, ncomps, ngEB, 0.0_rt); - fields.alloc_init(FieldType::hybrid_B_fp_external, Direction{1}, - lev, amrex::convert(ba, By_nodal_flag), - dm, ncomps, ngEB, 0.0_rt); - fields.alloc_init(FieldType::hybrid_B_fp_external, Direction{2}, - lev, amrex::convert(ba, Bz_nodal_flag), - dm, ncomps, ngEB, 0.0_rt); - fields.alloc_init(FieldType::hybrid_E_fp_external, Direction{0}, - lev, amrex::convert(ba, Ex_nodal_flag), - dm, ncomps, ngEB, 0.0_rt); - fields.alloc_init(FieldType::hybrid_E_fp_external, Direction{1}, - lev, amrex::convert(ba, Ey_nodal_flag), - dm, ncomps, ngEB, 0.0_rt); - fields.alloc_init(FieldType::hybrid_E_fp_external, Direction{2}, - lev, amrex::convert(ba, Ez_nodal_flag), - dm, ncomps, ngEB, 0.0_rt); + m_external_EB->AllocateLevelMFs( + fields, + lev, ba, dm, + ncomps, ngEB, + Ex_nodal_flag, Ey_nodal_flag, Ez_nodal_flag, + Bx_nodal_flag, By_nodal_flag, Bz_nodal_flag + ); } #ifdef WARPX_DIM_RZ @@ -185,30 +169,10 @@ void HybridPICModel::InitData () // check if the external current parsers depend on time for (int i=0; i<3; i++) { const std::set J_ext_symbols = m_J_external_parser[i]->symbols(); - m_external_field_has_time_dependence += J_ext_symbols.count("t"); + 
m_external_current_has_time_dependence += J_ext_symbols.count("t"); } auto & warpx = WarpX::GetInstance(); - - m_B_external_parser[0] = std::make_unique( - utils::parser::makeParser(m_Bx_ext_grid_function,{"x","y","z","t"})); - m_B_external_parser[1] = std::make_unique( - utils::parser::makeParser(m_By_ext_grid_function,{"x","y","z","t"})); - m_B_external_parser[2] = std::make_unique( - utils::parser::makeParser(m_Bz_ext_grid_function,{"x","y","z","t"})); - m_B_external[0] = m_B_external_parser[0]->compile<4>(); - m_B_external[1] = m_B_external_parser[1]->compile<4>(); - m_B_external[2] = m_B_external_parser[2]->compile<4>(); - - m_E_external_parser[0] = std::make_unique( - utils::parser::makeParser(m_Ex_ext_grid_function,{"x","y","z","t"})); - m_E_external_parser[1] = std::make_unique( - utils::parser::makeParser(m_Ey_ext_grid_function,{"x","y","z","t"})); - m_E_external_parser[2] = std::make_unique( - utils::parser::makeParser(m_Ez_ext_grid_function,{"x","y","z","t"})); - m_E_external[0] = m_E_external_parser[0]->compile<4>(); - m_E_external[1] = m_E_external_parser[1]->compile<4>(); - m_E_external[2] = m_E_external_parser[2]->compile<4>(); using ablastr::fields::Direction; // Get the grid staggering of the fields involved in calculating E @@ -286,12 +250,12 @@ void HybridPICModel::InitData () // write time independent fields on the first step. 
GetCurrentExternal(true); if (m_add_external_fields) - GetFieldsExternal(warpx.gett_new(0)); + m_external_EB->InitData(); } void HybridPICModel::GetCurrentExternal (bool skip_check /*=false*/) { - if (!skip_check && !m_external_field_has_time_dependence) { return; } + if (!skip_check && !m_external_current_has_time_dependence) { return; } auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) @@ -300,37 +264,13 @@ void HybridPICModel::GetCurrentExternal (bool skip_check /*=false*/) } } -void HybridPICModel::GetFieldsExternal (amrex::Real t) -{ - if (!m_add_external_fields) return; - - using ablastr::fields::Direction; - auto& warpx = WarpX::GetInstance(); - - for (int lev = 0; lev <= warpx.finestLevel(); ++lev) - { - GetExternalFieldFromExpression( - FieldType::hybrid_B_fp_external, - m_B_external, lev, t); - GetExternalFieldFromExpression( - FieldType::hybrid_E_fp_external, - m_E_external, lev, t); - for (int idim=0; idim < 3; idim++) { - auto mf_Bext = warpx.m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev); - mf_Bext->FillBoundary(warpx.Geom(lev).periodicity()); - auto mf_Eext = warpx.m_fields.get(FieldType::hybrid_E_fp_external, Direction{idim}, lev); - mf_Eext->FillBoundary(warpx.Geom(lev).periodicity()); - } - } -} - void HybridPICModel::GetExternalFieldFromExpression ( FieldType field_type, std::array< amrex::ParserExecutor<4>, 3> const& expression, - int lev) + int lev) { - auto & warpx = WarpX::GetInstance(); - GetExternalFieldFromExpression(field_type, expression, lev, warpx.gett_new(lev)); + auto& warpx = WarpX::GetInstance(); + GetExternalFieldFromExpression(FieldType::hybrid_current_fp_external, m_J_external, lev, warpx.gett_new(0)); } void HybridPICModel::GetExternalFieldFromExpression ( diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/Make.package b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/Make.package index 8145cfcef2f..d4fa9bfc390 100644 --- 
a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/Make.package +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/Make.package @@ -1,3 +1,4 @@ CEXE_sources += HybridPICModel.cpp +CEXE_sources += ExternalVectorPotential.cpp VPATH_LOCATIONS += $(WARPX_HOME)/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index e26398ac873..5f361ab4632 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -489,10 +489,9 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Array4 const& Br = Bfield[0]->const_array(mfi); Array4 const& Bt = Bfield[1]->const_array(mfi); Array4 const& Bz = Bfield[2]->const_array(mfi); - - Array4 const& Br_ext = Bfield_external[0]->const_array(mfi); - Array4 const& Bt_ext = Bfield_external[1]->const_array(mfi); - Array4 const& Bz_ext = Bfield_external[2]->const_array(mfi); + Array4 const& Br_ext = Bfield_external[0]->const_array(mfi); + Array4 const& Bt_ext = Bfield_external[1]->const_array(mfi); + Array4 const& Bz_ext = Bfield_external[2]->const_array(mfi); // Loop over the cells and update the nodal E field amrex::ParallelFor(mfi.tilebox(), [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ @@ -557,15 +556,15 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Array4 const& Er = Efield[0]->array(mfi); Array4 const& Et = Efield[1]->array(mfi); Array4 const& Ez = Efield[2]->array(mfi); - Array4 const& Er_ext = Efield_external[0]->const_array(mfi); - Array4 const& Et_ext = Efield_external[1]->const_array(mfi); - Array4 const& Ez_ext = Efield_external[2]->const_array(mfi); Array4 const& Jr = Jfield[0]->const_array(mfi); Array4 const& Jt = Jfield[1]->const_array(mfi); Array4 const& Jz = Jfield[2]->const_array(mfi); Array4 const& enE = enE_nodal_mf.const_array(mfi); Array4 const& rho = 
rhofield.const_array(mfi); Array4 const& Pe = Pefield.const_array(mfi); + Array4 const& Er_ext = Efield_external[0]->const_array(mfi); + Array4 const& Et_ext = Efield_external[1]->const_array(mfi); + Array4 const& Ez_ext = Efield_external[2]->const_array(mfi); amrex::Array4 lr, lz; if (EB::enabled()) { diff --git a/Source/FieldSolver/FiniteDifferenceSolver/Make.package b/Source/FieldSolver/FiniteDifferenceSolver/Make.package index b3708c411fa..bc71b9b51a2 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/Make.package +++ b/Source/FieldSolver/FiniteDifferenceSolver/Make.package @@ -5,6 +5,7 @@ CEXE_sources += EvolveF.cpp CEXE_sources += EvolveG.cpp CEXE_sources += EvolveECTRho.cpp CEXE_sources += ComputeDivE.cpp +CEXE_sources += ComputeCurlA.cpp CEXE_sources += MacroscopicEvolveE.cpp CEXE_sources += EvolveBPML.cpp CEXE_sources += EvolveEPML.cpp diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index 0c7d758fc35..48efbc5e879 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -45,7 +45,7 @@ void WarpX::HybridPICEvolveFields () // Handle field splitting for Hybrid field push if (add_external_fields) { // Get the external fields - m_hybrid_pic_model->GetFieldsExternal(t_eval); + m_hybrid_pic_model->m_external_EB->UpdateHybridExternalFields(t_eval, sub_dt); // If using split fields, subtract the external field at the old time for (int lev = 0; lev <= finest_level; ++lev) { @@ -154,7 +154,7 @@ void WarpX::HybridPICEvolveFields () if (add_external_fields) { // Get the external fields - m_hybrid_pic_model->GetFieldsExternal(t_eval); + m_hybrid_pic_model->m_external_EB->UpdateHybridExternalFields(t_eval, sub_dt); } // Now push the B field from t=n+1/2 to t=n+1 using the n+1/2 quantities @@ -211,7 +211,7 @@ void WarpX::HybridPICEvolveFields () // Handle field splitting for Hybrid field push if (add_external_fields) { - 
m_hybrid_pic_model->GetFieldsExternal(t_eval); + m_hybrid_pic_model->m_external_EB->UpdateHybridExternalFields(t_eval, sub_dt); // If using split fields, add the external field at the new time for (int lev = 0; lev <= finest_level; ++lev) { diff --git a/Source/Fields.H b/Source/Fields.H index d470ac33e43..05125e74a1e 100644 --- a/Source/Fields.H +++ b/Source/Fields.H @@ -52,6 +52,7 @@ namespace warpx::fields hybrid_current_fp_external, hybrid_B_fp_external, hybrid_E_fp_external, + hybrid_A_fp_external, Efield_cp, //!< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level Bfield_cp, //!< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level current_cp, //!< Only used with MR. The current that is used as a source for the field solver, on the coarse patch of each level From 15caad1431dd5a150242b0944e1c5bff8573e325 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Tue, 22 Oct 2024 11:44:58 -0700 Subject: [PATCH 20/86] Ironing out a few merge issues. 
--- .../HybridPICModel/HybridPICModel.H | 2 +- .../HybridPICModel/HybridPICModel.cpp | 10 +++++++--- Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp | 6 +++--- Source/Fields.H | 7 ++++--- 4 files changed, 15 insertions(+), 10 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index f44f29ff93e..12ea01d216b 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -207,7 +207,7 @@ public: /** External E/B fields */ bool m_add_external_fields = false; - std::unique_ptr m_external_EB; + std::unique_ptr m_external_vector_potential; /** Gpu Vector with index type of the Jx multifab */ amrex::GpuArray Jx_IndexType; diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index d7189a808a1..03c22807969 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -61,7 +61,7 @@ void HybridPICModel::ReadParameters () pp_hybrid.query("add_external_fields", m_add_external_fields); if (m_add_external_fields) { - m_external_EB = std::make_unique(); + m_external_vector_potential = std::make_unique(); } } @@ -133,7 +133,7 @@ void HybridPICModel::AllocateLevelMFs ( dm, ncomps, IntVect(1), 0.0_rt); if (m_add_external_fields) { - m_external_EB->AllocateLevelMFs( + m_external_vector_potential->AllocateLevelMFs( fields, lev, ba, dm, ncomps, ngEB, @@ -259,11 +259,15 @@ void HybridPICModel::InitData () warpx.m_fields.get_alldirs(FieldType::edge_lengths, lev), warpx.m_fields.get_alldirs(FieldType::face_areas, lev)); } + + if (m_add_external_fields) { + m_external_vector_potential->InitData(); + } } void 
HybridPICModel::GetCurrentExternal () { - if (!skip_check && !m_external_current_has_time_dependence) { return; } + if (!m_external_current_has_time_dependence) { return; } auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index bce5df48dc4..6db14e682dc 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -45,7 +45,7 @@ void WarpX::HybridPICEvolveFields () // Handle field splitting for Hybrid field push if (add_external_fields) { // Get the external fields - m_hybrid_pic_model->m_external_EB->UpdateHybridExternalFields(t_eval, sub_dt); + m_hybrid_pic_model->m_external_vector_potential->UpdateHybridExternalFields(t_eval, sub_dt); // If using split fields, subtract the external field at the old time for (int lev = 0; lev <= finest_level; ++lev) { @@ -154,7 +154,7 @@ void WarpX::HybridPICEvolveFields () if (add_external_fields) { // Get the external fields - m_hybrid_pic_model->m_external_EB->UpdateHybridExternalFields(t_eval, sub_dt); + m_hybrid_pic_model->m_external_vector_potential->UpdateHybridExternalFields(t_eval, sub_dt); } // Now push the B field from t=n+1/2 to t=n+1 using the n+1/2 quantities @@ -211,7 +211,7 @@ void WarpX::HybridPICEvolveFields () // Handle field splitting for Hybrid field push if (add_external_fields) { - m_hybrid_pic_model->m_external_EB->UpdateHybridExternalFields(t_eval, sub_dt); + m_hybrid_pic_model->m_external_vector_potential->UpdateHybridExternalFields(t_eval, sub_dt); // If using split fields, add the external field at the new time for (int lev = 0; lev <= finest_level; ++lev) { diff --git a/Source/Fields.H b/Source/Fields.H index 593aab36f5d..91e1e264a8d 100644 --- a/Source/Fields.H +++ b/Source/Fields.H @@ -50,9 +50,9 @@ namespace warpx::fields hybrid_current_fp_temp, /**< Used with Ohm's law solver. 
Stores the time interpolated/extrapolated current density */ hybrid_current_fp_plasma, /**< Used with Ohm's law solver. Stores plasma current calculated as J_plasma = curl x B / mu0 - J_ext */ hybrid_current_fp_external, /**< Used with Ohm's law solver. Stores external current */ - hybrid_B_fp_external, - hybrid_E_fp_external, - hybrid_A_fp_external, + hybrid_B_fp_external, /**< Used with Ohm's law solver. Stores external B field */ + hybrid_E_fp_external, /**< Used with Ohm's law solver. Stores external E field */ + hybrid_A_fp_external, /**< Used with Ohm's law solver. Stores external A field */ Efield_cp, //!< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level Bfield_cp, //!< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level current_cp, //!< Only used with MR. The current that is used as a source for the field solver, on the coarse patch of each level @@ -107,6 +107,7 @@ namespace warpx::fields FieldType::hybrid_current_fp_external, FieldType::hybrid_B_fp_external, FieldType::hybrid_E_fp_external, + FieldType::hybrid_A_fp_external, FieldType::Efield_cp, FieldType::Bfield_cp, FieldType::current_cp, From 821250e8f7c31bfb3a144f274ceeac9d3b03dfcb Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 18:45:46 +0000 Subject: [PATCH 21/86] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- Python/pywarpx/HybridPICModel.py | 2 +- Python/pywarpx/WarpX.py | 2 +- Python/pywarpx/fields.py | 1 + Python/pywarpx/picmi.py | 1 - 4 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Python/pywarpx/HybridPICModel.py b/Python/pywarpx/HybridPICModel.py index c0de692ef71..f94f44ce931 100644 --- a/Python/pywarpx/HybridPICModel.py +++ b/Python/pywarpx/HybridPICModel.py @@ -9,4 +9,4 @@ from .Bucket import Bucket 
hybridpicmodel = Bucket("hybrid_pic_model") -external_vector_potential = Bucket("external_vector_potential") \ No newline at end of file +external_vector_potential = Bucket("external_vector_potential") diff --git a/Python/pywarpx/WarpX.py b/Python/pywarpx/WarpX.py index ed40919945c..9b0446bcc79 100644 --- a/Python/pywarpx/WarpX.py +++ b/Python/pywarpx/WarpX.py @@ -20,7 +20,7 @@ from .Diagnostics import diagnostics, reduced_diagnostics from .EB2 import eb2 from .Geometry import geometry -from .HybridPICModel import hybridpicmodel, external_vector_potential +from .HybridPICModel import external_vector_potential, hybridpicmodel from .Interpolation import interpolation from .Lasers import lasers, lasers_list from .Particles import particles, particles_list diff --git a/Python/pywarpx/fields.py b/Python/pywarpx/fields.py index d64db10cfbf..1eba1122a99 100644 --- a/Python/pywarpx/fields.py +++ b/Python/pywarpx/fields.py @@ -730,6 +730,7 @@ def AzHybridExternalWrapper(level=0, include_ghosts=False): include_ghosts=include_ghosts, ) + def ExHybridExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( mf_name="hybrid_E_fp_external", diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 67f051aa96e..25bcaa8a90e 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1912,7 +1912,6 @@ def solver_initialize_inputs(self): ) - class ElectrostaticSolver(picmistandard.PICMI_ElectrostaticSolver): """ See `Input Parameters `__ for more information. From ce9909603dc0fc6372a409db4a458d48f6e192e7 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 23 Oct 2024 16:47:05 -0700 Subject: [PATCH 22/86] Fixing issues with initialization and boundary conditions. 
--- .../FiniteDifferenceSolver.H | 38 +++++++++---------- .../HybridPICModel/HybridPICModel.H | 9 ++--- .../HybridPICModel/HybridPICModel.cpp | 31 ++++++--------- .../HybridPICSolveE.cpp | 32 +++++++++------- .../FieldSolver/WarpXPushFieldsHybridPIC.cpp | 38 ++++++++++--------- 5 files changed, 71 insertions(+), 77 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index eea9b070c58..963907bba99 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -154,16 +154,16 @@ class FiniteDifferenceSolver * \param[in] hybrid_model instance of the hybrid-PIC model * \param[in] solve_for_Faraday boolean flag for whether the E-field is solved to be used in Faraday's equation */ - void HybridPICSolveE ( ablastr::fields::VectorField const& Efield, - ablastr::fields::VectorField & Jfield, - ablastr::fields::VectorField const& Jifield, - ablastr::fields::VectorField const& Bfield, - amrex::MultiFab const& rhofield, - amrex::MultiFab const& Pefield, - ablastr::fields::VectorField const& edge_lengths, - amrex::Real t, - int lev, HybridPICModel const* hybrid_model, - bool solve_for_Faraday ); + void HybridPICSolveE ( + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField & Jfield, + ablastr::fields::VectorField const& Jifield, + ablastr::fields::VectorField const& Bfield, + amrex::MultiFab const& rhofield, + amrex::MultiFab const& Pefield, + ablastr::fields::VectorField const& edge_lengths, + int lev, HybridPICModel const* hybrid_model, + bool solve_for_Faraday ); /** * \brief Calculation of total current using Ampere's law (without @@ -175,10 +175,10 @@ class FiniteDifferenceSolver * \param[in] lev level number for the calculation */ void CalculateCurrentAmpere ( - ablastr::fields::VectorField& Jfield, - ablastr::fields::VectorField const& Bfield, - 
ablastr::fields::VectorField const& edge_lengths, - int lev ); + ablastr::fields::VectorField& Jfield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& edge_lengths, + int lev ); /** * \brief Calculation of B field from the vector potential A @@ -190,10 +190,10 @@ class FiniteDifferenceSolver * \param[in] lev level number for the calculation */ void ComputeCurlA ( - ablastr::fields::VectorField& Bfield, - ablastr::fields::VectorField const& Afield, - ablastr::fields::VectorField const& edge_lengths, - int lev ); + ablastr::fields::VectorField& Bfield, + ablastr::fields::VectorField const& Afield, + ablastr::fields::VectorField const& edge_lengths, + int lev ); private: @@ -262,7 +262,6 @@ class FiniteDifferenceSolver amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, ablastr::fields::VectorField const& edge_lengths, - amrex::Real t, int lev, HybridPICModel const* hybrid_model, bool solve_for_Faraday ); @@ -375,7 +374,6 @@ class FiniteDifferenceSolver amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, ablastr::fields::VectorField const& edge_lengths, - amrex::Real t, int lev, HybridPICModel const* hybrid_model, bool solve_for_Faraday ); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index 12ea01d216b..41fd51eb203 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -106,7 +106,6 @@ public: ablastr::fields::MultiLevelVectorField const& Bfield, ablastr::fields::MultiLevelScalarField const& rhofield, ablastr::fields::MultiLevelVectorField const& edge_lengths, - amrex::Real t, bool solve_for_Faraday) const; void HybridPICSolveE ( @@ -115,7 +114,6 @@ public: ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, ablastr::fields::VectorField const& 
edge_lengths, - amrex::Real t, int lev, bool solve_for_Faraday) const; void HybridPICSolveE ( @@ -124,7 +122,6 @@ public: ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, ablastr::fields::VectorField const& edge_lengths, - amrex::Real t, int lev, PatchType patch_type, bool solve_for_Faraday) const; void BfieldEvolveRK ( @@ -133,7 +130,7 @@ public: ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelScalarField const& rhofield, ablastr::fields::MultiLevelVectorField const& edge_lengths, - amrex::Real t, amrex::Real dt, DtType a_dt_type, + amrex::Real dt, DtType a_dt_type, amrex::IntVect ng, std::optional nodal_sync); void BfieldEvolveRK ( @@ -142,7 +139,7 @@ public: ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelScalarField const& rhofield, ablastr::fields::MultiLevelVectorField const& edge_lengths, - amrex::Real t, amrex::Real dt, int lev, DtType dt_type, + amrex::Real dt, int lev, DtType dt_type, amrex::IntVect ng, std::optional nodal_sync); void FieldPush ( @@ -151,7 +148,7 @@ public: ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelScalarField const& rhofield, ablastr::fields::MultiLevelVectorField const& edge_lengths, - amrex::Real t, amrex::Real dt, DtType dt_type, + amrex::Real dt, DtType dt_type, amrex::IntVect ng, std::optional nodal_sync); /** diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 03c22807969..63b73622499 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -329,7 +329,6 @@ void HybridPICModel::HybridPICSolveE ( ablastr::fields::MultiLevelVectorField const& Bfield, ablastr::fields::MultiLevelScalarField const& rhofield, ablastr::fields::MultiLevelVectorField const& edge_lengths, - 
amrex::Real t, const bool solve_for_Faraday) const { auto& warpx = WarpX::GetInstance(); @@ -337,7 +336,7 @@ void HybridPICModel::HybridPICSolveE ( { HybridPICSolveE( Efield[lev], Jfield[lev], Bfield[lev], *rhofield[lev], - edge_lengths[lev], t, lev, solve_for_Faraday + edge_lengths[lev], lev, solve_for_Faraday ); } } @@ -348,13 +347,12 @@ void HybridPICModel::HybridPICSolveE ( ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, ablastr::fields::VectorField const& edge_lengths, - amrex::Real t, const int lev, const bool solve_for_Faraday) const { WARPX_PROFILE("WarpX::HybridPICSolveE()"); HybridPICSolveE( - Efield, Jfield, Bfield, rhofield, edge_lengths, t, lev, + Efield, Jfield, Bfield, rhofield, edge_lengths, lev, PatchType::fine, solve_for_Faraday ); if (lev > 0) @@ -370,7 +368,6 @@ void HybridPICModel::HybridPICSolveE ( ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, ablastr::fields::VectorField const& edge_lengths, - amrex::Real t, const int lev, PatchType patch_type, const bool solve_for_Faraday) const { @@ -382,7 +379,7 @@ void HybridPICModel::HybridPICSolveE ( // Solve E field in regular cells warpx.get_pointer_fdtd_solver_fp(lev)->HybridPICSolveE( Efield, current_fp_plasma, Jfield, Bfield, rhofield, - *electron_pressure_fp, edge_lengths, t, lev, this, solve_for_Faraday + *electron_pressure_fp, edge_lengths, lev, this, solve_for_Faraday ); warpx.ApplyEfieldBoundary(lev, patch_type); } @@ -449,14 +446,14 @@ void HybridPICModel::BfieldEvolveRK ( ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelScalarField const& rhofield, ablastr::fields::MultiLevelVectorField const& edge_lengths, - amrex::Real t, amrex::Real dt, DtType dt_type, + amrex::Real dt, DtType dt_type, IntVect ng, std::optional nodal_sync ) { auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { BfieldEvolveRK( - Bfield, Efield, Jfield, rhofield, edge_lengths, t, dt, lev, 
dt_type, + Bfield, Efield, Jfield, rhofield, edge_lengths, dt, lev, dt_type, ng, nodal_sync ); } @@ -468,7 +465,7 @@ void HybridPICModel::BfieldEvolveRK ( ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelScalarField const& rhofield, ablastr::fields::MultiLevelVectorField const& edge_lengths, - amrex::Real t, amrex::Real dt, int lev, DtType dt_type, + amrex::Real dt, int lev, DtType dt_type, IntVect ng, std::optional nodal_sync ) { // Make copies of the B-field multifabs at t = n and create multifabs for @@ -491,13 +488,11 @@ void HybridPICModel::BfieldEvolveRK ( K[ii].setVal(0.0); } - amrex::Real t_eval = t; - // The Runge-Kutta scheme begins here. // Step 1: FieldPush( Bfield, Efield, Jfield, rhofield, edge_lengths, - t_eval, 0.5_rt*dt, dt_type, ng, nodal_sync + 0.5_rt*dt, dt_type, ng, nodal_sync ); // The Bfield is now given by: @@ -511,10 +506,9 @@ void HybridPICModel::BfieldEvolveRK ( } // Step 2: - t_eval = t+0.5_rt*dt; FieldPush( Bfield, Efield, Jfield, rhofield, edge_lengths, - t_eval, 0.5_rt*dt, dt_type, ng, nodal_sync + 0.5_rt*dt, dt_type, ng, nodal_sync ); // The Bfield is now given by: @@ -534,7 +528,7 @@ void HybridPICModel::BfieldEvolveRK ( // Step 3: FieldPush( Bfield, Efield, Jfield, rhofield, edge_lengths, - t_eval, dt, dt_type, ng, nodal_sync + dt, dt_type, ng, nodal_sync ); // The Bfield is now given by: @@ -548,10 +542,9 @@ void HybridPICModel::BfieldEvolveRK ( } // Step 4: - t_eval = t + dt; FieldPush( Bfield, Efield, Jfield, rhofield, edge_lengths, - t_eval, 0.5_rt*dt, dt_type, ng, nodal_sync + 0.5_rt*dt, dt_type, ng, nodal_sync ); // The Bfield is now given by: @@ -585,7 +578,7 @@ void HybridPICModel::FieldPush ( ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelScalarField const& rhofield, ablastr::fields::MultiLevelVectorField const& edge_lengths, - amrex::Real t, amrex::Real dt, DtType dt_type, + amrex::Real dt, DtType dt_type, IntVect ng, std::optional nodal_sync ) { auto& 
warpx = WarpX::GetInstance(); @@ -593,7 +586,7 @@ void HybridPICModel::FieldPush ( // Calculate J = curl x B / mu0 - J_ext CalculatePlasmaCurrent(Bfield, edge_lengths); // Calculate the E-field from Ohm's law - HybridPICSolveE(Efield, Jfield, Bfield, rhofield, edge_lengths, t, true); + HybridPICSolveE(Efield, Jfield, Bfield, rhofield, edge_lengths, true); warpx.FillBoundaryE(ng, nodal_sync); // Push forward the B-field using Faraday's law warpx.EvolveB(dt, dt_type); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index eb4898a041a..6823a66f14b 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -360,7 +360,6 @@ void FiniteDifferenceSolver::HybridPICSolveE ( amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, ablastr::fields::VectorField const& edge_lengths, - amrex::Real t, int lev, HybridPICModel const* hybrid_model, const bool solve_for_Faraday) { @@ -371,14 +370,14 @@ void FiniteDifferenceSolver::HybridPICSolveE ( HybridPICSolveECylindrical ( Efield, Jfield, Jifield, Bfield, rhofield, Pefield, - edge_lengths, t, lev, hybrid_model, solve_for_Faraday + edge_lengths, lev, hybrid_model, solve_for_Faraday ); #else HybridPICSolveECartesian ( Efield, Jfield, Jifield, Bfield, rhofield, Pefield, - edge_lengths, t, lev, hybrid_model, solve_for_Faraday + edge_lengths, lev, hybrid_model, solve_for_Faraday ); #endif @@ -400,7 +399,6 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, ablastr::fields::VectorField const& edge_lengths, - amrex::Real t, int lev, HybridPICModel const* hybrid_model, const bool solve_for_Faraday ) { @@ -742,7 +740,6 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, ablastr::fields::VectorField const& 
edge_lengths, - amrex::Real t, int lev, HybridPICModel const* hybrid_model, const bool solve_for_Faraday ) { @@ -761,9 +758,9 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( const bool include_external_fields = hybrid_model->m_add_external_fields; - auto const& warpx = WarpX::GetInstance(); - ablastr::fields::ConstVectorField Bfield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_B_fp_external, 0); // lev=0 - ablastr::fields::ConstVectorField Efield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_E_fp_external, 0); // lev=0 + auto & warpx = WarpX::GetInstance(); + ablastr::fields::VectorField Bfield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_B_fp_external, 0); // lev=0 + ablastr::fields::VectorField Efield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_E_fp_external, 0); // lev=0 // Index type required for interpolating fields from their respective // staggering to the Ex, Ey, Ez locations @@ -821,9 +818,12 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Array4 const& By = Bfield[1]->const_array(mfi); Array4 const& Bz = Bfield[2]->const_array(mfi); - Array4 const& Bx_ext = Bfield_external[0]->const_array(mfi); - Array4 const& By_ext = Bfield_external[1]->const_array(mfi); - Array4 const& Bz_ext = Bfield_external[2]->const_array(mfi); + Array4 Bx_ext, By_ext, Bz_ext; + if (include_external_fields) { + Bx_ext = Bfield_external[0]->array(mfi); + By_ext = Bfield_external[1]->array(mfi); + Bz_ext = Bfield_external[2]->array(mfi); + } // Loop over the cells and update the nodal E field amrex::ParallelFor(mfi.tilebox(), [=] AMREX_GPU_DEVICE (int i, int j, int k){ @@ -888,9 +888,6 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Array4 const& Ex = Efield[0]->array(mfi); Array4 const& Ey = Efield[1]->array(mfi); Array4 const& Ez = Efield[2]->array(mfi); - Array4 const& Ex_ext = Efield_external[0]->const_array(mfi); - Array4 const& Ey_ext = Efield_external[1]->const_array(mfi); - Array4 const& Ez_ext = 
Efield_external[2]->const_array(mfi); Array4 const& Jx = Jfield[0]->const_array(mfi); Array4 const& Jy = Jfield[1]->const_array(mfi); Array4 const& Jz = Jfield[2]->const_array(mfi); @@ -898,6 +895,13 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Array4 const& rho = rhofield.const_array(mfi); Array4 const& Pe = Pefield.array(mfi); + Array4 Ex_ext, Ey_ext, Ez_ext; + if (include_external_fields) { + Ex_ext = Efield_external[0]->array(mfi); + Ey_ext = Efield_external[1]->array(mfi); + Ez_ext = Efield_external[2]->array(mfi); + } + amrex::Array4 lx, ly, lz; if (EB::enabled()) { lx = edge_lengths[0]->array(mfi); diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index 6db14e682dc..28433419805 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -37,15 +37,15 @@ void WarpX::HybridPICEvolveFields () // Get requested number of substeps to use const int sub_steps = m_hybrid_pic_model->m_substeps; - amrex::Real t_eval = gett_old(0); - amrex::Real sub_dt = 0.5_rt*dt[0]/sub_steps; - + // Get flag to include external fields. 
const bool add_external_fields = m_hybrid_pic_model->m_add_external_fields; // Handle field splitting for Hybrid field push if (add_external_fields) { // Get the external fields - m_hybrid_pic_model->m_external_vector_potential->UpdateHybridExternalFields(t_eval, sub_dt); + m_hybrid_pic_model->m_external_vector_potential->UpdateHybridExternalFields( + gett_old(0), + 0.5_rt*dt[0]); // If using split fields, subtract the external field at the old time for (int lev = 0; lev <= finest_level; ++lev) { @@ -54,7 +54,7 @@ void WarpX::HybridPICEvolveFields () *m_fields.get(FieldType::Bfield_fp, Direction{idim}, lev), *m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev), 0, 0, 1, - m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev)->nGrowVect()); + m_fields.get(FieldType::Bfield_fp, Direction{idim}, lev)->nGrowVect()); } } } @@ -132,7 +132,7 @@ void WarpX::HybridPICEvolveFields () m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level), current_fp_temp, rho_fp_temp, m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level), - t_eval, sub_dt, + 0.5_rt*dt[0]/sub_steps, DtType::FirstHalf, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points ); @@ -150,11 +150,11 @@ void WarpX::HybridPICEvolveFields () ); } - t_eval += 0.5_rt*dt[0]; - if (add_external_fields) { // Get the external fields - m_hybrid_pic_model->m_external_vector_potential->UpdateHybridExternalFields(t_eval, sub_dt); + m_hybrid_pic_model->m_external_vector_potential->UpdateHybridExternalFields( + gett_old(0) + 0.5_rt*dt[0], + 0.5_rt*dt[0]); } // Now push the B field from t=n+1/2 to t=n+1 using the n+1/2 quantities @@ -166,7 +166,7 @@ void WarpX::HybridPICEvolveFields () m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), rho_fp_temp, m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level), - t_eval, sub_dt, + 0.5_rt*dt[0]/sub_steps, DtType::SecondHalf, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points ); @@ -190,29 +190,31 @@ 
void WarpX::HybridPICEvolveFields () } } + if (add_external_fields) { + m_hybrid_pic_model->m_external_vector_potential->UpdateHybridExternalFields( + gett_new(0), + 0.5_rt*dt[0]); + } + // Calculate the electron pressure at t=n+1 m_hybrid_pic_model->CalculateElectronPressure(); - t_eval = gett_new(0); - // Update the E field to t=n+1 using the extrapolated J_i^n+1 value m_hybrid_pic_model->CalculatePlasmaCurrent( m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level), m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level)); + m_hybrid_pic_model->HybridPICSolveE( m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level), current_fp_temp, m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level), m_fields.get_mr_levels(FieldType::rho_fp, finest_level), - m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level), t_eval, false - ); + m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level), false); FillBoundaryE(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); // Handle field splitting for Hybrid field push if (add_external_fields) { - m_hybrid_pic_model->m_external_vector_potential->UpdateHybridExternalFields(t_eval, sub_dt); - // If using split fields, add the external field at the new time for (int lev = 0; lev <= finest_level; ++lev) { for (int idim = 0; idim < 3; ++idim) { @@ -220,12 +222,12 @@ void WarpX::HybridPICEvolveFields () *m_fields.get(FieldType::Bfield_fp, Direction{idim}, lev), *m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev), 0, 0, 1, - m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev)->nGrowVect()); + m_fields.get(FieldType::Bfield_fp, Direction{idim}, lev)->nGrowVect()); MultiFab::Add( *m_fields.get(FieldType::Efield_fp, Direction{idim}, lev), *m_fields.get(FieldType::hybrid_E_fp_external, Direction{idim}, lev), 0, 0, 1, - m_fields.get(FieldType::hybrid_E_fp_external, Direction{idim}, lev)->nGrowVect()); + 
m_fields.get(FieldType::Efield_fp, Direction{idim}, lev)->nGrowVect()); } } } From c810af2ec7342831289ef646e56f10d04fe14a07 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 23 Oct 2024 23:47:56 +0000 Subject: [PATCH 23/86] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index 963907bba99..f695ffa4b41 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -154,7 +154,7 @@ class FiniteDifferenceSolver * \param[in] hybrid_model instance of the hybrid-PIC model * \param[in] solve_for_Faraday boolean flag for whether the E-field is solved to be used in Faraday's equation */ - void HybridPICSolveE ( + void HybridPICSolveE ( ablastr::fields::VectorField const& Efield, ablastr::fields::VectorField & Jfield, ablastr::fields::VectorField const& Jifield, From b85f8680251146f81981690e8f623947e6e65dbc Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Fri, 25 Oct 2024 12:53:22 -0700 Subject: [PATCH 24/86] Refactored EB logic in Hybrid solver to reduce code redundancy. 
--- Source/EmbeddedBoundary/CMakeLists.txt | 1 + Source/EmbeddedBoundary/Covered.H | 125 ++++++++ Source/EmbeddedBoundary/Covered.cpp | 67 ++++ Source/EmbeddedBoundary/Make.package | 2 + .../FiniteDifferenceSolver/ComputeCurlA.cpp | 290 ++++++++++++++++++ .../FiniteDifferenceSolver.H | 10 - .../HybridPICModel/ExternalVectorPotential.H | 96 ++++++ .../ExternalVectorPotential.cpp | 245 +++++++++++++++ .../HybridPICModel/HybridPICModel.H | 10 +- .../HybridPICModel/HybridPICModel.cpp | 44 +-- .../HybridPICSolveE.cpp | 97 ++---- .../FieldSolver/WarpXPushFieldsHybridPIC.cpp | 7 +- Source/Initialization/WarpXInitData.cpp | 91 +----- Source/WarpX.H | 5 +- 14 files changed, 885 insertions(+), 205 deletions(-) create mode 100644 Source/EmbeddedBoundary/Covered.H create mode 100644 Source/EmbeddedBoundary/Covered.cpp create mode 100644 Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp create mode 100644 Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H create mode 100644 Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp diff --git a/Source/EmbeddedBoundary/CMakeLists.txt b/Source/EmbeddedBoundary/CMakeLists.txt index 2fa5e3e602b..08a7cd5d821 100644 --- a/Source/EmbeddedBoundary/CMakeLists.txt +++ b/Source/EmbeddedBoundary/CMakeLists.txt @@ -7,5 +7,6 @@ foreach(D IN LISTS WarpX_DIMS) WarpXFaceExtensions.cpp WarpXFaceInfoBox.H Enabled.cpp + Covered.cpp ) endforeach() diff --git a/Source/EmbeddedBoundary/Covered.H b/Source/EmbeddedBoundary/Covered.H new file mode 100644 index 00000000000..10ebf48f307 --- /dev/null +++ b/Source/EmbeddedBoundary/Covered.H @@ -0,0 +1,125 @@ +/* Copyright 2024 S. Eric Clark (Helion Energy) + * + * This file is part of WarpX. 
+ * + * License: BSD-3-Clause-LBNL + */ +#ifndef WARPX_EB_COVERED_H_ +#define WARPX_EB_COVERED_H_ + + +#include "EmbeddedBoundary/Enabled.H" +#include "Fields.H" + +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include + +namespace EB { + +AMREX_ENUM(CoverTopology, + none, + face, + edge +); + +class Covered +{ +public: + amrex::Array4 lx, ly, lz, Sx, Sy, Sz; + +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + amrex::Dim3 lx_lo, lx_hi, lz_lo, lz_hi; +#endif + + Covered (amrex::MFIter &mfi, int lev); + + // Can have topology of f or e for 'face' or 'edge' + AMREX_GPU_HOST_DEVICE + bool isCovered ( + const int idir, + const CoverTopology topology, + const int i, + const int j, + const int k) const noexcept + { + bool is_covered = false; + +#ifdef AMREX_USE_EB + switch (idir) + { + case 0: +#ifdef WARPX_DIM_3D + if((topology==CoverTopology::edge and lx(i, j, k)<=0) or (topology==CoverTopology::face and Sx(i, j, k)<=0)) + { + is_covered = true; + } +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + //In XZ and RZ Ex is associated with a x-edge, while Bx is associated with a z-edge + if((topology==CoverTopology::edge and lx(i, j, k)<=0) or (topology==CoverTopology::face and lz(i, j, k)<=0)) + { + is_covered = true; + } +#endif + break; + + case 1: +#ifdef WARPX_DIM_3D + if((topology==CoverTopology::edge and ly(i, j, k)<=0) or (topology==CoverTopology::face and Sy(i, j, k)<=0)) + { + is_covered = true; + } +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + //In XZ and RZ Ey is associated with a mesh node, so we need to check if the mesh node is covered + if( (topology==CoverTopology::edge and (lx(std::min(i , lx_hi.x), std::min(j , lx_hi.y), k)<=0 + || lx(std::max(i-1, lx_lo.x), std::min(j , lx_hi.y), k)<=0 + || lz(std::min(i , lz_hi.x), std::min(j , lz_hi.y), k)<=0 + || lz(std::min(i 
, lz_hi.x), std::max(j-1, lz_lo.y), k)<=0)) or + (topology==CoverTopology::face and Sy(i,j,k)<=0)) + { + is_covered = true; + } +#endif + break; + + case 2: +#ifdef WARPX_DIM_3D + if((topology==CoverTopology::edge and lz(i, j, k)<=0) or (topology==CoverTopology::face and Sz(i, j, k)<=0)) + { + is_covered = true; + } +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + //In XZ and RZ Ez is associated with a z-edge, while Bz is associated with a x-edge + if((topology==CoverTopology::edge and lz(i, j, k)<=0) or (topology==CoverTopology::face and lx(i, j, k)<=0)) + { + is_covered = true; + } +#endif + break; + } +#endif //AMREX_USE_EB + + return is_covered; + } +}; + +} // namespace EB +#endif \ No newline at end of file diff --git a/Source/EmbeddedBoundary/Covered.cpp b/Source/EmbeddedBoundary/Covered.cpp new file mode 100644 index 00000000000..be3265dd13c --- /dev/null +++ b/Source/EmbeddedBoundary/Covered.cpp @@ -0,0 +1,67 @@ +/* Copyright 2024 S. Eric Clark + * + * This file is part of WarpX. 
+ * + * License: BSD-3-Clause-LBNL + */ +#include "WarpX.H" + +#include "EmbeddedBoundary/Covered.H" +#include "EmbeddedBoundary/Enabled.H" +#include "Fields.H" + +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include + +using namespace amrex; +using namespace warpx::fields; + +namespace EB { + +// Default Constructor +Covered::Covered (amrex::MFIter &mfi, int lev) +{ + if (EB::enabled()) { + auto& warpx = WarpX::GetInstance(); + auto edge_lengths = warpx.m_fields.get_alldirs(FieldType::edge_lengths, lev); + auto face_areas = warpx.m_fields.get_alldirs(FieldType::face_areas, lev); + + lx = edge_lengths[0]->array(mfi); + ly = edge_lengths[1]->array(mfi); + lz = edge_lengths[2]->array(mfi); + Sx = face_areas[0]->array(mfi); + Sy = face_areas[1]->array(mfi); + Sz = face_areas[2]->array(mfi); + +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + lx_lo = amrex::lbound(lx); + lx_hi = amrex::ubound(lx); + lz_lo = amrex::lbound(lz); + lz_hi = amrex::ubound(lz); +#endif + } +} + +} \ No newline at end of file diff --git a/Source/EmbeddedBoundary/Make.package b/Source/EmbeddedBoundary/Make.package index 76a20896f85..c1f0d52c493 100644 --- a/Source/EmbeddedBoundary/Make.package +++ b/Source/EmbeddedBoundary/Make.package @@ -1,10 +1,12 @@ CEXE_headers += Enabled.H +CEXE_headers += Covered.H CEXE_headers += ParticleScraper.H CEXE_headers += ParticleBoundaryProcess.H CEXE_headers += DistanceToEB.H CEXE_headers += WarpXFaceInfoBox.H CEXE_sources += Enabled.cpp +CEXE_sources += Covered.cpp CEXE_sources += WarpXInitEB.cpp CEXE_sources += WarpXFaceExtensions.cpp diff --git a/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp b/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp new file mode 100644 index 00000000000..1af1232dee2 --- /dev/null +++ 
b/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp @@ -0,0 +1,290 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: S. Eric Clark (Helion Energy) + * + * License: BSD-3-Clause-LBNL + */ + +#include "FiniteDifferenceSolver.H" + +#include "EmbeddedBoundary/Enabled.H" +#include "EmbeddedBoundary/Covered.H" +#ifdef WARPX_DIM_RZ +# include "FiniteDifferenceAlgorithms/CylindricalYeeAlgorithm.H" +#else +# include "FiniteDifferenceAlgorithms/CartesianYeeAlgorithm.H" +#endif + +#include "Utils/TextMsg.H" +#include "WarpX.H" + +#include + +using namespace amrex; +using warpx::fields::FieldType; + +void FiniteDifferenceSolver::ComputeCurlA ( + ablastr::fields::VectorField& Bfield, + ablastr::fields::VectorField const& Afield, + int lev ) +{ + // Select algorithm (The choice of algorithm is a runtime option, + // but we compile code for each algorithm, using templates) + if (m_fdtd_algo == ElectromagneticSolverAlgo::HybridPIC) { +#ifdef WARPX_DIM_RZ + ComputeCurlACylindrical ( + Bfield, Afield, lev + ); + +#else + ComputeCurlACartesian ( + Bfield, Afield, lev + ); + +#endif + } else { + amrex::Abort(Utils::TextMsg::Err( + "ComputeCurl: Unknown algorithm choice.")); + } +} + +// /** +// * \brief Calculate B from the curl of A +// * i.e. 
B = curl(A) output field on B field mesh staggering +// * +// * \param[out] curlField output of curl operation +// * \param[in] field input staggered field, should be on E/J/A mesh staggering +// */ +#ifdef WARPX_DIM_RZ +template +void FiniteDifferenceSolver::ComputeCurlACylindrical ( + ablastr::fields::VectorField& Bfield, + ablastr::fields::VectorField const& Afield, + int lev +) +{ + // for the profiler + amrex::LayoutData* cost = WarpX::getCosts(lev); + + // reset Jfield + Bfield[0]->setVal(0); + Bfield[1]->setVal(0); + Bfield[2]->setVal(0); + + // Loop through the grids, and over the tiles within each grid +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + for ( MFIter mfi(*Afield[0], TilingIfNotGPU()); mfi.isValid(); ++mfi ) { + if (cost && WarpX::load_balance_costs_update_algo == LoadBalanceCostsUpdateAlgo::Timers) + { + amrex::Gpu::synchronize(); + } + Real wt = static_cast(amrex::second()); + + // Extract field data for this grid/tile + Array4 const& Ar = Afield[0]->const_array(mfi); + Array4 const& At = Afield[1]->const_array(mfi); + Array4 const& Az = Afield[2]->const_array(mfi); + Array4 const& Br = Bfield[0]->array(mfi); + Array4 const& Bt = Bfield[1]->array(mfi); + Array4 const& Bz = Bfield[2]->array(mfi); + + EB::Covered const& cov_ptr = EB::Covered(mfi, lev); + + // Extract stencil coefficients + Real const * const AMREX_RESTRICT coefs_r = m_stencil_coefs_r.dataPtr(); + int const n_coefs_r = static_cast(m_stencil_coefs_r.size()); + Real const * const AMREX_RESTRICT coefs_z = m_stencil_coefs_z.dataPtr(); + int const n_coefs_z = static_cast(m_stencil_coefs_z.size()); + + // Extract cylindrical specific parameters + Real const dr = m_dr; + int const nmodes = m_nmodes; + Real const rmin = m_rmin; + + // Extract tileboxes for which to loop over + Box const& tbr = mfi.tilebox(Bfield[0]->ixType().toIntVect()); + Box const& tbt = mfi.tilebox(Bfield[1]->ixType().toIntVect()); + Box const& tbz = 
mfi.tilebox(Bfield[2]->ixType().toIntVect()); + + // Calculate the B-field from the A-field + amrex::ParallelFor(tbr, tbt, tbz, + + // Br calculation + [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ + if (cov_ptr.isCovered(0, EB::CoverTopology::face, i, j, 0)) { return; } + + Real const r = rmin + i*dr; // r on nodal point (Br is nodal in r) + if (r != 0) { // Off-axis, regular Maxwell equations + Br(i, j, 0, 0) = - T_Algo::UpwardDz(At, coefs_z, n_coefs_z, i, j, 0, 0); // Mode m=0 + for (int m=1; m(amrex::second()) - wt; + amrex::HostDevice::Atomic::Add( &(*cost)[mfi.index()], wt); + } + } +} + +#else + +template +void FiniteDifferenceSolver::ComputeCurlACartesian ( + ablastr::fields::VectorField & Bfield, + ablastr::fields::VectorField const& Afield, + int lev +) +{ + using ablastr::fields::Direction; + + // for the profiler + amrex::LayoutData* cost = WarpX::getCosts(lev); + + // reset Bfield + Bfield[0]->setVal(0); + Bfield[1]->setVal(0); + Bfield[2]->setVal(0); + + // Loop through the grids, and over the tiles within each grid +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + for ( MFIter mfi(*Afield[0], TilingIfNotGPU()); mfi.isValid(); ++mfi ) { + if (cost && WarpX::load_balance_costs_update_algo == LoadBalanceCostsUpdateAlgo::Timers) { + amrex::Gpu::synchronize(); + } + auto wt = static_cast(amrex::second()); + + // Extract field data for this grid/tile + Array4 const &Bx = Bfield[0]->array(mfi); + Array4 const &By = Bfield[1]->array(mfi); + Array4 const &Bz = Bfield[2]->array(mfi); + Array4 const &Ax = Afield[0]->const_array(mfi); + Array4 const &Ay = Afield[1]->const_array(mfi); + Array4 const &Az = Afield[2]->const_array(mfi); + + EB::Covered const& cov_ptr = EB::Covered(mfi, lev); + + // Extract stencil coefficients + Real const * const AMREX_RESTRICT coefs_x = m_stencil_coefs_x.dataPtr(); + auto const n_coefs_x = static_cast(m_stencil_coefs_x.size()); + Real const * const AMREX_RESTRICT coefs_y = 
m_stencil_coefs_y.dataPtr(); + auto const n_coefs_y = static_cast(m_stencil_coefs_y.size()); + Real const * const AMREX_RESTRICT coefs_z = m_stencil_coefs_z.dataPtr(); + auto const n_coefs_z = static_cast(m_stencil_coefs_z.size()); + + // Extract tileboxes for which to loop + Box const& tbx = mfi.tilebox(Bfield[0]->ixType().toIntVect()); + Box const& tby = mfi.tilebox(Bfield[1]->ixType().toIntVect()); + Box const& tbz = mfi.tilebox(Bfield[2]->ixType().toIntVect()); + + // Calculate the curl of A + amrex::ParallelFor(tbx, tby, tbz, + + // Bx calculation + [=] AMREX_GPU_DEVICE (int i, int j, int k){ + // Skip if this cell is fully covered by embedded boundaries + if (cov_ptr.isCovered(0, EB::CoverTopology::face, i, j, k)) { return; } + + Bx(i, j, k) = ( + - T_Algo::UpwardDz(Ay, coefs_z, n_coefs_z, i, j, k) + + T_Algo::UpwardDy(Az, coefs_y, n_coefs_y, i, j, k) + ); + }, + + // By calculation + [=] AMREX_GPU_DEVICE (int i, int j, int k){ + // Skip if this cell is fully covered by embedded boundaries + if (cov_ptr.isCovered(1, EB::CoverTopology::face, i, j, k)) { return; } + + By(i, j, k) = ( + - T_Algo::UpwardDx(Az, coefs_x, n_coefs_x, i, j, k) + + T_Algo::UpwardDz(Ax, coefs_z, n_coefs_z, i, j, k) + ); + }, + + // Bz calculation + [=] AMREX_GPU_DEVICE (int i, int j, int k){ + if (cov_ptr.isCovered(2, EB::CoverTopology::face, i, j, k)) { return; } + + Bz(i, j, k) = ( + - T_Algo::UpwardDy(Ax, coefs_y, n_coefs_y, i, j, k) + + T_Algo::UpwardDx(Ay, coefs_x, n_coefs_x, i, j, k) + ); + } + ); + + if (cost && WarpX::load_balance_costs_update_algo == LoadBalanceCostsUpdateAlgo::Timers) + { + amrex::Gpu::synchronize(); + wt = static_cast(amrex::second()) - wt; + amrex::HostDevice::Atomic::Add( &(*cost)[mfi.index()], wt); + } + } +} +#endif + diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index f695ffa4b41..1ad2e36ba58 100644 --- 
a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -149,7 +149,6 @@ class FiniteDifferenceSolver * \param[in] Bfield vector of magnetic field MultiFabs at a given level * \param[in] rhofield scalar ion charge density Multifab at a given level * \param[in] Pefield scalar electron pressure MultiFab at a given level - * \param[in] edge_lengths length of edges along embedded boundaries * \param[in] lev level number for the calculation * \param[in] hybrid_model instance of the hybrid-PIC model * \param[in] solve_for_Faraday boolean flag for whether the E-field is solved to be used in Faraday's equation @@ -161,7 +160,6 @@ class FiniteDifferenceSolver ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, - ablastr::fields::VectorField const& edge_lengths, int lev, HybridPICModel const* hybrid_model, bool solve_for_Faraday ); @@ -177,7 +175,6 @@ class FiniteDifferenceSolver void CalculateCurrentAmpere ( ablastr::fields::VectorField& Jfield, ablastr::fields::VectorField const& Bfield, - ablastr::fields::VectorField const& edge_lengths, int lev ); /** @@ -192,7 +189,6 @@ class FiniteDifferenceSolver void ComputeCurlA ( ablastr::fields::VectorField& Bfield, ablastr::fields::VectorField const& Afield, - ablastr::fields::VectorField const& edge_lengths, int lev ); private: @@ -261,7 +257,6 @@ class FiniteDifferenceSolver ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, - ablastr::fields::VectorField const& edge_lengths, int lev, HybridPICModel const* hybrid_model, bool solve_for_Faraday ); @@ -269,7 +264,6 @@ class FiniteDifferenceSolver void CalculateCurrentAmpereCylindrical ( ablastr::fields::VectorField& Jfield, ablastr::fields::VectorField const& Bfield, - ablastr::fields::VectorField const& edge_lengths, int lev ); @@ -277,7 +271,6 @@ class FiniteDifferenceSolver 
void ComputeCurlACylindrical ( ablastr::fields::VectorField& Bfield, ablastr::fields::VectorField const& Afield, - ablastr::fields::VectorField const& edge_lengths, int lev ); @@ -373,7 +366,6 @@ class FiniteDifferenceSolver ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, - ablastr::fields::VectorField const& edge_lengths, int lev, HybridPICModel const* hybrid_model, bool solve_for_Faraday ); @@ -381,7 +373,6 @@ class FiniteDifferenceSolver void CalculateCurrentAmpereCartesian ( ablastr::fields::VectorField& Jfield, ablastr::fields::VectorField const& Bfield, - ablastr::fields::VectorField const& edge_lengths, int lev ); @@ -389,7 +380,6 @@ class FiniteDifferenceSolver void ComputeCurlACartesian ( ablastr::fields::VectorField & Bfield, ablastr::fields::VectorField const& Afield, - ablastr::fields::VectorField const& edge_lengths, int lev ); #endif diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H new file mode 100644 index 00000000000..80da3ecee2f --- /dev/null +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H @@ -0,0 +1,96 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: S. 
Eric Clark (Helion Energy) + * + * License: BSD-3-Clause-LBNL + */ + +#ifndef WARPX_EXTERNAL_VECTOR_POTENTIAL_H_ +#define WARPX_EXTERNAL_VECTOR_POTENTIAL_H_ + +#include "Fields.H" + +#include "Utils/WarpXAlgorithmSelection.H" + +#include "EmbeddedBoundary/Covered.H" +#include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H" +#include "Utils/Parser/ParserUtils.H" +#include "Utils/WarpXConst.H" +#include "Utils/WarpXProfilerWrapper.H" + +#include + +#include +#include +#include +#include +#include + +#include + +using namespace amrex; + +/** + * \brief This class contains the parameters needed to evaluate a + * time varying external vector potential, leading to external E/B + * fields to be applied in Hybrid Solver. This class is used to break up + * the passed in fields into a spatial and time dependent solution. + * + * Eventually this can be used in a list to control independent external + * fields with different time profiles. + * + */ +class ExternalVectorPotential +{ +protected: + std::string m_Ax_ext_grid_function = "0.0"; + std::string m_Ay_ext_grid_function = "0.0"; + std::string m_Az_ext_grid_function = "0.0"; + std::array< std::unique_ptr, 3> m_A_external_parser; + std::array< amrex::ParserExecutor<4>, 3> m_A_external; + + std::string m_A_ext_time_function = "1.0"; + std::unique_ptr m_A_external_time_parser; + amrex::ParserExecutor<1> m_A_time_scale; + + bool m_read_A_from_file = false; + std::string m_external_file_path = ""; + +public: + + + // Default Constructor + ExternalVectorPotential (); + + void ReadParameters (); + + void AllocateLevelMFs ( + ablastr::fields::MultiFabRegister & fields, + int lev, const BoxArray& ba, const DistributionMapping& dm, + const int ncomps, + const IntVect& ngEB, + const IntVect& Ex_nodal_flag, + const IntVect& Ey_nodal_flag, + const IntVect& Ez_nodal_flag, + const IntVect& Bx_nodal_flag, + const IntVect& By_nodal_flag, + const IntVect& Bz_nodal_flag + ); + + void InitData (); + + AMREX_FORCE_INLINE + void 
ZeroFieldinEB ( + ablastr::fields::VectorField const& Field, + EB::CoverTopology topology, + const int lev); + + void UpdateHybridExternalFields ( + const amrex::Real t, + const amrex::Real dt + ); +}; + +#endif //WARPX_EXTERNAL_VECTOR_POTENTIAL_H_ \ No newline at end of file diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp new file mode 100644 index 00000000000..053de4579ed --- /dev/null +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp @@ -0,0 +1,245 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: S. Eric Clark (Helion Energy) + * + * License: BSD-3-Clause-LBNL + */ + +#include "ExternalVectorPotential.H" +#include "EmbeddedBoundary/Covered.H" +#include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H" +#include "Fields.H" +#include "WarpX.H" + +#include + +using namespace amrex; +using namespace warpx::fields; + +ExternalVectorPotential::ExternalVectorPotential () +{ + ReadParameters(); +} + +void +ExternalVectorPotential::ReadParameters () +{ + const ParmParse pp_ext_A("external_vector_potential"); + + utils::parser::queryWithParser(pp_ext_A, "read_from_file", m_read_A_from_file); + + if (m_read_A_from_file) { + pp_ext_A.query("path", m_external_file_path); + } else { + pp_ext_A.query("Ax_external_grid_function(x,y,z)", m_Ax_ext_grid_function); + pp_ext_A.query("Ay_external_grid_function(x,y,z)", m_Ay_ext_grid_function); + pp_ext_A.query("Az_external_grid_function(x,y,z)", m_Az_ext_grid_function); + } + + pp_ext_A.query("A_time_external_function(t)", m_A_ext_time_function); +} + +void +ExternalVectorPotential::AllocateLevelMFs ( + ablastr::fields::MultiFabRegister & fields, + int lev, const BoxArray& ba, const DistributionMapping& dm, + const int ncomps, + const IntVect& ngEB, + const IntVect& Ex_nodal_flag, + const 
IntVect& Ey_nodal_flag, + const IntVect& Ez_nodal_flag, + const IntVect& Bx_nodal_flag, + const IntVect& By_nodal_flag, + const IntVect& Bz_nodal_flag) +{ + using ablastr::fields::Direction; + fields.alloc_init(FieldType::hybrid_A_fp_external, Direction{0}, + lev, amrex::convert(ba, Ex_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_A_fp_external, Direction{1}, + lev, amrex::convert(ba, Ey_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_A_fp_external, Direction{2}, + lev, amrex::convert(ba, Ez_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_E_fp_external, Direction{0}, + lev, amrex::convert(ba, Ex_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_E_fp_external, Direction{1}, + lev, amrex::convert(ba, Ey_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_E_fp_external, Direction{2}, + lev, amrex::convert(ba, Ez_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_B_fp_external, Direction{0}, + lev, amrex::convert(ba, Bx_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_B_fp_external, Direction{1}, + lev, amrex::convert(ba, By_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_B_fp_external, Direction{2}, + lev, amrex::convert(ba, Bz_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); +} + +void +ExternalVectorPotential::InitData () +{ + using ablastr::fields::Direction; + auto& warpx = WarpX::GetInstance(); + + if (m_read_A_from_file) { + // Read A fields from file + for (auto lev = 0; lev <= warpx.finestLevel(); ++lev) { +#if defined(WARPX_DIM_RZ) + warpx.ReadExternalFieldFromFile(m_external_file_path, + warpx.m_fields.get(FieldType::hybrid_A_fp_external, Direction{0}, lev), + "A", "r"); + warpx.ReadExternalFieldFromFile(m_external_file_path, + warpx.m_fields.get(FieldType::hybrid_A_fp_external, Direction{1}, lev), + "A", "t"); + 
warpx.ReadExternalFieldFromFile(m_external_file_path, + warpx.m_fields.get(FieldType::hybrid_A_fp_external, Direction{2}, lev), + "A", "z"); +#else + warpx.ReadExternalFieldFromFile(m_external_file_path, + warpx.m_fields.get(FieldType::hybrid_A_fp_external, Direction{0}, lev), + "A", "x"); + warpx.ReadExternalFieldFromFile(m_external_file_path, + warpx.m_fields.get(FieldType::hybrid_A_fp_external, Direction{1}, lev), + "A", "y"); + warpx.ReadExternalFieldFromFile(m_external_file_path, + warpx.m_fields.get(FieldType::hybrid_A_fp_external, Direction{2}, lev), + "A", "z"); +#endif + } + } else { + // Initialize the A fields from expression + m_A_external_parser[0] = std::make_unique( + utils::parser::makeParser(m_Ax_ext_grid_function,{"x","y","z","t"})); + m_A_external_parser[1] = std::make_unique( + utils::parser::makeParser(m_Ay_ext_grid_function,{"x","y","z","t"})); + m_A_external_parser[2] = std::make_unique( + utils::parser::makeParser(m_Az_ext_grid_function,{"x","y","z","t"})); + m_A_external[0] = m_A_external_parser[0]->compile<4>(); + m_A_external[1] = m_A_external_parser[1]->compile<4>(); + m_A_external[2] = m_A_external_parser[2]->compile<4>(); + + // check if the external current parsers depend on time + for (int i=0; i<3; i++) { + const std::set A_ext_symbols = m_A_external_parser[i]->symbols(); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(A_ext_symbols.count("t") == 0, + "Externally Applied Vector potential time variation must be set with A_time_external_function(t)"); + } + + // Initialize data onto grid + for (auto lev = 0; lev <= warpx.finestLevel(); ++lev) { + warpx.ComputeExternalFieldOnGridUsingParser( + FieldType::hybrid_A_fp_external, + m_A_external[0], + m_A_external[1], + m_A_external[2], + lev, PatchType::fine, EB::CoverTopology::none); + + for (int idir = 0; idir < 3; ++idir) { + warpx.m_fields.get(FieldType::hybrid_A_fp_external, Direction{idir}, lev)-> + FillBoundary(warpx.Geom(lev).periodicity()); + } + } + } + + amrex::Gpu::streamSynchronize(); + + 
m_A_external_time_parser = std::make_unique( + utils::parser::makeParser(m_A_ext_time_function,{"t",})); + m_A_time_scale = m_A_external_time_parser->compile<1>(); + + UpdateHybridExternalFields(warpx.gett_new(0), warpx.getdt(0)); +} + +AMREX_FORCE_INLINE +void +ExternalVectorPotential::ZeroFieldinEB (ablastr::fields::VectorField const& Field, EB::CoverTopology topology, const int lev) +{ + auto &warpx = WarpX::GetInstance(); + + // Loop through the grids, and over the tiles within each grid +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + for ( MFIter mfi(*Field[0], TilingIfNotGPU()); mfi.isValid(); ++mfi ) { + // Extract field data for this grid/tile + Array4 const& Fx = Field[0]->array(mfi); + Array4 const& Fy = Field[1]->array(mfi); + Array4 const& Fz = Field[2]->array(mfi); + + EB::Covered const& cov_ptr = EB::Covered(mfi, lev); + + // Extract tileboxes for which to loop + Box const& tbx = mfi.tilebox(Field[0]->ixType().toIntVect()); + Box const& tby = mfi.tilebox(Field[1]->ixType().toIntVect()); + Box const& tbz = mfi.tilebox(Field[2]->ixType().toIntVect()); + + // Loop over the cells and update the fields + amrex::ParallelFor(tbx, tby, tbz, + + [=] AMREX_GPU_DEVICE (int i, int j, int k){ + if (cov_ptr.isCovered(0, topology, i, j, k)) Fx(i, j, k) = 0_rt; + }, + + [=] AMREX_GPU_DEVICE (int i, int j, int k){ + if (cov_ptr.isCovered(1, topology, i, j, k)) Fy(i, j, k) = 0_rt; + }, + + [=] AMREX_GPU_DEVICE (int i, int j, int k){ + if (cov_ptr.isCovered(2, topology, i, j, k)) Fz(i, j, k) = 0_rt; + } + ); + } +} + +void +ExternalVectorPotential::UpdateHybridExternalFields (const amrex::Real t, const amrex::Real dt) +{ + using ablastr::fields::Direction; + auto& warpx = WarpX::GetInstance(); + + // Get B-field Scaling Factor + amrex::Real scale_factor_B = m_A_time_scale(t); + + // Get dA/dt scaling factor based on time centered FD around t + amrex::Real sf_l = m_A_time_scale(t-0.5_rt*dt); + amrex::Real sf_r = 
m_A_time_scale(t+0.5_rt*dt); + amrex::Real scale_factor_E = -(sf_r - sf_l)/dt; + + ablastr::fields::MultiLevelVectorField A_ext = + warpx.m_fields.get_mr_levels_alldirs(FieldType::hybrid_A_fp_external, warpx.finestLevel()); + ablastr::fields::MultiLevelVectorField B_ext = + warpx.m_fields.get_mr_levels_alldirs(FieldType::hybrid_B_fp_external, warpx.finestLevel()); + ablastr::fields::MultiLevelVectorField E_ext = + warpx.m_fields.get_mr_levels_alldirs(FieldType::hybrid_E_fp_external, warpx.finestLevel()); + + for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { + warpx.get_pointer_fdtd_solver_fp(lev)->ComputeCurlA( + B_ext[lev], + A_ext[lev], + lev + ); + + for (int idir = 0; idir < 3; ++idir) { + // Scale B field by the time factor + B_ext[lev][Direction{idir}]->mult(scale_factor_B); + B_ext[lev][Direction{idir}]->FillBoundary(warpx.Geom(lev).periodicity()); + + // Copy A into E and scale by the (-) derivative of the time function + E_ext[lev][Direction{idir}]->setVal(scale_factor_E); + amrex::MultiFab::Multiply(*E_ext[lev][Direction{idir}], *A_ext[lev][Direction{idir}], 0, 0, 1, 0); + E_ext[lev][Direction{idir}]->FillBoundary(warpx.Geom(lev).periodicity()); + } + ZeroFieldinEB(B_ext[lev], EB::CoverTopology::face, lev); + ZeroFieldinEB(E_ext[lev], EB::CoverTopology::edge, lev); + } + amrex::Gpu::streamSynchronize(); +} diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index 41fd51eb203..42d98f013d1 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -87,12 +87,10 @@ public: * \param[in] edge_lengths Length of cell edges taking embedded boundaries into account */ void CalculatePlasmaCurrent ( - ablastr::fields::MultiLevelVectorField const& Bfield, - ablastr::fields::MultiLevelVectorField const& edge_lengths + 
ablastr::fields::MultiLevelVectorField const& Bfield ); void CalculatePlasmaCurrent ( ablastr::fields::VectorField const& Bfield, - ablastr::fields::VectorField const& edge_lengths, int lev ); @@ -105,7 +103,6 @@ public: ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelVectorField const& Bfield, ablastr::fields::MultiLevelScalarField const& rhofield, - ablastr::fields::MultiLevelVectorField const& edge_lengths, bool solve_for_Faraday) const; void HybridPICSolveE ( @@ -113,7 +110,6 @@ public: ablastr::fields::VectorField const& Jfield, ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, - ablastr::fields::VectorField const& edge_lengths, int lev, bool solve_for_Faraday) const; void HybridPICSolveE ( @@ -121,7 +117,6 @@ public: ablastr::fields::VectorField const& Jfield, ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, - ablastr::fields::VectorField const& edge_lengths, int lev, PatchType patch_type, bool solve_for_Faraday) const; void BfieldEvolveRK ( @@ -129,7 +124,6 @@ public: ablastr::fields::MultiLevelVectorField const& Efield, ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelScalarField const& rhofield, - ablastr::fields::MultiLevelVectorField const& edge_lengths, amrex::Real dt, DtType a_dt_type, amrex::IntVect ng, std::optional nodal_sync); @@ -138,7 +132,6 @@ public: ablastr::fields::MultiLevelVectorField const& Efield, ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelScalarField const& rhofield, - ablastr::fields::MultiLevelVectorField const& edge_lengths, amrex::Real dt, int lev, DtType dt_type, amrex::IntVect ng, std::optional nodal_sync); @@ -147,7 +140,6 @@ public: ablastr::fields::MultiLevelVectorField const& Efield, ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelScalarField const& rhofield, - ablastr::fields::MultiLevelVectorField const& edge_lengths, amrex::Real dt, 
DtType dt_type, amrex::IntVect ng, std::optional nodal_sync); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 63b73622499..95fb6e463ab 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -11,6 +11,7 @@ #include "HybridPICModel.H" #include "EmbeddedBoundary/Enabled.H" +#include "EmbeddedBoundary/Covered.H" #include "Fields.H" #include "Particles/MultiParticleContainer.H" #include "ExternalVectorPotential.H" @@ -255,9 +256,7 @@ void HybridPICModel::InitData () m_J_external[0], m_J_external[1], m_J_external[2], - lev, PatchType::fine, 'e', - warpx.m_fields.get_alldirs(FieldType::edge_lengths, lev), - warpx.m_fields.get_alldirs(FieldType::face_areas, lev)); + lev, PatchType::fine, EB::CoverTopology::edge); } if (m_add_external_fields) { @@ -277,26 +276,22 @@ void HybridPICModel::GetCurrentExternal () m_J_external[0], m_J_external[1], m_J_external[2], - lev, PatchType::fine, 'e', - warpx.m_fields.get_alldirs(FieldType::edge_lengths, lev), - warpx.m_fields.get_alldirs(FieldType::face_areas, lev)); + lev, PatchType::fine, EB::CoverTopology::edge); } } void HybridPICModel::CalculatePlasmaCurrent ( - ablastr::fields::MultiLevelVectorField const& Bfield, - ablastr::fields::MultiLevelVectorField const& edge_lengths) + ablastr::fields::MultiLevelVectorField const& Bfield) { auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { - CalculatePlasmaCurrent(Bfield[lev], edge_lengths[lev], lev); + CalculatePlasmaCurrent(Bfield[lev], lev); } } void HybridPICModel::CalculatePlasmaCurrent ( ablastr::fields::VectorField const& Bfield, - ablastr::fields::VectorField const& edge_lengths, const int lev) { WARPX_PROFILE("HybridPICModel::CalculatePlasmaCurrent()"); @@ -304,7 +299,7 @@ void 
HybridPICModel::CalculatePlasmaCurrent ( auto& warpx = WarpX::GetInstance(); ablastr::fields::VectorField current_fp_plasma = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_plasma, lev); warpx.get_pointer_fdtd_solver_fp(lev)->CalculateCurrentAmpere( - current_fp_plasma, Bfield, edge_lengths, lev + current_fp_plasma, Bfield, lev ); // we shouldn't apply the boundary condition to J since J = J_i - J_e but @@ -328,7 +323,6 @@ void HybridPICModel::HybridPICSolveE ( ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelVectorField const& Bfield, ablastr::fields::MultiLevelScalarField const& rhofield, - ablastr::fields::MultiLevelVectorField const& edge_lengths, const bool solve_for_Faraday) const { auto& warpx = WarpX::GetInstance(); @@ -336,7 +330,7 @@ void HybridPICModel::HybridPICSolveE ( { HybridPICSolveE( Efield[lev], Jfield[lev], Bfield[lev], *rhofield[lev], - edge_lengths[lev], lev, solve_for_Faraday + lev, solve_for_Faraday ); } } @@ -346,13 +340,12 @@ void HybridPICModel::HybridPICSolveE ( ablastr::fields::VectorField const& Jfield, ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, - ablastr::fields::VectorField const& edge_lengths, const int lev, const bool solve_for_Faraday) const { WARPX_PROFILE("WarpX::HybridPICSolveE()"); HybridPICSolveE( - Efield, Jfield, Bfield, rhofield, edge_lengths, lev, + Efield, Jfield, Bfield, rhofield, lev, PatchType::fine, solve_for_Faraday ); if (lev > 0) @@ -367,7 +360,6 @@ void HybridPICModel::HybridPICSolveE ( ablastr::fields::VectorField const& Jfield, ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, - ablastr::fields::VectorField const& edge_lengths, const int lev, PatchType patch_type, const bool solve_for_Faraday) const { @@ -379,7 +371,7 @@ void HybridPICModel::HybridPICSolveE ( // Solve E field in regular cells warpx.get_pointer_fdtd_solver_fp(lev)->HybridPICSolveE( Efield, current_fp_plasma, Jfield, Bfield, rhofield, - 
*electron_pressure_fp, edge_lengths, lev, this, solve_for_Faraday + *electron_pressure_fp, lev, this, solve_for_Faraday ); warpx.ApplyEfieldBoundary(lev, patch_type); } @@ -445,7 +437,6 @@ void HybridPICModel::BfieldEvolveRK ( ablastr::fields::MultiLevelVectorField const& Efield, ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelScalarField const& rhofield, - ablastr::fields::MultiLevelVectorField const& edge_lengths, amrex::Real dt, DtType dt_type, IntVect ng, std::optional nodal_sync ) { @@ -453,7 +444,7 @@ void HybridPICModel::BfieldEvolveRK ( for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { BfieldEvolveRK( - Bfield, Efield, Jfield, rhofield, edge_lengths, dt, lev, dt_type, + Bfield, Efield, Jfield, rhofield, dt, lev, dt_type, ng, nodal_sync ); } @@ -464,7 +455,6 @@ void HybridPICModel::BfieldEvolveRK ( ablastr::fields::MultiLevelVectorField const& Efield, ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelScalarField const& rhofield, - ablastr::fields::MultiLevelVectorField const& edge_lengths, amrex::Real dt, int lev, DtType dt_type, IntVect ng, std::optional nodal_sync ) { @@ -491,7 +481,7 @@ void HybridPICModel::BfieldEvolveRK ( // The Runge-Kutta scheme begins here. 
// Step 1: FieldPush( - Bfield, Efield, Jfield, rhofield, edge_lengths, + Bfield, Efield, Jfield, rhofield, 0.5_rt*dt, dt_type, ng, nodal_sync ); @@ -507,7 +497,7 @@ void HybridPICModel::BfieldEvolveRK ( // Step 2: FieldPush( - Bfield, Efield, Jfield, rhofield, edge_lengths, + Bfield, Efield, Jfield, rhofield, 0.5_rt*dt, dt_type, ng, nodal_sync ); @@ -527,7 +517,7 @@ void HybridPICModel::BfieldEvolveRK ( // Step 3: FieldPush( - Bfield, Efield, Jfield, rhofield, edge_lengths, + Bfield, Efield, Jfield, rhofield, dt, dt_type, ng, nodal_sync ); @@ -543,7 +533,7 @@ void HybridPICModel::BfieldEvolveRK ( // Step 4: FieldPush( - Bfield, Efield, Jfield, rhofield, edge_lengths, + Bfield, Efield, Jfield, rhofield, 0.5_rt*dt, dt_type, ng, nodal_sync ); @@ -572,21 +562,21 @@ void HybridPICModel::BfieldEvolveRK ( } } + void HybridPICModel::FieldPush ( ablastr::fields::MultiLevelVectorField const& Bfield, ablastr::fields::MultiLevelVectorField const& Efield, ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelScalarField const& rhofield, - ablastr::fields::MultiLevelVectorField const& edge_lengths, amrex::Real dt, DtType dt_type, IntVect ng, std::optional nodal_sync ) { auto& warpx = WarpX::GetInstance(); // Calculate J = curl x B / mu0 - J_ext - CalculatePlasmaCurrent(Bfield, edge_lengths); + CalculatePlasmaCurrent(Bfield); // Calculate the E-field from Ohm's law - HybridPICSolveE(Efield, Jfield, Bfield, rhofield, edge_lengths, true); + HybridPICSolveE(Efield, Jfield, Bfield, rhofield, true); warpx.FillBoundaryE(ng, nodal_sync); // Push forward the B-field using Faraday's law warpx.EvolveB(dt, dt_type); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 6823a66f14b..bee188b4d0f 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -28,7 +28,6 @@ using 
warpx::fields::FieldType; void FiniteDifferenceSolver::CalculateCurrentAmpere ( ablastr::fields::VectorField & Jfield, ablastr::fields::VectorField const& Bfield, - ablastr::fields::VectorField const& edge_lengths, int lev ) { // Select algorithm (The choice of algorithm is a runtime option, @@ -36,12 +35,12 @@ void FiniteDifferenceSolver::CalculateCurrentAmpere ( if (m_fdtd_algo == ElectromagneticSolverAlgo::HybridPIC) { #ifdef WARPX_DIM_RZ CalculateCurrentAmpereCylindrical ( - Jfield, Bfield, edge_lengths, lev + Jfield, Bfield, lev ); #else CalculateCurrentAmpereCartesian ( - Jfield, Bfield, edge_lengths, lev + Jfield, Bfield, lev ); #endif @@ -63,7 +62,6 @@ template void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( ablastr::fields::VectorField& Jfield, ablastr::fields::VectorField const& Bfield, - ablastr::fields::VectorField const& edge_lengths, int lev ) { @@ -94,13 +92,7 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( Array4 const& Bt = Bfield[1]->array(mfi); Array4 const& Bz = Bfield[2]->array(mfi); - amrex::Array4 lr, lt, lz; - - if (EB::enabled()) { - lr = edge_lengths[0]->array(mfi); - lt = edge_lengths[1]->array(mfi); - lz = edge_lengths[2]->array(mfi); - } + EB::Covered const& cov_ptr = EB::Covered(mfi, lev); // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_r = m_stencil_coefs_r.dataPtr(); @@ -126,8 +118,8 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( // Jr calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ - // Skip if this cell is fully covered by embedded boundaries - if (lr && lr(i, j, 0) <= 0) { return; } + if (cov_ptr.isCovered(0, EB::CoverTopology::edge, i, j, 0)) { return; } + // Mode m=0 Jr(i, j, 0, 0) = one_over_mu0 * ( - T_Algo::DownwardDz(Bt, coefs_z, n_coefs_z, i, j, 0, 0) @@ -150,8 +142,7 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( // Jt calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ - // In RZ Jt is associated with 
a mesh node, so we need to check if the mesh node is covered - if (lr && (lr(i, j, 0)<=0 || lr(i-1, j, 0)<=0 || lz(i, j-1, 0)<=0 || lz(i, j, 0)<=0)) { return; } + if (cov_ptr.isCovered(1, EB::CoverTopology::edge, i, j, 0)) { return; } // r on a nodal point (Jt is nodal in r) Real const r = rmin + i*dr; @@ -196,8 +187,8 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( // Jz calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ - // Skip if this cell is fully covered by embedded boundaries - if (lz && lz(i, j, 0) <= 0) { return; } + if (cov_ptr.isCovered(2, EB::CoverTopology::edge, i, j, 0)) { return; } + // r on a nodal point (Jz is nodal in r) Real const r = rmin + i*dr; // Off-axis, regular curl @@ -246,7 +237,6 @@ template void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( ablastr::fields::VectorField& Jfield, ablastr::fields::VectorField const& Bfield, - ablastr::fields::VectorField const& edge_lengths, int lev ) { @@ -276,12 +266,7 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( Array4 const &By = Bfield[1]->const_array(mfi); Array4 const &Bz = Bfield[2]->const_array(mfi); - amrex::Array4 lx, ly, lz; - if (EB::enabled()) { - lx = edge_lengths[0]->array(mfi); - ly = edge_lengths[1]->array(mfi); - lz = edge_lengths[2]->array(mfi); - } + EB::Covered const& cov_ptr = EB::Covered(mfi, lev); // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_x = m_stencil_coefs_x.dataPtr(); @@ -304,8 +289,7 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( // Jx calculation [=] AMREX_GPU_DEVICE (int i, int j, int k){ - // Skip if this cell is fully covered by embedded boundaries - if (lx && lx(i, j, k) <= 0) { return; } + if (cov_ptr.isCovered(0, EB::CoverTopology::edge, i, j, k)) { return; } Jx(i, j, k) = one_over_mu0 * ( - T_Algo::DownwardDz(By, coefs_z, n_coefs_z, i, j, k) @@ -315,14 +299,8 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( // Jy calculation [=] 
AMREX_GPU_DEVICE (int i, int j, int k){ - // Skip if this cell is fully covered by embedded boundaries -#ifdef WARPX_DIM_3D - if (ly && ly(i,j,k) <= 0) { return; } -#elif defined(WARPX_DIM_XZ) - // In XZ Jy is associated with a mesh node, so we need to check if the mesh node is covered - amrex::ignore_unused(ly); - if (lx && (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j-1, k)<=0 || lz(i, j, k)<=0)) { return; } -#endif + if (cov_ptr.isCovered(1, EB::CoverTopology::edge, i, j, k)) { return; } + Jy(i, j, k) = one_over_mu0 * ( - T_Algo::DownwardDx(Bz, coefs_x, n_coefs_x, i, j, k) + T_Algo::DownwardDz(Bx, coefs_z, n_coefs_z, i, j, k) @@ -331,8 +309,7 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( // Jz calculation [=] AMREX_GPU_DEVICE (int i, int j, int k){ - // Skip if this cell is fully covered by embedded boundaries - if (lz && lz(i,j,k) <= 0) { return; } + if (cov_ptr.isCovered(2, EB::CoverTopology::edge, i, j, k)) { return; } Jz(i, j, k) = one_over_mu0 * ( - T_Algo::DownwardDy(Bx, coefs_y, n_coefs_y, i, j, k) @@ -359,7 +336,6 @@ void FiniteDifferenceSolver::HybridPICSolveE ( ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, - ablastr::fields::VectorField const& edge_lengths, int lev, HybridPICModel const* hybrid_model, const bool solve_for_Faraday) { @@ -370,14 +346,14 @@ void FiniteDifferenceSolver::HybridPICSolveE ( HybridPICSolveECylindrical ( Efield, Jfield, Jifield, Bfield, rhofield, Pefield, - edge_lengths, lev, hybrid_model, solve_for_Faraday + lev, hybrid_model, solve_for_Faraday ); #else HybridPICSolveECartesian ( Efield, Jfield, Jifield, Bfield, rhofield, Pefield, - edge_lengths, lev, hybrid_model, solve_for_Faraday + lev, hybrid_model, solve_for_Faraday ); #endif @@ -398,7 +374,6 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, - ablastr::fields::VectorField 
const& edge_lengths, int lev, HybridPICModel const* hybrid_model, const bool solve_for_Faraday ) { @@ -559,12 +534,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Array4 const& Et_ext = Efield_external[1]->const_array(mfi); Array4 const& Ez_ext = Efield_external[2]->const_array(mfi); - amrex::Array4 lr, lz; - if (EB::enabled()) { - lr = edge_lengths[0]->array(mfi); - // edge_lengths[1] is `lt` and is not needed - lz = edge_lengths[2]->array(mfi); - } + EB::Covered const& cov_ptr = EB::Covered(mfi, lev); // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_r = m_stencil_coefs_r.dataPtr(); @@ -585,8 +555,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Er calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ - // Skip if this cell is fully covered by embedded boundaries - if (lr && lr(i, j, 0) <= 0) { return; } + if (cov_ptr.isCovered(0, EB::CoverTopology::edge, i, j, 0)) { return; } // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Er_stag, coarsen, i, j, 0, 0); @@ -631,8 +600,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Et calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ - // In RZ Et is associated with a mesh node, so we need to check if the mesh node is covered - if (lr && (lr(i, j, 0)<=0 || lr(i-1, j, 0)<=0 || lz(i, j-1, 0)<=0 || lz(i, j, 0)<=0)) { return; } + if (cov_ptr.isCovered(1, EB::CoverTopology::edge, i, j, 0)) { return; } // r on a nodal grid (Et is nodal in r) Real const r = rmin + i*dr; @@ -678,8 +646,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Ez calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ - // Skip field solve if this cell is fully covered by embedded boundaries - if (lz && lz(i,j,0) <= 0) { return; } + if (cov_ptr.isCovered(2, EB::CoverTopology::edge, i, j, 0)) { return; } // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Ez_stag, 
coarsen, i, j, 0, 0); @@ -739,7 +706,6 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, - ablastr::fields::VectorField const& edge_lengths, int lev, HybridPICModel const* hybrid_model, const bool solve_for_Faraday ) { @@ -902,12 +868,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Ez_ext = Efield_external[2]->array(mfi); } - amrex::Array4 lx, ly, lz; - if (EB::enabled()) { - lx = edge_lengths[0]->array(mfi); - ly = edge_lengths[1]->array(mfi); - lz = edge_lengths[2]->array(mfi); - } + EB::Covered const& cov_ptr = EB::Covered(mfi, lev); // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_x = m_stencil_coefs_x.dataPtr(); @@ -927,7 +888,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Ex calculation [=] AMREX_GPU_DEVICE (int i, int j, int k){ // Skip if this cell is fully covered by embedded boundaries - if (lx && lx(i, j, k) <= 0) { return; } + if (cov_ptr.isCovered(0, EB::CoverTopology::edge, i, j, k)) { return; } // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Ex_stag, coarsen, i, j, k, 0); @@ -969,14 +930,8 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Ey calculation [=] AMREX_GPU_DEVICE (int i, int j, int k) { - // Skip field solve if this cell is fully covered by embedded boundaries -#ifdef WARPX_DIM_3D - if (ly && ly(i,j,k) <= 0) { return; } -#elif defined(WARPX_DIM_XZ) - //In XZ Ey is associated with a mesh node, so we need to check if the mesh node is covered - amrex::ignore_unused(ly); - if (lx && (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j-1, k)<=0 || lz(i, j, k)<=0)) { return; } -#endif + if (cov_ptr.isCovered(1, EB::CoverTopology::edge, i, j, k)) { return; } + // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Ey_stag, coarsen, i, j, k, 0); @@ -1017,10 +972,8 @@ void 
FiniteDifferenceSolver::HybridPICSolveECartesian ( // Ez calculation [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB - // Skip field solve if this cell is fully covered by embedded boundaries - if (lz && lz(i,j,k) <= 0) { return; } -#endif + if (cov_ptr.isCovered(2, EB::CoverTopology::edge, i, j, k)) { return; } + // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, k, 0); diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index 28433419805..243856b2454 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -131,7 +131,6 @@ void WarpX::HybridPICEvolveFields () m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level), m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level), current_fp_temp, rho_fp_temp, - m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level), 0.5_rt*dt[0]/sub_steps, DtType::FirstHalf, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points @@ -165,7 +164,6 @@ void WarpX::HybridPICEvolveFields () m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level), m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), rho_fp_temp, - m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level), 0.5_rt*dt[0]/sub_steps, DtType::SecondHalf, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points @@ -201,15 +199,14 @@ void WarpX::HybridPICEvolveFields () // Update the E field to t=n+1 using the extrapolated J_i^n+1 value m_hybrid_pic_model->CalculatePlasmaCurrent( - m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level), - m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level)); + m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level)); m_hybrid_pic_model->HybridPICSolveE( m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level), current_fp_temp, 
m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level), m_fields.get_mr_levels(FieldType::rho_fp, finest_level), - m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level), false); + false); FillBoundaryE(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index f9cacf016d0..16d7b7da1b0 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -977,18 +977,14 @@ WarpX::InitLevelData (int lev, Real /*time*/) m_p_ext_field_params->Bxfield_parser->compile<4>(), m_p_ext_field_params->Byfield_parser->compile<4>(), m_p_ext_field_params->Bzfield_parser->compile<4>(), - lev, PatchType::fine, 'f', - m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_alldirs(FieldType::face_areas, lev)); + lev, PatchType::fine, EB::CoverTopology::face); ComputeExternalFieldOnGridUsingParser( FieldType::Bfield_cp, m_p_ext_field_params->Bxfield_parser->compile<4>(), m_p_ext_field_params->Byfield_parser->compile<4>(), m_p_ext_field_params->Bzfield_parser->compile<4>(), - lev, PatchType::coarse, 'f', - m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_mr_levels_alldirs(FieldType::face_areas, max_level)[lev]); + lev, PatchType::coarse, EB::CoverTopology::face); } // if the input string for the E-field is "parse_e_ext_grid_function", @@ -1019,18 +1015,14 @@ WarpX::InitLevelData (int lev, Real /*time*/) m_p_ext_field_params->Exfield_parser->compile<4>(), m_p_ext_field_params->Eyfield_parser->compile<4>(), m_p_ext_field_params->Ezfield_parser->compile<4>(), - lev, PatchType::fine, 'e', - m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_alldirs(FieldType::face_areas, lev)); + lev, PatchType::fine, EB::CoverTopology::edge); ComputeExternalFieldOnGridUsingParser( FieldType::Efield_cp, m_p_ext_field_params->Exfield_parser->compile<4>(), m_p_ext_field_params->Eyfield_parser->compile<4>(), 
m_p_ext_field_params->Ezfield_parser->compile<4>(), - lev, PatchType::coarse, 'e', - m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_alldirs(FieldType::face_areas, lev)); + lev, PatchType::coarse, EB::CoverTopology::edge); #ifdef AMREX_USE_EB if (eb_enabled) { if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { @@ -1064,9 +1056,7 @@ void WarpX::ComputeExternalFieldOnGridUsingParser ( amrex::ParserExecutor<4> const& fx_parser, amrex::ParserExecutor<4> const& fy_parser, amrex::ParserExecutor<4> const& fz_parser, - int lev, PatchType patch_type, [[maybe_unused]] const char topology, - std::optional const& edge_lengths, - std::optional const& face_areas) + int lev, PatchType patch_type, EB::CoverTopology topology) { auto t = gett_new(lev); @@ -1089,8 +1079,6 @@ void WarpX::ComputeExternalFieldOnGridUsingParser ( const amrex::IntVect y_nodal_flag = mfy->ixType().toIntVect(); const amrex::IntVect z_nodal_flag = mfz->ixType().toIntVect(); - const bool eb_enabled = EB::enabled(); - for ( MFIter mfi(*mfx, TilingIfNotGPU()); mfi.isValid(); ++mfi) { const amrex::Box& tbx = mfi.tilebox( x_nodal_flag, mfx->nGrowVect() ); const amrex::Box& tby = mfi.tilebox( y_nodal_flag, mfy->nGrowVect() ); @@ -1100,44 +1088,12 @@ void WarpX::ComputeExternalFieldOnGridUsingParser ( auto const& mfyfab = mfy->array(mfi); auto const& mfzfab = mfz->array(mfi); - amrex::Array4 lx, ly, lz, Sx, Sy, Sz; - if (eb_enabled) { - if (edge_lengths.has_value()) { - const auto& edge_lengths_array = edge_lengths.value(); - lx = edge_lengths_array[0]->array(mfi); - ly = edge_lengths_array[1]->array(mfi); - lz = edge_lengths_array[2]->array(mfi); - } - if (face_areas.has_value()) { - const auto& face_areas_array = face_areas.value(); - Sx = face_areas_array[0]->array(mfi); - Sy = face_areas_array[1]->array(mfi); - Sz = face_areas_array[2]->array(mfi); - } - } - -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::Dim3 lx_lo, lx_hi, lz_lo, lz_hi; -#endif - if 
(eb_enabled) { -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - lx_lo = amrex::lbound(lx); - lx_hi = amrex::ubound(lx); - lz_lo = amrex::lbound(lz); - lz_hi = amrex::ubound(lz); -#endif - } + EB::Covered const& cov_ptr = EB::Covered(mfi, lev); amrex::ParallelFor (tbx, tby, tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k) { -#ifdef AMREX_USE_EB -#ifdef WARPX_DIM_3D - if(lx && ((topology=='e' and lx(i, j, k)<=0) or (topology=='f' and Sx(i, j, k)<=0))) { return; } -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - //In XZ and RZ Ex is associated with a x-edge, while Bx is associated with a z-edge - if(lx && ((topology=='e' and lx(i, j, k)<=0) or (topology=='f' and lz(i, j, k)<=0))) { return; } -#endif -#endif + if (cov_ptr.isCovered(0, topology, i, j, k)) { return; } + // Shift required in the x-, y-, or z- position // depending on the index type of the multifab #if defined(WARPX_DIM_1D_Z) @@ -1163,19 +1119,7 @@ void WarpX::ComputeExternalFieldOnGridUsingParser ( mfxfab(i,j,k) = fx_parser(x,y,z,t); }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { -#ifdef AMREX_USE_EB -#ifdef WARPX_DIM_3D - if(ly && ((topology=='e' and ly(i, j, k)<=0) or (topology=='f' and Sy(i, j, k)<=0))) { return; } -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - //In XZ and RZ Ey is associated with a mesh node, so we need to check if the mesh node is covered - if(lx && - ((topology=='e' and (lx(std::min(i , lx_hi.x), std::min(j , lx_hi.y), k)<=0 - || lx(std::max(i-1, lx_lo.x), std::min(j , lx_hi.y), k)<=0 - || lz(std::min(i , lz_hi.x), std::min(j , lz_hi.y), k)<=0 - || lz(std::min(i , lz_hi.x), std::max(j-1, lz_lo.y), k)<=0)) or - (topology=='f' and Sy(i,j,k)<=0))) { return; } -#endif -#endif + if (cov_ptr.isCovered(1, topology, i, j, k)) { return; } #if defined(WARPX_DIM_1D_Z) const amrex::Real x = 0._rt; const amrex::Real y = 0._rt; @@ -1199,14 +1143,7 @@ void WarpX::ComputeExternalFieldOnGridUsingParser ( mfyfab(i,j,k) = fy_parser(x,y,z,t); }, [=] AMREX_GPU_DEVICE (int i, int j, 
int k) { -#ifdef AMREX_USE_EB -#ifdef WARPX_DIM_3D - if(lz && ((topology=='e' and lz(i, j, k)<=0) or (topology=='f' and Sz(i, j, k)<=0))) { return; } -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - //In XZ and RZ Ez is associated with a z-edge, while Bz is associated with a x-edge - if(lz && ((topology=='e' and lz(i, j, k)<=0) or (topology=='f' and lx(i, j, k)<=0))) { return; } -#endif -#endif + if (cov_ptr.isCovered(2, topology, i, j, k)) { return; } #if defined(WARPX_DIM_1D_Z) const amrex::Real x = 0._rt; const amrex::Real y = 0._rt; @@ -1393,9 +1330,7 @@ WarpX::LoadExternalFields (int const lev) m_p_ext_field_params->Bxfield_parser->compile<4>(), m_p_ext_field_params->Byfield_parser->compile<4>(), m_p_ext_field_params->Bzfield_parser->compile<4>(), - lev, PatchType::fine, 'f', - m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_alldirs(FieldType::face_areas, lev)); + lev, PatchType::fine, EB::CoverTopology::face); } else if (m_p_ext_field_params->B_ext_grid_type == ExternalFieldType::read_from_file) { #if defined(WARPX_DIM_RZ) @@ -1418,9 +1353,7 @@ WarpX::LoadExternalFields (int const lev) m_p_ext_field_params->Exfield_parser->compile<4>(), m_p_ext_field_params->Eyfield_parser->compile<4>(), m_p_ext_field_params->Ezfield_parser->compile<4>(), - lev, PatchType::fine, 'e', - m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_alldirs(FieldType::face_areas, lev)); + lev, PatchType::fine, EB::CoverTopology::edge); } else if (m_p_ext_field_params->E_ext_grid_type == ExternalFieldType::read_from_file) { #if defined(WARPX_DIM_RZ) diff --git a/Source/WarpX.H b/Source/WarpX.H index bad63cd44d9..d633cf55690 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -37,6 +37,7 @@ # endif #endif #include "AcceleratorLattice/AcceleratorLattice.H" +#include "EmbeddedBoundary/Covered.H" #include "Evolve/WarpXDtType.H" #include "Evolve/WarpXPushType.H" #include "Fields.H" @@ -926,9 +927,7 @@ public: amrex::ParserExecutor<4> const& fx_parser, 
amrex::ParserExecutor<4> const& fy_parser, amrex::ParserExecutor<4> const& fz_parser, - int lev, PatchType patch_type, [[maybe_unused]] char topology, - std::optional const& edge_lengths = std::nullopt, - std::optional const& face_areas = std::nullopt); + int lev, PatchType patch_type, EB::CoverTopology topology); /** * \brief Load field values from a user-specified openPMD file, From c4b9417116baee9c4f0dc669b8bcce0146cbc308 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 19:54:49 +0000 Subject: [PATCH 25/86] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- Source/EmbeddedBoundary/Covered.H | 6 +++--- Source/EmbeddedBoundary/Covered.cpp | 2 +- .../FiniteDifferenceSolver/ComputeCurlA.cpp | 9 ++++----- .../HybridPICModel/ExternalVectorPotential.H | 12 ++++++------ .../HybridPICModel/ExternalVectorPotential.cpp | 14 +++++++------- 5 files changed, 21 insertions(+), 22 deletions(-) diff --git a/Source/EmbeddedBoundary/Covered.H b/Source/EmbeddedBoundary/Covered.H index 10ebf48f307..2d15fbe6538 100644 --- a/Source/EmbeddedBoundary/Covered.H +++ b/Source/EmbeddedBoundary/Covered.H @@ -94,12 +94,12 @@ public: || lz(std::min(i , lz_hi.x), std::min(j , lz_hi.y), k)<=0 || lz(std::min(i , lz_hi.x), std::max(j-1, lz_lo.y), k)<=0)) or (topology==CoverTopology::face and Sy(i,j,k)<=0)) - { + { is_covered = true; } #endif break; - + case 2: #ifdef WARPX_DIM_3D if((topology==CoverTopology::edge and lz(i, j, k)<=0) or (topology==CoverTopology::face and Sz(i, j, k)<=0)) @@ -122,4 +122,4 @@ public: }; } // namespace EB -#endif \ No newline at end of file +#endif diff --git a/Source/EmbeddedBoundary/Covered.cpp b/Source/EmbeddedBoundary/Covered.cpp index be3265dd13c..c9b0dc0d5da 100644 --- a/Source/EmbeddedBoundary/Covered.cpp +++ b/Source/EmbeddedBoundary/Covered.cpp @@ -64,4 +64,4 @@ Covered::Covered (amrex::MFIter &mfi, int lev) } } -} \ No 
newline at end of file +} diff --git a/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp b/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp index 1af1232dee2..f194eaf1186 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp @@ -54,7 +54,7 @@ void FiniteDifferenceSolver::ComputeCurlA ( // * \brief Calculate B from the curl of A // * i.e. B = curl(A) output field on B field mesh staggering // * -// * \param[out] curlField output of curl operation +// * \param[out] curlField output of curl operation // * \param[in] field input staggered field, should be on E/J/A mesh staggering // */ #ifdef WARPX_DIM_RZ @@ -116,7 +116,7 @@ void FiniteDifferenceSolver::ComputeCurlACylindrical ( // Br calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ if (cov_ptr.isCovered(0, EB::CoverTopology::face, i, j, 0)) { return; } - + Real const r = rmin + i*dr; // r on nodal point (Br is nodal in r) if (r != 0) { // Off-axis, regular Maxwell equations Br(i, j, 0, 0) = - T_Algo::UpwardDz(At, coefs_z, n_coefs_z, i, j, 0, 0); // Mode m=0 @@ -169,7 +169,7 @@ void FiniteDifferenceSolver::ComputeCurlACylindrical ( // Jz calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ if (cov_ptr.isCovered(2, EB::CoverTopology::face, i, j, 0)) { return; } - + Real const r = rmin + (i + 0.5_rt)*dr; // r on a cell-centered grid (Bz is cell-centered in r) Bz(i, j, 0, 0) = - ( - T_Algo::UpwardDrr_over_r(At, r, dr, coefs_r, n_coefs_r, i, j, 0, 0)); for (int m=1 ; m m_A_time_scale; bool m_read_A_from_file = false; - std::string m_external_file_path = ""; + std::string m_external_file_path = ""; public: @@ -64,7 +64,7 @@ public: // Default Constructor ExternalVectorPotential (); - void ReadParameters (); + void ReadParameters (); void AllocateLevelMFs ( ablastr::fields::MultiFabRegister & fields, @@ -93,4 +93,4 @@ public: ); }; -#endif //WARPX_TIME_DEPENDENT_VECTOR_POTENTIAL_H_ \ No newline at end of 
file +#endif //WARPX_TIME_DEPENDENT_VECTOR_POTENTIAL_H_ diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp index 053de4579ed..287ec0cca63 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp @@ -39,9 +39,9 @@ ExternalVectorPotential::ReadParameters () } pp_ext_A.query("A_time_external_function(t)", m_A_ext_time_function); -} +} -void +void ExternalVectorPotential::AllocateLevelMFs ( ablastr::fields::MultiFabRegister & fields, int lev, const BoxArray& ba, const DistributionMapping& dm, @@ -84,7 +84,7 @@ ExternalVectorPotential::AllocateLevelMFs ( dm, ncomps, ngEB, 0.0_rt); } -void +void ExternalVectorPotential::InitData () { using ablastr::fields::Direction; @@ -130,7 +130,7 @@ ExternalVectorPotential::InitData () // check if the external current parsers depend on time for (int i=0; i<3; i++) { const std::set A_ext_symbols = m_A_external_parser[i]->symbols(); - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(A_ext_symbols.count("t") == 0, + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(A_ext_symbols.count("t") == 0, "Externally Applied Vector potential time variation must be set with A_time_external_function(t)"); } @@ -214,11 +214,11 @@ ExternalVectorPotential::UpdateHybridExternalFields (const amrex::Real t, const amrex::Real sf_r = m_A_time_scale(t+0.5_rt*dt); amrex::Real scale_factor_E = -(sf_r - sf_l)/dt; - ablastr::fields::MultiLevelVectorField A_ext = + ablastr::fields::MultiLevelVectorField A_ext = warpx.m_fields.get_mr_levels_alldirs(FieldType::hybrid_A_fp_external, warpx.finestLevel()); - ablastr::fields::MultiLevelVectorField B_ext = + ablastr::fields::MultiLevelVectorField B_ext = warpx.m_fields.get_mr_levels_alldirs(FieldType::hybrid_B_fp_external, warpx.finestLevel()); - ablastr::fields::MultiLevelVectorField E_ext 
= + ablastr::fields::MultiLevelVectorField E_ext = warpx.m_fields.get_mr_levels_alldirs(FieldType::hybrid_E_fp_external, warpx.finestLevel()); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { From 1cafe4d7b22f36dfbcc01f3f80af3724e7757e00 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Tue, 29 Oct 2024 13:34:30 -0700 Subject: [PATCH 26/86] Fixing some CI issues. --- Source/EmbeddedBoundary/Covered.H | 10 ---------- .../HybridPICModel/ExternalVectorPotential.cpp | 2 -- 2 files changed, 12 deletions(-) diff --git a/Source/EmbeddedBoundary/Covered.H b/Source/EmbeddedBoundary/Covered.H index 2d15fbe6538..1ab9b2d9e29 100644 --- a/Source/EmbeddedBoundary/Covered.H +++ b/Source/EmbeddedBoundary/Covered.H @@ -14,24 +14,14 @@ # include # include # include -# include -# include -# include # include -# include -# include # include # include # include # include # include -# include -# include -# include # include # include -# include -# include namespace EB { diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp index 287ec0cca63..68a952c7169 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp @@ -163,8 +163,6 @@ AMREX_FORCE_INLINE void ExternalVectorPotential::ZeroFieldinEB (ablastr::fields::VectorField const& Field, EB::CoverTopology topology, const int lev) { - auto &warpx = WarpX::GetInstance(); - // Loop through the grids, and over the tiles within each grid #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) From c73d22ccc612abe25d9d0711cebef3da2b4b2ae8 Mon Sep 17 00:00:00 2001 From: "S. 
Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Tue, 29 Oct 2024 14:03:23 -0700 Subject: [PATCH 27/86] Adding flags for sometimes unused variables in EB covering calcs. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- Source/EmbeddedBoundary/Covered.H | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Source/EmbeddedBoundary/Covered.H b/Source/EmbeddedBoundary/Covered.H index 1ab9b2d9e29..2558f27bfe9 100644 --- a/Source/EmbeddedBoundary/Covered.H +++ b/Source/EmbeddedBoundary/Covered.H @@ -45,11 +45,11 @@ public: // Can have topology of f or e for 'face' or 'edge' AMREX_GPU_HOST_DEVICE bool isCovered ( - const int idir, - const CoverTopology topology, - const int i, - const int j, - const int k) const noexcept + [[maybe_unused]] const int idir, + [[maybe_unused]] const CoverTopology topology, + [[maybe_unused]] const int i, + [[maybe_unused]] const int j, + [[maybe_unused]] const int k) const noexcept { bool is_covered = false; From e2730bc08356c88065746f4021434b76330477e5 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 30 Oct 2024 13:39:48 -0700 Subject: [PATCH 28/86] More CI cleanup. Signed-off-by: S. 
Eric Clark <25495882+clarkse@users.noreply.github.com> --- Source/EmbeddedBoundary/Covered.cpp | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/Source/EmbeddedBoundary/Covered.cpp b/Source/EmbeddedBoundary/Covered.cpp index c9b0dc0d5da..8da2b07dacd 100644 --- a/Source/EmbeddedBoundary/Covered.cpp +++ b/Source/EmbeddedBoundary/Covered.cpp @@ -13,27 +13,11 @@ # include # include # include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include # include # include # include -# include -# include -# include # include # include -# include using namespace amrex; using namespace warpx::fields; From aa716ae76234d8b0b985e69e821e2f6664dc0c67 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 30 Oct 2024 15:18:18 -0700 Subject: [PATCH 29/86] Clang-tidy update. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- Source/EmbeddedBoundary/Covered.H | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/Source/EmbeddedBoundary/Covered.H b/Source/EmbeddedBoundary/Covered.H index 2558f27bfe9..226f1820201 100644 --- a/Source/EmbeddedBoundary/Covered.H +++ b/Source/EmbeddedBoundary/Covered.H @@ -51,9 +51,9 @@ public: [[maybe_unused]] const int j, [[maybe_unused]] const int k) const noexcept { +#ifdef AMREX_USE_EB bool is_covered = false; -#ifdef AMREX_USE_EB switch (idir) { case 0: @@ -105,9 +105,10 @@ public: #endif break; } -#endif //AMREX_USE_EB - return is_covered; +#else + return false; +#endif //AMREX_USE_EB } }; From eed83ea56552cc34da524a736bb056b53ebea06f Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Thu, 31 Oct 2024 10:34:31 -0700 Subject: [PATCH 30/86] Adding nodiscard to EB::Covered::isCovered routine. Signed-off-by: S. 
Eric Clark <25495882+clarkse@users.noreply.github.com> --- Source/EmbeddedBoundary/Covered.H | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Source/EmbeddedBoundary/Covered.H b/Source/EmbeddedBoundary/Covered.H index 226f1820201..b9c97f6555f 100644 --- a/Source/EmbeddedBoundary/Covered.H +++ b/Source/EmbeddedBoundary/Covered.H @@ -43,7 +43,7 @@ public: Covered (amrex::MFIter &mfi, int lev); // Can have topology of f or e for 'face' or 'edge' - AMREX_GPU_HOST_DEVICE + AMREX_GPU_HOST_DEVICE [[nodiscard]] bool isCovered ( [[maybe_unused]] const int idir, [[maybe_unused]] const CoverTopology topology, From 6bfe0f9ceafcb67292ea254e6be37e526ca143c6 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Thu, 31 Oct 2024 11:42:38 -0700 Subject: [PATCH 31/86] Changing order of attribute list for HIP Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- Source/EmbeddedBoundary/Covered.H | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Source/EmbeddedBoundary/Covered.H b/Source/EmbeddedBoundary/Covered.H index b9c97f6555f..d72f5d904b8 100644 --- a/Source/EmbeddedBoundary/Covered.H +++ b/Source/EmbeddedBoundary/Covered.H @@ -43,7 +43,7 @@ public: Covered (amrex::MFIter &mfi, int lev); // Can have topology of f or e for 'face' or 'edge' - AMREX_GPU_HOST_DEVICE [[nodiscard]] + [[nodiscard]] AMREX_GPU_HOST_DEVICE bool isCovered ( [[maybe_unused]] const int idir, [[maybe_unused]] const CoverTopology topology, From b7249950ad10f82bf535118d63eb2d6457f4062e Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 6 Nov 2024 11:14:20 -0800 Subject: [PATCH 32/86] Adding ability to add lists of external fields with different timings. Signed-off-by: S. 
Eric Clark <25495882+clarkse@users.noreply.github.com> --- Python/pywarpx/picmi.py | 78 ++--- .../HybridPICModel/ExternalVectorPotential.H | 45 +-- .../ExternalVectorPotential.cpp | 293 ++++++++++++------ Source/Fields.H | 2 - Source/Initialization/WarpXInitData.cpp | 47 ++- Source/WarpX.H | 26 ++ 6 files changed, 323 insertions(+), 168 deletions(-) diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 25bcaa8a90e..b84cb709d45 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1800,10 +1800,7 @@ def __init__( Jx_external_function=None, Jy_external_function=None, Jz_external_function=None, - Ax_external_function=None, - Ay_external_function=None, - Az_external_function=None, - A_time_external_function=None, + A_external=None, **kw, ): self.grid = grid @@ -1824,19 +1821,23 @@ def __init__( self.add_external_fields = None - self.Ax_external_function = Ax_external_function - self.Ay_external_function = Ay_external_function - self.Az_external_function = Az_external_function - - if ( - Ax_external_function is not None - or Ay_external_function is not None - or Az_external_function is not None - ): + # It is expected that a nested dictionary will be passed + # into picmi for each field that has different timings + # e.g. + # A_external = { + # '<field name>': { + # 'Ax_external_function': ..., + # 'Ay_external_function': ..., + # 'Az_external_function': ..., + # 'A_time_external_function': ... 
+ # }, + # ': {...}' + # } + self.A_external = A_external + + if (A_external is not None): self.add_external_fields = True - self.A_time_external_function = A_time_external_function - # Handle keyword arguments used in expressions self.user_defined_kw = {} for k in list(kw.keys()): @@ -1887,29 +1888,36 @@ def solver_initialize_inputs(self): ) pywarpx.hybridpicmodel.add_external_fields = self.add_external_fields pywarpx.external_vector_potential.__setattr__( - "Ax_external_grid_function(x,y,z)", - pywarpx.my_constants.mangle_expression( - self.Ax_external_function, self.mangle_dict - ), - ) - pywarpx.external_vector_potential.__setattr__( - "Ay_external_grid_function(x,y,z)", - pywarpx.my_constants.mangle_expression( - self.Ay_external_function, self.mangle_dict - ), - ) - pywarpx.external_vector_potential.__setattr__( - "Az_external_grid_function(x,y,z)", - pywarpx.my_constants.mangle_expression( - self.Az_external_function, self.mangle_dict - ), - ) - pywarpx.external_vector_potential.__setattr__( - "A_time_external_function(t)", + "fields", pywarpx.my_constants.mangle_expression( - self.A_time_external_function, self.mangle_dict + list(self.A_external.keys()), self.mangle_dict ), ) + for field_name, field_dict in self.A_external.items(): + pywarpx.external_vector_potential.__setattr__( + f"{field_name}.Ax_external_grid_function(x,y,z)", + pywarpx.my_constants.mangle_expression( + field_dict['Ax_external_function'], self.mangle_dict + ), + ) + pywarpx.external_vector_potential.__setattr__( + f"{field_name}.Ay_external_grid_function(x,y,z)", + pywarpx.my_constants.mangle_expression( + field_dict['Ay_external_function'], self.mangle_dict + ), + ) + pywarpx.external_vector_potential.__setattr__( + f"{field_name}.Az_external_grid_function(x,y,z)", + pywarpx.my_constants.mangle_expression( + field_dict['Az_external_function'], self.mangle_dict + ), + ) + pywarpx.external_vector_potential.__setattr__( + f"{field_name}.A_time_external_function(t)", + 
pywarpx.my_constants.mangle_expression( + field_dict['A_time_external_function'], self.mangle_dict + ), + ) class ElectrostaticSolver(picmistandard.PICMI_ElectrostaticSolver): diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H index 6a09e6f7347..1c01f07dbeb 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H @@ -30,8 +30,6 @@ #include -using namespace amrex; - /** * \brief This class contains the parameters needed to evaluate a * time varying external vector potential, leading to external E/B @@ -45,21 +43,24 @@ using namespace amrex; class ExternalVectorPotential { protected: - std::string m_Ax_ext_grid_function = "0.0"; - std::string m_Ay_ext_grid_function = "0.0"; - std::string m_Az_ext_grid_function = "0.0"; - std::array< std::unique_ptr, 3> m_A_external_parser; - std::array< amrex::ParserExecutor<4>, 3> m_A_external; + int m_nFields; - std::string m_A_ext_time_function = "1.0"; - std::unique_ptr m_A_external_time_parser; - amrex::ParserExecutor<1> m_A_time_scale; + std::vector m_field_names; - bool m_read_A_from_file = false; - std::string m_external_file_path = ""; + std::vector m_Ax_ext_grid_function; + std::vector m_Ay_ext_grid_function; + std::vector m_Az_ext_grid_function; + std::vector, 3>> m_A_external_parser; + std::vector, 3>> m_A_external; -public: + std::vector m_A_ext_time_function; + std::vector> m_A_external_time_parser; + std::vector> m_A_time_scale; + std::vector m_read_A_from_file; + std::vector m_external_file_path; + +public: // Default Constructor ExternalVectorPotential (); @@ -68,15 +69,15 @@ public: void AllocateLevelMFs ( ablastr::fields::MultiFabRegister & fields, - int lev, const BoxArray& ba, const DistributionMapping& dm, + int lev, const amrex::BoxArray& ba, const 
amrex::DistributionMapping& dm, const int ncomps, - const IntVect& ngEB, - const IntVect& Ex_nodal_flag, - const IntVect& Ey_nodal_flag, - const IntVect& Ez_nodal_flag, - const IntVect& Bx_nodal_flag, - const IntVect& By_nodal_flag, - const IntVect& Bz_nodal_flag + const amrex::IntVect& ngEB, + const amrex::IntVect& Ex_nodal_flag, + const amrex::IntVect& Ey_nodal_flag, + const amrex::IntVect& Ez_nodal_flag, + const amrex::IntVect& Bx_nodal_flag, + const amrex::IntVect& By_nodal_flag, + const amrex::IntVect& Bz_nodal_flag ); void InitData (); @@ -85,7 +86,7 @@ public: void ZeroFieldinEB ( ablastr::fields::VectorField const& Field, EB::CoverTopology topology, - const int lev); + int lev); void UpdateHybridExternalFields ( const amrex::Real t, diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp index 68a952c7169..ec859d66dd6 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp @@ -28,17 +28,54 @@ ExternalVectorPotential::ReadParameters () { const ParmParse pp_ext_A("external_vector_potential"); - utils::parser::queryWithParser(pp_ext_A, "read_from_file", m_read_A_from_file); - - if (m_read_A_from_file) { - pp_ext_A.query("path", m_external_file_path); - } else { - pp_ext_A.query("Ax_external_grid_function(x,y,z)", m_Ax_ext_grid_function); - pp_ext_A.query("Ay_external_grid_function(x,y,z)", m_Ay_ext_grid_function); - pp_ext_A.query("Az_external_grid_function(x,y,z)", m_Az_ext_grid_function); - } + pp_ext_A.queryarr("fields", m_field_names); + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!m_field_names.empty(), + "No external field names defined in external_vector_potential.fields"); + + m_nFields = m_field_names.size(); + + // Resize vectors and set defaults + m_Ax_ext_grid_function.resize(m_nFields); + 
m_Ay_ext_grid_function.resize(m_nFields); + m_Az_ext_grid_function.resize(m_nFields); + for (std::string & field : m_Ax_ext_grid_function) field = "0.0"; + for (std::string & field : m_Ay_ext_grid_function) field = "0.0"; + for (std::string & field : m_Az_ext_grid_function) field = "0.0"; + + m_A_external_parser.resize(m_nFields); + m_A_external.resize(m_nFields); + + m_A_ext_time_function.resize(m_nFields); + for (std::string & field_time : m_A_ext_time_function) field_time = "1.0"; - pp_ext_A.query("A_time_external_function(t)", m_A_ext_time_function); + m_A_external_time_parser.resize(m_nFields); + m_A_time_scale.resize(m_nFields); + + m_read_A_from_file.resize(m_nFields); + m_external_file_path.resize(m_nFields); + for (std::string & file_name : m_external_file_path) file_name = ""; + + for (int i = 0; i < m_nFields; ++i) { + bool read_from_file = false; + utils::parser::queryWithParser(pp_ext_A, + (m_field_names[i]+".read_from_file").c_str(), read_from_file); + m_read_A_from_file[i] = read_from_file; + + if (m_read_A_from_file[i]) { + pp_ext_A.query((m_field_names[i]+".path").c_str(), m_external_file_path[i]); + } else { + pp_ext_A.query((m_field_names[i]+".Ax_external_grid_function(x,y,z)").c_str(), + m_Ax_ext_grid_function[i]); + pp_ext_A.query((m_field_names[i]+".Ay_external_grid_function(x,y,z)").c_str(), + m_Ay_ext_grid_function[i]); + pp_ext_A.query((m_field_names[i]+".Az_external_grid_function(x,y,z)").c_str(), + m_Az_ext_grid_function[i]); + } + + pp_ext_A.query((m_field_names[i]+".A_time_external_function(t)").c_str(), + m_A_ext_time_function[i]); + } } void @@ -55,15 +92,29 @@ ExternalVectorPotential::AllocateLevelMFs ( const IntVect& Bz_nodal_flag) { using ablastr::fields::Direction; - fields.alloc_init(FieldType::hybrid_A_fp_external, Direction{0}, - lev, amrex::convert(ba, Ex_nodal_flag), - dm, ncomps, ngEB, 0.0_rt); - fields.alloc_init(FieldType::hybrid_A_fp_external, Direction{1}, - lev, amrex::convert(ba, Ey_nodal_flag), - dm, ncomps, ngEB, 
0.0_rt); - fields.alloc_init(FieldType::hybrid_A_fp_external, Direction{2}, - lev, amrex::convert(ba, Ez_nodal_flag), - dm, ncomps, ngEB, 0.0_rt); + for (std::string const & field_name : m_field_names) { + std::string const Aext_field = field_name + std::string{"_Aext"}; + fields.alloc_init(Aext_field, Direction{0}, + lev, amrex::convert(ba, Ex_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(Aext_field, Direction{1}, + lev, amrex::convert(ba, Ey_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(Aext_field, Direction{2}, + lev, amrex::convert(ba, Ez_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + + std::string const curlAext_field = field_name + std::string{"_curlAext"}; + fields.alloc_init(curlAext_field, Direction{0}, + lev, amrex::convert(ba, Bx_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(curlAext_field, Direction{1}, + lev, amrex::convert(ba, By_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(curlAext_field, Direction{2}, + lev, amrex::convert(ba, Bz_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + } fields.alloc_init(FieldType::hybrid_E_fp_external, Direction{0}, lev, amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, 0.0_rt); @@ -90,71 +141,97 @@ ExternalVectorPotential::InitData () using ablastr::fields::Direction; auto& warpx = WarpX::GetInstance(); - if (m_read_A_from_file) { - // Read A fields from file - for (auto lev = 0; lev <= warpx.finestLevel(); ++lev) { + for (int i = 0; i < m_nFields; ++i) { + + std::string const Aext_field = m_field_names[i] + std::string{"_Aext"}; + + if (m_read_A_from_file[i]) { + // Read A fields from file + for (auto lev = 0; lev <= warpx.finestLevel(); ++lev) { #if defined(WARPX_DIM_RZ) - warpx.ReadExternalFieldFromFile(m_external_file_path, - warpx.m_fields.get(FieldType::hybrid_A_fp_external, Direction{0}, lev), - "A", "r"); - warpx.ReadExternalFieldFromFile(m_external_file_path, - warpx.m_fields.get(FieldType::hybrid_A_fp_external, Direction{1}, lev), - "A", "t"); - 
warpx.ReadExternalFieldFromFile(m_external_file_path, - warpx.m_fields.get(FieldType::hybrid_A_fp_external, Direction{2}, lev), - "A", "z"); + warpx.ReadExternalFieldFromFile(m_external_file_path[i], + warpx.m_fields.get(Aext_field, Direction{0}, lev), + "A", "r"); + warpx.ReadExternalFieldFromFile(m_external_file_path[i], + warpx.m_fields.get(Aext_field, Direction{1}, lev), + "A", "t"); + warpx.ReadExternalFieldFromFile(m_external_file_path[i], + warpx.m_fields.get(Aext_field, Direction{2}, lev), + "A", "z"); #else - warpx.ReadExternalFieldFromFile(m_external_file_path, - warpx.m_fields.get(FieldType::hybrid_A_fp_external, Direction{0}, lev), - "A", "x"); - warpx.ReadExternalFieldFromFile(m_external_file_path, - warpx.m_fields.get(FieldType::hybrid_A_fp_external, Direction{1}, lev), - "A", "y"); - warpx.ReadExternalFieldFromFile(m_external_file_path, - warpx.m_fields.get(FieldType::hybrid_A_fp_external, Direction{2}, lev), - "A", "z"); + warpx.ReadExternalFieldFromFile(m_external_file_path[i], + warpx.m_fields.get(Aext_field, Direction{0}, lev), + "A", "x"); + warpx.ReadExternalFieldFromFile(m_external_file_path[i], + warpx.m_fields.get(Aext_field, Direction{1}, lev), + "A", "y"); + warpx.ReadExternalFieldFromFile(m_external_file_path[i], + warpx.m_fields.get(Aext_field, Direction{2}, lev), + "A", "z"); #endif - } - } else { - // Initialize the A fields from expression - m_A_external_parser[0] = std::make_unique( - utils::parser::makeParser(m_Ax_ext_grid_function,{"x","y","z","t"})); - m_A_external_parser[1] = std::make_unique( - utils::parser::makeParser(m_Ay_ext_grid_function,{"x","y","z","t"})); - m_A_external_parser[2] = std::make_unique( - utils::parser::makeParser(m_Az_ext_grid_function,{"x","y","z","t"})); - m_A_external[0] = m_A_external_parser[0]->compile<4>(); - m_A_external[1] = m_A_external_parser[1]->compile<4>(); - m_A_external[2] = m_A_external_parser[2]->compile<4>(); - - // check if the external current parsers depend on time - for (int i=0; i<3; 
i++) { - const std::set A_ext_symbols = m_A_external_parser[i]->symbols(); - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(A_ext_symbols.count("t") == 0, - "Externally Applied Vector potential time variation must be set with A_time_external_function(t)"); - } + } + } else { + // Initialize the A fields from expression + m_A_external_parser[i][0] = std::make_unique( + utils::parser::makeParser(m_Ax_ext_grid_function[i],{"x","y","z","t"})); + m_A_external_parser[i][1] = std::make_unique( + utils::parser::makeParser(m_Ay_ext_grid_function[i],{"x","y","z","t"})); + m_A_external_parser[i][2] = std::make_unique( + utils::parser::makeParser(m_Az_ext_grid_function[i],{"x","y","z","t"})); + m_A_external[i][0] = m_A_external_parser[i][0]->compile<4>(); + m_A_external[i][1] = m_A_external_parser[i][1]->compile<4>(); + m_A_external[i][2] = m_A_external_parser[i][2]->compile<4>(); + + // check if the external current parsers depend on time + for (int idim=0; idim<3; idim++) { + const std::set A_ext_symbols = m_A_external_parser[i][idim]->symbols(); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(A_ext_symbols.count("t") == 0, + "Externally Applied Vector potential time variation must be set with A_time_external_function(t)"); + } - // Initialize data onto grid - for (auto lev = 0; lev <= warpx.finestLevel(); ++lev) { - warpx.ComputeExternalFieldOnGridUsingParser( - FieldType::hybrid_A_fp_external, - m_A_external[0], - m_A_external[1], - m_A_external[2], - lev, PatchType::fine, EB::CoverTopology::none); + // Initialize data onto grid + for (auto lev = 0; lev <= warpx.finestLevel(); ++lev) { + warpx.ComputeExternalFieldOnGridUsingParser( + Aext_field, + m_A_external[i][0], + m_A_external[i][1], + m_A_external[i][2], + lev, PatchType::fine, EB::CoverTopology::none); - for (int idir = 0; idir < 3; ++idir) { - warpx.m_fields.get(FieldType::hybrid_A_fp_external, Direction{idir}, lev)-> - FillBoundary(warpx.Geom(lev).periodicity()); + for (int idir = 0; idir < 3; ++idir) { + warpx.m_fields.get(Aext_field, 
Direction{idir}, lev)-> + FillBoundary(warpx.Geom(lev).periodicity()); + } } } - } - amrex::Gpu::streamSynchronize(); + amrex::Gpu::streamSynchronize(); + + // Compute the curl of at at max and store + std::string const curlAext_field = m_field_names[i] + std::string{"_curlAext"}; + + ablastr::fields::MultiLevelVectorField A_ext = + warpx.m_fields.get_mr_levels_alldirs(Aext_field, warpx.finestLevel()); + ablastr::fields::MultiLevelVectorField curlA_ext = + warpx.m_fields.get_mr_levels_alldirs(curlAext_field, warpx.finestLevel()); - m_A_external_time_parser = std::make_unique( - utils::parser::makeParser(m_A_ext_time_function,{"t",})); - m_A_time_scale = m_A_external_time_parser->compile<1>(); + for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { + warpx.get_pointer_fdtd_solver_fp(lev)->ComputeCurlA( + curlA_ext[lev], + A_ext[lev], + lev + ); + + ZeroFieldinEB(curlA_ext[lev], EB::CoverTopology::face, lev); + ZeroFieldinEB(A_ext[lev], EB::CoverTopology::edge, lev); + } + + // Generate parser for time function + m_A_external_time_parser[i] = std::make_unique( + utils::parser::makeParser(m_A_ext_time_function[i],{"t",})); + m_A_time_scale[i] = m_A_external_time_parser[i]->compile<1>(); + + } UpdateHybridExternalFields(warpx.gett_new(0), warpx.getdt(0)); } @@ -204,40 +281,56 @@ ExternalVectorPotential::UpdateHybridExternalFields (const amrex::Real t, const using ablastr::fields::Direction; auto& warpx = WarpX::GetInstance(); - // Get B-field Scaling Factor - amrex::Real scale_factor_B = m_A_time_scale(t); - - // Get dA/dt scaling factor based on time centered FD around t - amrex::Real sf_l = m_A_time_scale(t-0.5_rt*dt); - amrex::Real sf_r = m_A_time_scale(t+0.5_rt*dt); - amrex::Real scale_factor_E = -(sf_r - sf_l)/dt; - ablastr::fields::MultiLevelVectorField A_ext = - warpx.m_fields.get_mr_levels_alldirs(FieldType::hybrid_A_fp_external, warpx.finestLevel()); ablastr::fields::MultiLevelVectorField B_ext = 
warpx.m_fields.get_mr_levels_alldirs(FieldType::hybrid_B_fp_external, warpx.finestLevel()); ablastr::fields::MultiLevelVectorField E_ext = warpx.m_fields.get_mr_levels_alldirs(FieldType::hybrid_E_fp_external, warpx.finestLevel()); + // Zero E and B external fields for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { - warpx.get_pointer_fdtd_solver_fp(lev)->ComputeCurlA( - B_ext[lev], - A_ext[lev], - lev - ); - for (int idir = 0; idir < 3; ++idir) { - // Scale B field by the time factor - B_ext[lev][Direction{idir}]->mult(scale_factor_B); - B_ext[lev][Direction{idir}]->FillBoundary(warpx.Geom(lev).periodicity()); - - // Copy A into E and scale by the (-) derivative of the time function - E_ext[lev][Direction{idir}]->setVal(scale_factor_E); - amrex::MultiFab::Multiply(*E_ext[lev][Direction{idir}], *A_ext[lev][Direction{idir}], 0, 0, 1, 0); - E_ext[lev][Direction{idir}]->FillBoundary(warpx.Geom(lev).periodicity()); + B_ext[lev][Direction{idir}]->setVal(0.0_rt); + E_ext[lev][Direction{idir}]->setVal(0.0_rt); + } + } + + for (int i = 0; i < m_nFields; ++i) { + std::string const Aext_field = m_field_names[i] + std::string{"_Aext"}; + std::string const curlAext_field = m_field_names[i] + std::string{"_curlAext"}; + + // Get B-field Scaling Factor + amrex::Real scale_factor_B = m_A_time_scale[i](t); + + // Get dA/dt scaling factor based on time centered FD around t + amrex::Real sf_l = m_A_time_scale[i](t-0.5_rt*dt); + amrex::Real sf_r = m_A_time_scale[i](t+0.5_rt*dt); + amrex::Real scale_factor_E = -(sf_r - sf_l)/dt; + + ablastr::fields::MultiLevelVectorField A_ext = + warpx.m_fields.get_mr_levels_alldirs(Aext_field, warpx.finestLevel()); + ablastr::fields::MultiLevelVectorField curlA_ext = + warpx.m_fields.get_mr_levels_alldirs(curlAext_field, warpx.finestLevel()); + + for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { + for (int idir = 0; idir < 3; ++idir) { + // Scale A_ext by - \partial A / \partial t and add to E_ext + amrex::MultiFab::LinComb( + 
*E_ext[lev][Direction{idir}], + 1.0_rt, *E_ext[lev][Direction{idir}], 0, + scale_factor_E, *A_ext[lev][Direction{idir}], 0, + 0, 1, 0); + E_ext[lev][Direction{idir}]->FillBoundary(warpx.Geom(lev).periodicity()); + + // Scale curlA_ext by the t function and add to B_ext + amrex::MultiFab::LinComb( + *B_ext[lev][Direction{idir}], + 1.0_rt, *B_ext[lev][Direction{idir}], 0, + scale_factor_B, *curlA_ext[lev][Direction{idir}], 0, + 0, 1, 0); + B_ext[lev][Direction{idir}]->FillBoundary(warpx.Geom(lev).periodicity()); + } } - ZeroFieldinEB(B_ext[lev], EB::CoverTopology::face, lev); - ZeroFieldinEB(E_ext[lev], EB::CoverTopology::edge, lev); } amrex::Gpu::streamSynchronize(); } diff --git a/Source/Fields.H b/Source/Fields.H index 91e1e264a8d..0bd4d3626f4 100644 --- a/Source/Fields.H +++ b/Source/Fields.H @@ -52,7 +52,6 @@ namespace warpx::fields hybrid_current_fp_external, /**< Used with Ohm's law solver. Stores external current */ hybrid_B_fp_external, /**< Used with Ohm's law solver. Stores external B field */ hybrid_E_fp_external, /**< Used with Ohm's law solver. Stores external E field */ - hybrid_A_fp_external, /**< Used with Ohm's law solver. Stores external A field */ Efield_cp, //!< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level Bfield_cp, //!< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level current_cp, //!< Only used with MR. 
The current that is used as a source for the field solver, on the coarse patch of each level @@ -107,7 +106,6 @@ namespace warpx::fields FieldType::hybrid_current_fp_external, FieldType::hybrid_B_fp_external, FieldType::hybrid_E_fp_external, - FieldType::hybrid_A_fp_external, FieldType::Efield_cp, FieldType::Bfield_cp, FieldType::current_cp, diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 16d7b7da1b0..8908953162f 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -1051,19 +1051,23 @@ WarpX::InitLevelData (int lev, Real /*time*/) } } -void WarpX::ComputeExternalFieldOnGridUsingParser ( - warpx::fields::FieldType field, +template +void ComputeExternalFieldOnGridUsingParser_template ( + T field, amrex::ParserExecutor<4> const& fx_parser, amrex::ParserExecutor<4> const& fy_parser, amrex::ParserExecutor<4> const& fz_parser, int lev, PatchType patch_type, EB::CoverTopology topology) { - auto t = gett_new(lev); + auto &warpx = WarpX::GetInstance(); + auto const &geom = warpx.Geom(lev); + + auto t = warpx.gett_new(lev); - auto dx_lev = geom[lev].CellSizeArray(); - const RealBox& real_box = geom[lev].ProbDomain(); + auto dx_lev = geom.CellSizeArray(); + const RealBox& real_box = geom.ProbDomain(); - amrex::IntVect refratio = (lev > 0 ) ? RefRatio(lev-1) : amrex::IntVect(1); + amrex::IntVect refratio = (lev > 0 ) ? 
warpx.RefRatio(lev-1) : amrex::IntVect(1); if (patch_type == PatchType::coarse) { for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { dx_lev[idim] = dx_lev[idim] * refratio[idim]; @@ -1071,9 +1075,9 @@ void WarpX::ComputeExternalFieldOnGridUsingParser ( } using ablastr::fields::Direction; - amrex::MultiFab* mfx = m_fields.get(field, Direction{0}, lev); - amrex::MultiFab* mfy = m_fields.get(field, Direction{1}, lev); - amrex::MultiFab* mfz = m_fields.get(field, Direction{2}, lev); + amrex::MultiFab* mfx = warpx.m_fields.get(field, Direction{0}, lev); + amrex::MultiFab* mfy = warpx.m_fields.get(field, Direction{1}, lev); + amrex::MultiFab* mfz = warpx.m_fields.get(field, Direction{2}, lev); const amrex::IntVect x_nodal_flag = mfx->ixType().toIntVect(); const amrex::IntVect y_nodal_flag = mfy->ixType().toIntVect(); @@ -1170,6 +1174,31 @@ void WarpX::ComputeExternalFieldOnGridUsingParser ( } } +void WarpX::ComputeExternalFieldOnGridUsingParser ( + warpx::fields::FieldType field, + amrex::ParserExecutor<4> const& fx_parser, + amrex::ParserExecutor<4> const& fy_parser, + amrex::ParserExecutor<4> const& fz_parser, + int lev, PatchType patch_type, EB::CoverTopology topology) +{ + ComputeExternalFieldOnGridUsingParser_template ( + field, + fx_parser, fy_parser, fz_parser, + lev, patch_type, topology); +} + +void WarpX::ComputeExternalFieldOnGridUsingParser ( + std::string const& field, + amrex::ParserExecutor<4> const& fx_parser, + amrex::ParserExecutor<4> const& fy_parser, + amrex::ParserExecutor<4> const& fz_parser, + int lev, PatchType patch_type, EB::CoverTopology topology) +{ + ComputeExternalFieldOnGridUsingParser_template ( + field, + fx_parser, fy_parser, fz_parser, + lev, patch_type, topology); +} void WarpX::CheckGuardCells() { for (int lev = 0; lev <= max_level; ++lev) diff --git a/Source/WarpX.H b/Source/WarpX.H index d633cf55690..328a1b36c2e 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -929,6 +929,32 @@ public: amrex::ParserExecutor<4> const& fz_parser, 
int lev, PatchType patch_type, EB::CoverTopology topology); + /** + * \brief + * This function computes the E, B, and J fields on each level + * using the parser and the user-defined function for the external fields. + * The subroutine will parse the x_/y_z_external_grid_function and + * then, the field multifab is initialized based on the (x,y,z) position + * on the staggered yee-grid or cell-centered grid, in the interior cells + * and guard cells. + * + * \param[in] field MultiLevelVectorField + * \param[in] fx_parser parser function to initialize x-field + * \param[in] fy_parser parser function to initialize y-field + * \param[in] fz_parser parser function to initialize z-field + * \param[in] edge_lengths edge lengths information + * \param[in] face_areas face areas information + * \param[in] topology flag indicating if field is edge-based or face-based + * \param[in] lev level of the Multifabs that is initialized + * \param[in] patch_type PatchType on which the field is initialized (fine or coarse) + */ + void ComputeExternalFieldOnGridUsingParser ( + std::string const& field, + amrex::ParserExecutor<4> const& fx_parser, + amrex::ParserExecutor<4> const& fy_parser, + amrex::ParserExecutor<4> const& fz_parser, + int lev, PatchType patch_type, EB::CoverTopology topology); + /** * \brief Load field values from a user-specified openPMD file, * for the fields Ex, Ey, Ez, Bx, By, Bz From 859158e5a2300757dfa906e49c3149ea40188ba4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 6 Nov 2024 19:14:53 +0000 Subject: [PATCH 33/86] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- Python/pywarpx/picmi.py | 12 ++++++------ .../HybridPICModel/ExternalVectorPotential.cpp | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index b84cb709d45..c4ca72d9d9c 100644 --- 
a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1823,7 +1823,7 @@ def __init__( # It is expected that a nested dicitonary will be passed # into picmi for each field that has different timings - # e.g. + # e.g. # A_external = { # '': { # 'Ax_external_function': ..., @@ -1835,7 +1835,7 @@ def __init__( # } self.A_external = A_external - if (A_external is not None): + if A_external is not None: self.add_external_fields = True # Handle keyword arguments used in expressions @@ -1897,25 +1897,25 @@ def solver_initialize_inputs(self): pywarpx.external_vector_potential.__setattr__( f"{field_name}.Ax_external_grid_function(x,y,z)", pywarpx.my_constants.mangle_expression( - field_dict['Ax_external_function'], self.mangle_dict + field_dict["Ax_external_function"], self.mangle_dict ), ) pywarpx.external_vector_potential.__setattr__( f"{field_name}.Ay_external_grid_function(x,y,z)", pywarpx.my_constants.mangle_expression( - field_dict['Ay_external_function'], self.mangle_dict + field_dict["Ay_external_function"], self.mangle_dict ), ) pywarpx.external_vector_potential.__setattr__( f"{field_name}.Az_external_grid_function(x,y,z)", pywarpx.my_constants.mangle_expression( - field_dict['Az_external_function'], self.mangle_dict + field_dict["Az_external_function"], self.mangle_dict ), ) pywarpx.external_vector_potential.__setattr__( f"{field_name}.A_time_external_function(t)", pywarpx.my_constants.mangle_expression( - field_dict['A_time_external_function'], self.mangle_dict + field_dict["A_time_external_function"], self.mangle_dict ), ) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp index ec859d66dd6..2258891d026 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp @@ -103,7 +103,7 @@ 
ExternalVectorPotential::AllocateLevelMFs ( fields.alloc_init(Aext_field, Direction{2}, lev, amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, 0.0_rt); - + std::string const curlAext_field = field_name + std::string{"_curlAext"}; fields.alloc_init(curlAext_field, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag), @@ -142,7 +142,7 @@ ExternalVectorPotential::InitData () auto& warpx = WarpX::GetInstance(); for (int i = 0; i < m_nFields; ++i) { - + std::string const Aext_field = m_field_names[i] + std::string{"_Aext"}; if (m_read_A_from_file[i]) { From db817c1a36ba3c89dd9a120e9d7b9943b3989e76 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Thu, 7 Nov 2024 08:14:47 -0800 Subject: [PATCH 34/86] Updating boundary covering helper to work when compiling with EB, but not using. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- Source/EmbeddedBoundary/Covered.H | 86 ++++++++++--------- Source/EmbeddedBoundary/Covered.cpp | 2 + .../ExternalVectorPotential.cpp | 24 ++++-- 3 files changed, 64 insertions(+), 48 deletions(-) diff --git a/Source/EmbeddedBoundary/Covered.H b/Source/EmbeddedBoundary/Covered.H index d72f5d904b8..5b7c7a2e96c 100644 --- a/Source/EmbeddedBoundary/Covered.H +++ b/Source/EmbeddedBoundary/Covered.H @@ -40,6 +40,10 @@ public: amrex::Dim3 lx_lo, lx_hi, lz_lo, lz_hi; #endif + // This state should be captured as part of this object and shipped + // to device via lambda capture. 
+ bool m_eb_enabled = false; + Covered (amrex::MFIter &mfi, int lev); // Can have topology of f or e for 'face' or 'edge' @@ -53,57 +57,59 @@ public: { #ifdef AMREX_USE_EB bool is_covered = false; - - switch (idir) - { - case 0: -#ifdef WARPX_DIM_3D - if((topology==CoverTopology::edge and lx(i, j, k)<=0) or (topology==CoverTopology::face and Sx(i, j, k)<=0)) + + if (m_eb_enabled) { + switch (idir) { - is_covered = true; - } + case 0: +#ifdef WARPX_DIM_3D + if((topology==CoverTopology::edge and lx(i, j, k)<=0) or (topology==CoverTopology::face and Sx(i, j, k)<=0)) + { + is_covered = true; + } #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - //In XZ and RZ Ex is associated with a x-edge, while Bx is associated with a z-edge - if((topology==CoverTopology::edge and lx(i, j, k)<=0) or (topology==CoverTopology::face and lz(i, j, k)<=0)) - { - is_covered = true; - } + //In XZ and RZ Ex is associated with a x-edge, while Bx is associated with a z-edge + if((topology==CoverTopology::edge and lx(i, j, k)<=0) or (topology==CoverTopology::face and lz(i, j, k)<=0)) + { + is_covered = true; + } #endif - break; + break; - case 1: + case 1: #ifdef WARPX_DIM_3D - if((topology==CoverTopology::edge and ly(i, j, k)<=0) or (topology==CoverTopology::face and Sy(i, j, k)<=0)) - { - is_covered = true; - } + if((topology==CoverTopology::edge and ly(i, j, k)<=0) or (topology==CoverTopology::face and Sy(i, j, k)<=0)) + { + is_covered = true; + } #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - //In XZ and RZ Ey is associated with a mesh node, so we need to check if the mesh node is covered - if( (topology==CoverTopology::edge and (lx(std::min(i , lx_hi.x), std::min(j , lx_hi.y), k)<=0 - || lx(std::max(i-1, lx_lo.x), std::min(j , lx_hi.y), k)<=0 - || lz(std::min(i , lz_hi.x), std::min(j , lz_hi.y), k)<=0 - || lz(std::min(i , lz_hi.x), std::max(j-1, lz_lo.y), k)<=0)) or - (topology==CoverTopology::face and Sy(i,j,k)<=0)) - { - is_covered = true; - } + //In XZ and RZ Ey is 
associated with a mesh node, so we need to check if the mesh node is covered + if( (topology==CoverTopology::edge and (lx(std::min(i , lx_hi.x), std::min(j , lx_hi.y), k)<=0 + || lx(std::max(i-1, lx_lo.x), std::min(j , lx_hi.y), k)<=0 + || lz(std::min(i , lz_hi.x), std::min(j , lz_hi.y), k)<=0 + || lz(std::min(i , lz_hi.x), std::max(j-1, lz_lo.y), k)<=0)) or + (topology==CoverTopology::face and Sy(i,j,k)<=0)) + { + is_covered = true; + } #endif - break; + break; - case 2: + case 2: #ifdef WARPX_DIM_3D - if((topology==CoverTopology::edge and lz(i, j, k)<=0) or (topology==CoverTopology::face and Sz(i, j, k)<=0)) - { - is_covered = true; - } + if((topology==CoverTopology::edge and lz(i, j, k)<=0) or (topology==CoverTopology::face and Sz(i, j, k)<=0)) + { + is_covered = true; + } #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - //In XZ and RZ Ez is associated with a z-edge, while Bz is associated with a x-edge - if((topology==CoverTopology::edge and lz(i, j, k)<=0) or (topology==CoverTopology::face and lx(i, j, k)<=0)) - { - is_covered = true; - } + //In XZ and RZ Ez is associated with a z-edge, while Bz is associated with a x-edge + if((topology==CoverTopology::edge and lz(i, j, k)<=0) or (topology==CoverTopology::face and lx(i, j, k)<=0)) + { + is_covered = true; + } #endif - break; + break; + } } return is_covered; #else diff --git a/Source/EmbeddedBoundary/Covered.cpp b/Source/EmbeddedBoundary/Covered.cpp index 8da2b07dacd..396b57fabd5 100644 --- a/Source/EmbeddedBoundary/Covered.cpp +++ b/Source/EmbeddedBoundary/Covered.cpp @@ -32,6 +32,8 @@ Covered::Covered (amrex::MFIter &mfi, int lev) auto edge_lengths = warpx.m_fields.get_alldirs(FieldType::edge_lengths, lev); auto face_areas = warpx.m_fields.get_alldirs(FieldType::face_areas, lev); + m_eb_enabled = true; + lx = edge_lengths[0]->array(mfi); ly = edge_lengths[1]->array(mfi); lz = edge_lengths[2]->array(mfi); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp 
b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp index ec859d66dd6..119c5177ad4 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp @@ -93,7 +93,7 @@ ExternalVectorPotential::AllocateLevelMFs ( { using ablastr::fields::Direction; for (std::string const & field_name : m_field_names) { - std::string const Aext_field = field_name + std::string{"_Aext"}; + std::string Aext_field = field_name + std::string{"_Aext"}; fields.alloc_init(Aext_field, Direction{0}, lev, amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, 0.0_rt); @@ -104,7 +104,7 @@ ExternalVectorPotential::AllocateLevelMFs ( lev, amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, 0.0_rt); - std::string const curlAext_field = field_name + std::string{"_curlAext"}; + std::string curlAext_field = field_name + std::string{"_curlAext"}; fields.alloc_init(curlAext_field, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, 0.0_rt); @@ -143,7 +143,7 @@ ExternalVectorPotential::InitData () for (int i = 0; i < m_nFields; ++i) { - std::string const Aext_field = m_field_names[i] + std::string{"_Aext"}; + std::string Aext_field = m_field_names[i] + std::string{"_Aext"}; if (m_read_A_from_file[i]) { // Read A fields from file @@ -208,7 +208,7 @@ ExternalVectorPotential::InitData () amrex::Gpu::streamSynchronize(); // Compute the curl of at at max and store - std::string const curlAext_field = m_field_names[i] + std::string{"_curlAext"}; + std::string curlAext_field = m_field_names[i] + std::string{"_curlAext"}; ablastr::fields::MultiLevelVectorField A_ext = warpx.m_fields.get_mr_levels_alldirs(Aext_field, warpx.finestLevel()); @@ -222,8 +222,10 @@ ExternalVectorPotential::InitData () lev ); - ZeroFieldinEB(curlA_ext[lev], EB::CoverTopology::face, lev); - ZeroFieldinEB(A_ext[lev], EB::CoverTopology::edge, lev); + for (int idir = 
0; idir < 3; ++idir) { + warpx.m_fields.get(curlAext_field, Direction{idir}, lev)-> + FillBoundary(warpx.Geom(lev).periodicity()); + } } // Generate parser for time function @@ -320,14 +322,20 @@ ExternalVectorPotential::UpdateHybridExternalFields (const amrex::Real t, const 1.0_rt, *E_ext[lev][Direction{idir}], 0, scale_factor_E, *A_ext[lev][Direction{idir}], 0, 0, 1, 0); - E_ext[lev][Direction{idir}]->FillBoundary(warpx.Geom(lev).periodicity()); - + // Scale curlA_ext by the t function and add to B_ext amrex::MultiFab::LinComb( *B_ext[lev][Direction{idir}], 1.0_rt, *B_ext[lev][Direction{idir}], 0, scale_factor_B, *curlA_ext[lev][Direction{idir}], 0, 0, 1, 0); + } + + ZeroFieldinEB(B_ext[lev], EB::CoverTopology::face, lev); + ZeroFieldinEB(E_ext[lev], EB::CoverTopology::edge, lev); + + for (int idir = 0; idir < 3; ++idir) { + E_ext[lev][Direction{idir}]->FillBoundary(warpx.Geom(lev).periodicity()); B_ext[lev][Direction{idir}]->FillBoundary(warpx.Geom(lev).periodicity()); } } From 5a1e8cceccf3352857ad9c207d580da0fffd8841 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 7 Nov 2024 16:20:48 +0000 Subject: [PATCH 35/86] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- Source/EmbeddedBoundary/Covered.H | 2 +- .../HybridPICModel/ExternalVectorPotential.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Source/EmbeddedBoundary/Covered.H b/Source/EmbeddedBoundary/Covered.H index 5b7c7a2e96c..34da0edb2e7 100644 --- a/Source/EmbeddedBoundary/Covered.H +++ b/Source/EmbeddedBoundary/Covered.H @@ -57,7 +57,7 @@ public: { #ifdef AMREX_USE_EB bool is_covered = false; - + if (m_eb_enabled) { switch (idir) { diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp index aa6f79da145..afd50191edc 100644 --- 
a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp @@ -322,7 +322,7 @@ ExternalVectorPotential::UpdateHybridExternalFields (const amrex::Real t, const 1.0_rt, *E_ext[lev][Direction{idir}], 0, scale_factor_E, *A_ext[lev][Direction{idir}], 0, 0, 1, 0); - + // Scale curlA_ext by the t function and add to B_ext amrex::MultiFab::LinComb( *B_ext[lev][Direction{idir}], From 6659cdc47a4cf98eefb29565673370a087bc2f9d Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Thu, 7 Nov 2024 09:33:41 -0800 Subject: [PATCH 36/86] Update Python/pywarpx/picmi.py Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- Python/pywarpx/picmi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index c4ca72d9d9c..6170ef2cb43 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1783,7 +1783,7 @@ class HybridPICSolver(picmistandard.base._ClassWithInit): Jx/y/z_external_function: str Function of space and time specifying external (non-plasma) currents. - Ex/y/z_external_function: str + Ax/y/z_external_function: str Function of space and time specifying external (non-plasma) E-fields. """ From 2c0b35f657a43a09e7c59e45b7d41c583d25f1ba Mon Sep 17 00:00:00 2001 From: "S. 
Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Thu, 7 Nov 2024 09:33:49 -0800 Subject: [PATCH 37/86] Update Python/pywarpx/picmi.py Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- Python/pywarpx/picmi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 6170ef2cb43..35910c50b23 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1784,7 +1784,7 @@ class HybridPICSolver(picmistandard.base._ClassWithInit): Function of space and time specifying external (non-plasma) currents. Ax/y/z_external_function: str - Function of space and time specifying external (non-plasma) E-fields. + Function of space and time specifying external (non-plasma) vector potential fields. """ def __init__( From e82cccc447906a01998f9e5549a0ad8892ba0fef Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Thu, 7 Nov 2024 09:34:00 -0800 Subject: [PATCH 38/86] Update Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp b/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp index f194eaf1186..d7aa3149ee3 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp @@ -68,7 +68,7 @@ void FiniteDifferenceSolver::ComputeCurlACylindrical ( // for the profiler amrex::LayoutData* cost = WarpX::getCosts(lev); - // reset Jfield + // reset Bfield Bfield[0]->setVal(0); Bfield[1]->setVal(0); Bfield[2]->setVal(0); From e1180fdac5a456bf59d109c9299ae626cd2af91b Mon Sep 17 00:00:00 2001 From: "S. 
Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Thu, 7 Nov 2024 09:34:17 -0800 Subject: [PATCH 39/86] Update Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp b/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp index d7aa3149ee3..83e8e6d7833 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp @@ -133,7 +133,7 @@ void FiniteDifferenceSolver::ComputeCurlACylindrical ( Br(i, j, 0, 0) = 0.; // Mode m=0 for (int m=1; m Date: Thu, 7 Nov 2024 09:34:35 -0800 Subject: [PATCH 40/86] Update Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp b/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp index 83e8e6d7833..445ad944150 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp @@ -269,6 +269,7 @@ void FiniteDifferenceSolver::ComputeCurlACartesian ( // Bz calculation [=] AMREX_GPU_DEVICE (int i, int j, int k){ + // Skip if this cell is fully covered by embedded boundaries if (cov_ptr.isCovered(2, EB::CoverTopology::face, i, j, k)) { return; } Bz(i, j, k) = ( From dd18deb5c39b9606d2a0c676a13f06ad425393cc Mon Sep 17 00:00:00 2001 From: "S. 
Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Thu, 7 Nov 2024 09:34:50 -0800 Subject: [PATCH 41/86] Update Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index bee188b4d0f..04b0c449b77 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -676,7 +676,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Ez(i, j, 0) += eta(rho_val, jtot_val) * Jz(i, j, 0); } - if (include_hyper_resistivity_term && solve_for_Faraday) { + if (include_hyper_resistivity_term) { auto nabla2Jz = T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, 0, 0); Ez(i, j, 0) -= eta_h * nabla2Jz; } From 13ecbce67ed43c823a49f2682585926498fd5e85 Mon Sep 17 00:00:00 2001 From: "S. 
Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Thu, 7 Nov 2024 09:35:15 -0800 Subject: [PATCH 42/86] Update Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- .../FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 04b0c449b77..97d3411491b 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -863,9 +863,9 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Array4 Ex_ext, Ey_ext, Ez_ext; if (include_external_fields) { - Ex_ext = Efield_external[0]->array(mfi); - Ey_ext = Efield_external[1]->array(mfi); - Ez_ext = Efield_external[2]->array(mfi); + Ex_ext = Efield_external[0]->array(mfi); + Ey_ext = Efield_external[1]->array(mfi); + Ez_ext = Efield_external[2]->array(mfi); } EB::Covered const& cov_ptr = EB::Covered(mfi, lev); From 913760ca7b259f30e268c7645a98ec5d62fd87d3 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Thu, 7 Nov 2024 12:32:29 -0800 Subject: [PATCH 43/86] Redisabling filtering for RZ. Commenting out E field boundary. Signed-off-by: S. 
Eric Clark <25495882+clarkse@users.noreply.github.com> --- Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp | 4 ++-- Source/WarpX.cpp | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index bee188b4d0f..beaf159986f 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -361,8 +361,8 @@ void FiniteDifferenceSolver::HybridPICSolveE ( amrex::Abort(Utils::TextMsg::Err( "HybridSolveE: The hybrid-PIC electromagnetic solver algorithm must be used")); } - auto& warpx = WarpX::GetInstance(); - warpx.ApplyEfieldBoundary(lev, PatchType::fine); + // auto& warpx = WarpX::GetInstance(); + // warpx.ApplyEfieldBoundary(lev, PatchType::fine); } #ifdef WARPX_DIM_RZ diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 3494c10fdc2..538bd89dd83 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -738,8 +738,8 @@ WarpX::ReadParameters () { // Filter currently not working with FDTD solver in RZ geometry along R // (see https://github.com/ECP-WarpX/WarpX/issues/1943) - //WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!use_filter || filter_npass_each_dir[0] == 0, - // "In RZ geometry with FDTD, filtering can only be apply along z. This can be controlled by setting warpx.filter_npass_each_dir"); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!use_filter || filter_npass_each_dir[0] == 0, + "In RZ geometry with FDTD, filtering can only be apply along z. This can be controlled by setting warpx.filter_npass_each_dir"); } #endif From 5083cad591df4ba160f5b5a74b7cb183dc1a8a19 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 20 Nov 2024 14:12:21 -0800 Subject: [PATCH 44/86] Fixing some bugs in RZ mode. Signed-off-by: S. 
Eric Clark <25495882+clarkse@users.noreply.github.com> --- Python/pywarpx/picmi.py | 55 ++++++++++--------- .../HybridPICSolveE.cpp | 26 ++++++--- 2 files changed, 45 insertions(+), 36 deletions(-) diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 35910c50b23..41600399ff7 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1887,37 +1887,38 @@ def solver_initialize_inputs(self): ), ) pywarpx.hybridpicmodel.add_external_fields = self.add_external_fields - pywarpx.external_vector_potential.__setattr__( - "fields", - pywarpx.my_constants.mangle_expression( - list(self.A_external.keys()), self.mangle_dict - ), - ) - for field_name, field_dict in self.A_external.items(): - pywarpx.external_vector_potential.__setattr__( - f"{field_name}.Ax_external_grid_function(x,y,z)", - pywarpx.my_constants.mangle_expression( - field_dict["Ax_external_function"], self.mangle_dict - ), - ) + if self.add_external_fields: pywarpx.external_vector_potential.__setattr__( - f"{field_name}.Ay_external_grid_function(x,y,z)", + "fields", pywarpx.my_constants.mangle_expression( - field_dict["Ay_external_function"], self.mangle_dict - ), - ) - pywarpx.external_vector_potential.__setattr__( - f"{field_name}.Az_external_grid_function(x,y,z)", - pywarpx.my_constants.mangle_expression( - field_dict["Az_external_function"], self.mangle_dict - ), - ) - pywarpx.external_vector_potential.__setattr__( - f"{field_name}.A_time_external_function(t)", - pywarpx.my_constants.mangle_expression( - field_dict["A_time_external_function"], self.mangle_dict + list(self.A_external.keys()), self.mangle_dict ), ) + for field_name, field_dict in self.A_external.items(): + pywarpx.external_vector_potential.__setattr__( + f"{field_name}.Ax_external_grid_function(x,y,z)", + pywarpx.my_constants.mangle_expression( + field_dict["Ax_external_function"], self.mangle_dict + ), + ) + pywarpx.external_vector_potential.__setattr__( + f"{field_name}.Ay_external_grid_function(x,y,z)", + 
pywarpx.my_constants.mangle_expression( + field_dict["Ay_external_function"], self.mangle_dict + ), + ) + pywarpx.external_vector_potential.__setattr__( + f"{field_name}.Az_external_grid_function(x,y,z)", + pywarpx.my_constants.mangle_expression( + field_dict["Az_external_function"], self.mangle_dict + ), + ) + pywarpx.external_vector_potential.__setattr__( + f"{field_name}.A_time_external_function(t)", + pywarpx.my_constants.mangle_expression( + field_dict["A_time_external_function"], self.mangle_dict + ), + ) class ElectrostaticSolver(picmistandard.PICMI_ElectrostaticSolver): diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index b5e88559c31..08e59482841 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -398,9 +398,9 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( const bool include_external_fields = hybrid_model->m_add_external_fields; - auto const& warpx = WarpX::GetInstance(); - ablastr::fields::ConstVectorField Bfield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_B_fp_external, 0); // lev=0 - ablastr::fields::ConstVectorField Efield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_E_fp_external, 0); // lev=0 + auto & warpx = WarpX::GetInstance(); + ablastr::fields::VectorField Bfield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_B_fp_external, 0); // lev=0 + ablastr::fields::VectorField Efield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_E_fp_external, 0); // lev=0 // Index type required for interpolating fields from their respective // staggering to the Ex, Ey, Ez locations @@ -457,9 +457,13 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Array4 const& Br = Bfield[0]->const_array(mfi); Array4 const& Bt = Bfield[1]->const_array(mfi); Array4 const& Bz = Bfield[2]->const_array(mfi); - Array4 const& Br_ext = 
Bfield_external[0]->const_array(mfi); - Array4 const& Bt_ext = Bfield_external[1]->const_array(mfi); - Array4 const& Bz_ext = Bfield_external[2]->const_array(mfi); + + Array4 Br_ext, Bt_ext, Bz_ext; + if (include_external_fields) { + Br_ext = Bfield_external[0]->array(mfi); + Bt_ext = Bfield_external[1]->array(mfi); + Bz_ext = Bfield_external[2]->array(mfi); + } // Loop over the cells and update the nodal E field amrex::ParallelFor(mfi.tilebox(), [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ @@ -530,9 +534,13 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Array4 const& enE = enE_nodal_mf.const_array(mfi); Array4 const& rho = rhofield.const_array(mfi); Array4 const& Pe = Pefield.const_array(mfi); - Array4 const& Er_ext = Efield_external[0]->const_array(mfi); - Array4 const& Et_ext = Efield_external[1]->const_array(mfi); - Array4 const& Ez_ext = Efield_external[2]->const_array(mfi); + + Array4 Er_ext, Et_ext, Ez_ext; + if (include_external_fields) { + Er_ext = Efield_external[0]->array(mfi); + Et_ext = Efield_external[1]->array(mfi); + Ez_ext = Efield_external[2]->array(mfi); + } EB::Covered const& cov_ptr = EB::Covered(mfi, lev); From 0ee42fc97edee00e80348468edf64afec2486f2a Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Fri, 6 Dec 2024 17:06:11 -0800 Subject: [PATCH 45/86] Adding Generic Multifab wrappers to access runtime allocated multifabs in register via string name. Exposing function to compute curl of external A after modifying multifabs. Signed-off-by: S. 
Eric Clark <25495882+clarkse@users.noreply.github.com> --- Python/pywarpx/fields.py | 15 +++++ .../HybridPICModel/ExternalVectorPotential.H | 3 + .../ExternalVectorPotential.cpp | 58 ++++++++++++------- .../HybridPICModel/HybridPICModel.cpp | 12 ++++ Source/Python/WarpX.cpp | 4 ++ Source/WarpX.H | 1 + 6 files changed, 73 insertions(+), 20 deletions(-) diff --git a/Python/pywarpx/fields.py b/Python/pywarpx/fields.py index 1eba1122a99..42ba61e237b 100644 --- a/Python/pywarpx/fields.py +++ b/Python/pywarpx/fields.py @@ -578,6 +578,21 @@ def norm0(self, *args): return self.mf.norm0(*args) +def CustomNamedxWrapper(mf_name, level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name=mf_name, idir=0, level=level, include_ghosts=include_ghosts + ) + +def CustomNamedyWrapper(mf_name, level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name=mf_name, idir=1, level=level, include_ghosts=include_ghosts + ) + +def CustomNamedzWrapper(mf_name, level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name=mf_name, idir=2, level=level, include_ghosts=include_ghosts + ) + def ExWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( mf_name="Efield_aux", idir=0, level=level, include_ghosts=include_ghosts diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H index 1c01f07dbeb..866843e000b 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H @@ -82,6 +82,9 @@ public: void InitData (); + void CalculateExternalCurlA (); + void CalculateExternalCurlA (std::string& coil_name); + AMREX_FORCE_INLINE void ZeroFieldinEB ( ablastr::fields::VectorField const& Field, diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp 
b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp index afd50191edc..ec5cd70f7de 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp @@ -207,26 +207,7 @@ ExternalVectorPotential::InitData () amrex::Gpu::streamSynchronize(); - // Compute the curl of at at max and store - std::string curlAext_field = m_field_names[i] + std::string{"_curlAext"}; - - ablastr::fields::MultiLevelVectorField A_ext = - warpx.m_fields.get_mr_levels_alldirs(Aext_field, warpx.finestLevel()); - ablastr::fields::MultiLevelVectorField curlA_ext = - warpx.m_fields.get_mr_levels_alldirs(curlAext_field, warpx.finestLevel()); - - for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { - warpx.get_pointer_fdtd_solver_fp(lev)->ComputeCurlA( - curlA_ext[lev], - A_ext[lev], - lev - ); - - for (int idir = 0; idir < 3; ++idir) { - warpx.m_fields.get(curlAext_field, Direction{idir}, lev)-> - FillBoundary(warpx.Geom(lev).periodicity()); - } - } + CalculateExternalCurlA(m_field_names[i]); // Generate parser for time function m_A_external_time_parser[i] = std::make_unique( @@ -238,6 +219,43 @@ ExternalVectorPotential::InitData () UpdateHybridExternalFields(warpx.gett_new(0), warpx.getdt(0)); } + +void +ExternalVectorPotential::CalculateExternalCurlA () +{ + for (auto fname : m_field_names) { + CalculateExternalCurlA(fname); + } +} + +void +ExternalVectorPotential::CalculateExternalCurlA (std::string& coil_name) +{ + using ablastr::fields::Direction; + auto & warpx = WarpX::GetInstance(); + + // Compute the curl of at at max and store + std::string Aext_field = coil_name + std::string{"_Aext"}; + std::string curlAext_field = coil_name + std::string{"_curlAext"}; + + ablastr::fields::MultiLevelVectorField A_ext = + warpx.m_fields.get_mr_levels_alldirs(Aext_field, warpx.finestLevel()); + ablastr::fields::MultiLevelVectorField curlA_ext = + 
warpx.m_fields.get_mr_levels_alldirs(curlAext_field, warpx.finestLevel()); + + for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { + warpx.get_pointer_fdtd_solver_fp(lev)->ComputeCurlA( + curlA_ext[lev], + A_ext[lev], + lev); + + for (int idir = 0; idir < 3; ++idir) { + warpx.m_fields.get(curlAext_field, Direction{idir}, lev)-> + FillBoundary(warpx.Geom(lev).periodicity()); + } + } +} + AMREX_FORCE_INLINE void ExternalVectorPotential::ZeroFieldinEB (ablastr::fields::VectorField const& Field, EB::CoverTopology topology, const int lev) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 95fb6e463ab..129ce706b92 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -582,3 +582,15 @@ void HybridPICModel::FieldPush ( warpx.EvolveB(dt, dt_type); warpx.FillBoundaryB(ng, nodal_sync); } + +void +WarpX::CalculateExternalCurlA() { + WARPX_PROFILE("WarpX::CalculateExternalCurlA()"); + + auto & warpx = WarpX::GetInstance(); + + // Get reference to External Field Object + auto ext_vector = warpx.m_hybrid_pic_model->m_external_vector_potential.get(); + ext_vector->CalculateExternalCurlA(); + +} \ No newline at end of file diff --git a/Source/Python/WarpX.cpp b/Source/Python/WarpX.cpp index 0b1ae49dfbc..2fa41f8d477 100644 --- a/Source/Python/WarpX.cpp +++ b/Source/Python/WarpX.cpp @@ -246,6 +246,10 @@ The physical fields in WarpX have the following naming: [] () { WarpX::ProjectionCleanDivB(); }, "Executes projection based divergence cleaner on loaded Bfield_fp_external." ) + .def_static("calculate_hybrid_external_curlA", + [] (WarpX& wx) { wx.CalculateExternalCurlA(); }, + "Executes calculation of the curl of the external A in the hybrid solver." 
+ ) .def("synchronize", [] (WarpX& wx) { wx.Synchronize(); }, "Synchronize particle velocities and positions." diff --git a/Source/WarpX.H b/Source/WarpX.H index 328a1b36c2e..908928244a4 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -870,6 +870,7 @@ public: void ComputeDivE(amrex::MultiFab& divE, int lev); static void ProjectionCleanDivB (); + void CalculateExternalCurlA (); [[nodiscard]] amrex::IntVect getngEB() const { return guard_cells.ng_alloc_EB; } [[nodiscard]] amrex::IntVect getngF() const { return guard_cells.ng_alloc_F; } From c9e9bd10478ec2f3157dde1542222ba911de7b26 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 7 Dec 2024 01:06:33 +0000 Subject: [PATCH 46/86] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- Python/pywarpx/fields.py | 3 +++ .../HybridPICModel/ExternalVectorPotential.cpp | 4 ++-- .../FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp | 2 +- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/Python/pywarpx/fields.py b/Python/pywarpx/fields.py index 42ba61e237b..ae543bd446c 100644 --- a/Python/pywarpx/fields.py +++ b/Python/pywarpx/fields.py @@ -583,16 +583,19 @@ def CustomNamedxWrapper(mf_name, level=0, include_ghosts=False): mf_name=mf_name, idir=0, level=level, include_ghosts=include_ghosts ) + def CustomNamedyWrapper(mf_name, level=0, include_ghosts=False): return _MultiFABWrapper( mf_name=mf_name, idir=1, level=level, include_ghosts=include_ghosts ) + def CustomNamedzWrapper(mf_name, level=0, include_ghosts=False): return _MultiFABWrapper( mf_name=mf_name, idir=2, level=level, include_ghosts=include_ghosts ) + def ExWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( mf_name="Efield_aux", idir=0, level=level, include_ghosts=include_ghosts diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp 
b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp index ec5cd70f7de..a614d6d284a 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp @@ -220,7 +220,7 @@ ExternalVectorPotential::InitData () } -void +void ExternalVectorPotential::CalculateExternalCurlA () { for (auto fname : m_field_names) { @@ -228,7 +228,7 @@ ExternalVectorPotential::CalculateExternalCurlA () } } -void +void ExternalVectorPotential::CalculateExternalCurlA (std::string& coil_name) { using ablastr::fields::Direction; diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 129ce706b92..5a965aaca03 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -593,4 +593,4 @@ WarpX::CalculateExternalCurlA() { auto ext_vector = warpx.m_hybrid_pic_model->m_external_vector_potential.get(); ext_vector->CalculateExternalCurlA(); -} \ No newline at end of file +} From d86c411ff7e43874cb2d3e9ec8f6cb4358ed7b9a Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 29 Jan 2025 16:25:18 -0800 Subject: [PATCH 47/86] Adding Holmstrom vacuum handing and updating external fields to interact with vacuum fields appropriately. Adding CI tests for cylinder compression. Signed-off-by: S. 
Eric Clark <25495882+clarkse@users.noreply.github.com> --- Examples/Tests/CMakeLists.txt | 1 + .../CMakeLists.txt | 24 + ...d_ohm_solver_cylinder_compression_picmi.py | 429 ++++++++++++++++++ ...z_ohm_solver_cylinder_compression_picmi.py | 421 +++++++++++++++++ Python/pywarpx/picmi.py | 83 ++-- .../FiniteDifferenceSolver/ComputeCurlA.cpp | 4 +- .../ExternalVectorPotential.cpp | 13 +- .../HybridPICModel/HybridPICModel.cpp | 3 + .../HybridPICSolveE.cpp | 100 ++-- .../FieldSolver/WarpXPushFieldsHybridPIC.cpp | 3 +- Source/WarpX.cpp | 2 +- 11 files changed, 1017 insertions(+), 66 deletions(-) create mode 100644 Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt create mode 100644 Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py create mode 100644 Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py diff --git a/Examples/Tests/CMakeLists.txt b/Examples/Tests/CMakeLists.txt index 6fea9368e78..6d4a465cd52 100644 --- a/Examples/Tests/CMakeLists.txt +++ b/Examples/Tests/CMakeLists.txt @@ -40,6 +40,7 @@ add_subdirectory(nci_fdtd_stability) add_subdirectory(nci_psatd_stability) add_subdirectory(nodal_electrostatic) add_subdirectory(nuclear_fusion) +add_subdirectory(ohm_solver_cylinder_compression) add_subdirectory(ohm_solver_em_modes) add_subdirectory(ohm_solver_ion_beam_instability) add_subdirectory(ohm_solver_ion_Landau_damping) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt b/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt new file mode 100644 index 00000000000..93ad79b3b05 --- /dev/null +++ b/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt @@ -0,0 +1,24 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_3d_ohm_solver_cylinder_compression_picmi # name + 3 # dims + 2 # nprocs + "inputs_test_3d_ohm_solver_cylinder_compression_picmi.py --test" # 
inputs + analysis_3d.py # analysis + diags/diag1000100 # output + OFF # dependency +) +label_warpx_test(test_3d_ohm_solver_cylinder_compression_picmi slow) + +add_warpx_test( + test_rz_ohm_solver_cylinder_compression_picmi # name + RZ # dims + 2 # nprocs + "inputs_test_3d_ohm_solver_cylinder_compression_picmi.py --test" # inputs + analysis_rz.py # analysis + diags/diag1000100 # output + OFF # dependency +) +label_warpx_test(test_rz_ohm_solver_cylinder_compression_picmi slow) \ No newline at end of file diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py new file mode 100644 index 00000000000..7cb41d28ab3 --- /dev/null +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py @@ -0,0 +1,429 @@ +#!/usr/bin/env python3 +# +# --- Test script for the kinetic-fluid hybrid model in WarpX wherein ions are +# --- treated as kinetic particles and electrons as an isothermal, inertialess +# --- background fluid. The script demonstrates the use of this model to +# --- simulate magnetic reconnection in a force-free sheet. The setup is based +# --- on the problem described in Le et al. (2016) +# --- https://aip.scitation.org/doi/10.1063/1.4943893. 
+ +import argparse +import shutil +import sys +from pathlib import Path + +import dill +import numpy as np +from mpi4py import MPI as mpi +import pandas as pd +from scipy.interpolate import PchipInterpolator + +import openpmd_api as io + +from pywarpx import callbacks, fields, libwarpx, picmi, amrex + +# amrex.throw_exception = 1 +# amrex.signal_handling = 0 + +constants = picmi.constants + +comm = mpi.COMM_WORLD + +simulation = picmi.Simulation(warpx_serialize_initial_conditions=True, verbose=0) + + +class PlasmaCylinderCompression(object): + # B0 is chosen with all other quantities scaled by it + n0 = 1e20 + T_i = 10 # eV + T_e = 0 + p0 = n0*constants.q_e*T_i + + B0 = np.sqrt(2*constants.mu0*p0) # Initial magnetic field strength (T) + + # Do a 2x uniform B-field compression + dB = B0 + + # Flux Conserver radius + R_c = 0.5 + + # Plasma Radius (These values match GS solution in gs_psi.csv) + R_p = 0.25 + delta_p = 0.025 + + # Domain parameters + LX = 2.*R_c*1.05 # m + LY = 2.*R_c*1.05 + LZ = 0.5 # m + + LT = 20 # ion cyclotron periods + DT = 1e-3 # ion cyclotron periods + + # Resolution parameters + NX = 256 + NY = 256 + NZ = 128 + + # Starting number of particles per cell + NPPC = 100 + + # Number of substeps used to update B + substeps = 20 + + def Bz(self, r): + return np.sqrt(self.B0**2 - 2.*constants.mu0*self.n0*constants.q_e*self.T_i/(1. 
+ np.exp((r - self.R_p)/self.delta_p))) + + def __init__(self, test, verbose): + self.test = test + self.verbose = verbose or self.test + + self.Lx = self.LX + self.Ly = self.LY + self.Lz = self.LZ + + self.DX = self.LX/self.NX + self.DY = self.LY/self.NY + self.DZ = self.LZ/self.NZ + + if comm.rank == 0: + # Write uniform compression dataset to OpenPMD to exercise reading openPMD data + # for the time varying external fields + xvec = np.linspace(-self.LX, self.LX, num=2*self.NX) + yvec = np.linspace(-self.LY,self.LY, num=2*self.NY) + zvec = np.linspace(-self.LZ, self.LZ, num=2*self.NZ) + XM, YM, ZM = np.meshgrid(xvec, yvec, zvec, indexing='ij') + + RM = np.sqrt(XM**2 + YM**2) + + + Ax_data = -0.5*YM*self.dB + Ay_data = 0.5*XM*self.dB + Az_data = np.zeros_like(RM) + + # Write vector potential to file to exercise field loading via OPenPMD + series = io.Series("Afield.h5", io.Access.create) + + it = series.iterations[0] + + A = it.meshes["A"] + A.grid_spacing = [self.DX, self.DY, self.DZ] + A.grid_global_offset = [-self.LX, -self.LY, -self.LZ] + A.grid_unit_SI = 1.0 + A.axis_labels = ["x", "y", "z"] + A.data_order = "C" + A.unit_dimension = {io.Unit_Dimension.M: 1.0, + io.Unit_Dimension.T: -2.0, + io.Unit_Dimension.I: -1.0, + io.Unit_Dimension.L: -1.0} + + Ax = A["x"] + Ay = A["y"] + Az = A["z"] + + Ax.position = [0., 0.] + Ay.position = [0., 0.] + Az.position = [0., 0.] 
+ + Ax_dataset = io.Dataset( + Ax_data.dtype, + Ax_data.shape + ) + + Ay_dataset = io.Dataset( + Ay_data.dtype, + Ay_data.shape + ) + + Az_dataset = io.Dataset( + Az_data.dtype, + Az_data.shape + ) + + Ax.reset_dataset(Ax_dataset) + Ay.reset_dataset(Ay_dataset) + Az.reset_dataset(Az_dataset) + + Ax.store_chunk(Ax_data) + Ay.store_chunk(Ay_data) + Az.store_chunk(Az_data) + + series.flush() + series.close() + + comm.Barrier() + + # calculate various plasma parameters based on the simulation input + self.get_plasma_quantities() + + self.dt = self.DT * self.t_ci + + # run very low resolution as a CI test + if self.test: + self.total_steps = 20 + self.diag_steps = self.total_steps // 5 + self.NR = 64 + self.NZ = 128 + else: + self.total_steps = int(self.LT / self.DT) + self.diag_steps = 100 #self.total_steps // 200 + + # dump all the current attributes to a dill pickle file + if comm.rank == 0: + with open("sim_parameters.dpkl", "wb") as f: + dill.dump(self, f) + + # print out plasma parameters + if comm.rank == 0: + print( + f"Initializing simulation with input parameters:\n" + f"\tTi = {self.T_i:.1f} eV\n" + f"\tn0 = {self.n0:.1e} m^-3\n" + f"\tB0 = {self.B0:.2f} T\n", + f"\tDX/DY = {self.DX/self.l_i:.3f} c/w_pi\n" + f"\tDZ = {self.DZ/self.l_i:.3f} c/w_pi\n" + ) + print( + f"Plasma parameters:\n" + f"\tl_i = {self.l_i:.1e} m\n" + f"\tt_ci = {self.t_ci:.1e} s\n" + f"\tv_ti = {self.vi_th:.1e} m/s\n" + f"\tvA = {self.vA:.1e} m/s\n" + ) + print( + f"Numerical parameters:\n" + f"\tdz = {self.Lz/self.NZ:.1e} m\n" + f"\tdt = {self.dt:.1e} s\n" + f"\tdiag steps = {self.diag_steps:d}\n" + f"\ttotal steps = {self.total_steps:d}\n" + ) + + self.setup_run() + + def get_plasma_quantities(self): + """Calculate various plasma parameters based on the simulation input.""" + + # Ion mass (kg) + self.M = constants.m_p + + # Cyclotron angular frequency (rad/s) and period (s) + self.w_ci = constants.q_e * abs(self.B0) / self.M + self.t_ci = 2.0 * np.pi / self.w_ci + + # Ion plasma 
frequency (Hz) + self.w_pi = np.sqrt(constants.q_e**2 * self.n0 / (self.M * constants.ep0)) + + # Ion skin depth (m) + self.l_i = constants.c / self.w_pi + + # # Alfven speed (m/s): vA = B / sqrt(mu0 * n * (M + m)) = c * omega_ci / w_pi + self.vA = abs(self.B0) / np.sqrt( + constants.mu0 * self.n0 * (constants.m_e + self.M) + ) + + # calculate thermal speeds + self.vi_th = np.sqrt(self.T_i * constants.q_e / self.M) + + # Ion Larmor radius (m) + self.rho_i = self.vi_th / self.w_ci + + def load_fields(self): + Bx = fields.BxFPExternalWrapper(include_ghosts=False) + By = fields.ByFPExternalWrapper(include_ghosts=False) + Bz = fields.BzFPExternalWrapper(include_ghosts=False) + + Bx[:,:] = 0. + By[:,:] = 0. + + XM, YM, ZM = np.meshgrid(Bz.mesh('x'), Bz.mesh('y'), Bz.mesh('z'), indexing='ij') + + RM = np.sqrt(XM**2 + YM**2) + + Bz[:,:] = self.Bz(RM) + comm.Barrier() + + def setup_run(self): + """Setup simulation components.""" + + ####################################################################### + # Set geometry and boundary conditions # + ####################################################################### + + # Create grid + self.grid = picmi.Cartesian3DGrid( + number_of_cells=[self.NX, self.NY, self.NZ], + lower_bound=[-0.5*self.Lx, -0.5*self.Ly, -0.5*self.Lz], + upper_bound=[ 0.5*self.Lx, 0.5*self.Ly, 0.5*self.Lz], + lower_boundary_conditions=["dirichlet", "dirichlet", "periodic"], + upper_boundary_conditions=["dirichlet", "dirichlet", "periodic"], + lower_boundary_conditions_particles=["absorbing", "absorbing", "periodic"], + upper_boundary_conditions_particles=["absorbing", "absorbing", "periodic"], + warpx_max_grid_size=self.NZ, + ) + simulation.time_step_size = self.dt + simulation.max_steps = self.total_steps + simulation.current_deposition_algo = "direct" + simulation.particle_shape = 1 + simulation.use_filter = True + simulation.verbose = self.verbose + + ####################################################################### + # Field solver and 
external field # + ####################################################################### + # External Field definition. Sigmoid starting around 2.5 us + A_ext = { + 'uniform': { + "read_from_file": True, + "path": "Afield.h5", + "A_time_external_function": "1/(1+exp(5*(1-(t-t0_ramp)*sqrt(2)/tau_ramp)))", + } + } + + self.solver = picmi.HybridPICSolver( + grid=self.grid, + gamma=1.0, + Te=self.T_e, + n0=self.n0, + n_floor=0.05*self.n0, + plasma_resistivity="if(rho<=rho_floor,eta_v,eta_p)", + plasma_hyper_resistivity=1e-8, + substeps=self.substeps, + A_external=A_ext, + tau_ramp=20e-6, + t0_ramp=5e-6, + rho_floor=0.01*self.n0*constants.q_e, + eta_p=1e-8, + eta_v=1e-3 + ) + simulation.solver = self.solver + + simulation.embedded_boundary = picmi.EmbeddedBoundary( + implicit_function="(x**2+y**2-R_w**2)", + R_w=self.R_c + ) + + # Add field loader callback + B_ext = picmi.LoadInitialFieldFromPython( + load_from_python=self.load_fields, + warpx_do_divb_cleaning_external=True, + load_B=True, + load_E=False + ) + simulation.add_applied_field(B_ext) + + ####################################################################### + # Particle types setup # + ####################################################################### + r_omega = '(sqrt(x*x+y*y)*q_e*B0/m_p)' + dlnndr = '((-1/delta_p)/(1+exp(-(sqrt(x*x+y*y)-R_p)/delta_p)))' + vth = f'0.5*(-{r_omega}+sqrt({r_omega}*{r_omega}+4*q_e*T_i*{dlnndr}/m_p))' + + momentum_expr = [ + f'y*{vth}', + f'-x*{vth}', + '0' + ] + + self.ions = picmi.Species( + name="ions", + charge="q_e", + mass=self.M, + initial_distribution=picmi.AnalyticDistribution( + density_expression=f"n0_p/(1+exp((sqrt(x*x+y*y)-R_p)/delta_p))", + momentum_expressions=momentum_expr, + warpx_momentum_spread_expressions=[f'{str(self.vi_th)}']*3, + warpx_density_min=0.01*self.n0, + R_p=self.R_p, + delta_p=self.delta_p, + n0_p=self.n0, + B0=self.B0, + T_i=self.T_i + ), + ) + simulation.add_species( + self.ions, + layout=picmi.PseudoRandomLayout( + grid=self.grid, 
n_macroparticles_per_cell=self.NPPC + ), + ) + + ####################################################################### + # Add diagnostics # + ####################################################################### + + # callbacks.installafterEsolve(self.check_fields) + + # particle_diag = picmi.ParticleDiagnostic( + # name="particles", + # period=self.diag_steps, + # species=[self.ions], + # data_list=["ux", "uy", "uz", "x", "z", "weighting"], + # warpx_format='openpmd', + # warpx_openpmd_backend='h5', + # ) + # simulation.add_diagnostic(particle_diag) + field_diag = picmi.FieldDiagnostic( + name="fields", + grid=self.grid, + period=self.diag_steps, + data_list=["B", "E", "rho", "divB", "T_ions", "J", "J_displacement"], + warpx_format='openpmd', + warpx_openpmd_backend='h5', + ) + simulation.add_diagnostic(field_diag) + + ####################################################################### + # Initialize # + ####################################################################### + + if comm.rank == 0: + if Path.exists(Path("diags")): + shutil.rmtree("diags") + Path("diags/fields").mkdir(parents=True, exist_ok=True) + + # Initialize inputs and WarpX instance + simulation.initialize_inputs() + simulation.initialize_warpx() + + # def check_fields(self): + # step = simulation.extension.warpx.getistep(lev=0) - 1 + + # if not (step == 1 or step % self.diag_steps == 0): + # return + + # rho = fields.RhoFPWrapper(include_ghosts=False)[:, :] + # Jiy = fields.JyFPWrapper(include_ghosts=False)[...] / self.J0 + # Jy = fields.JyFPPlasmaWrapper(include_ghosts=False)[...] / self.J0 + # Bx = fields.BxFPWrapper(include_ghosts=False)[...] / self.B0 + # By = fields.ByFPWrapper(include_ghosts=False)[...] / self.B0 + # Bz = fields.BzFPWrapper(include_ghosts=False)[...] 
/ self.B0 + + # if libwarpx.amr.ParallelDescriptor.MyProc() != 0: + # return + + # # save the fields to file + # with open(f"diags/fields/fields_{step:06d}.npz", "wb") as f: + # np.savez(f, rho=rho, Jiy=Jiy, Jy=Jy, Bx=Bx, By=By, Bz=Bz) + + +########################## +# parse input parameters +########################## + +parser = argparse.ArgumentParser() +parser.add_argument( + "-t", + "--test", + help="toggle whether this script is run as a short CI test", + action="store_true", +) +parser.add_argument( + "-v", + "--verbose", + help="Verbose output", + action="store_true", +) +args, left = parser.parse_known_args() +sys.argv = sys.argv[:1] + left + +run = PlasmaCylinderCompression(test=args.test, verbose=args.verbose) +simulation.step() diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py new file mode 100644 index 00000000000..af01bd33ce3 --- /dev/null +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py @@ -0,0 +1,421 @@ +#!/usr/bin/env python3 +# +# --- Test script for the kinetic-fluid hybrid model in WarpX wherein ions are +# --- treated as kinetic particles and electrons as an isothermal, inertialess +# --- background fluid. The script demonstrates the use of this model to +# --- simulate adiabatic compression of a plasma cylinder initialized from an +# --- analytical Grad-Shafranov solution. 
+ +# NOTE: Currently radial boundary is broken and requires further investigation + +import argparse +import shutil +import sys +from pathlib import Path + +import dill +import numpy as np +from mpi4py import MPI as mpi +import pandas as pd +from scipy.interpolate import PchipInterpolator + +import openpmd_api as io + +from pywarpx import callbacks, fields, libwarpx, picmi, amrex + +# amrex.throw_exception = 1 +# amrex.signal_handling = 0 + +constants = picmi.constants + +comm = mpi.COMM_WORLD + +simulation = picmi.Simulation(warpx_serialize_initial_conditions=True, verbose=0) + + +class PlasmaCylinderCompression(object): + # B0 is chosen with all other quantities scaled by it + n0 = 1e20 + T_i = 10 # eV + T_e = 0 + p0 = n0*constants.q_e*T_i + + B0 = np.sqrt(2*constants.mu0*p0) # External magnetic field strength (T) + + # Do a 2x uniform B-field compression + dB = B0 + + # Flux Conserver radius + R_c = 0.5 + + # Plasma Radius (These values match GS solution in gs_psi.csv) + R_p = 0.25 + delta_p = 0.025 + + # Domain parameters + LR = R_c # m + LZ = 0.25*R_c # m + + LT = 20 # ion cyclotron periods + DT = 1e-3 # ion cyclotron periods + + # Resolution parameters + NR = 128 + NZ = 32 + + # Starting number of particles per cell + NPPC = 100 + + # Number of substeps used to update B + substeps = 25 + + def Bz(self, r): + return np.sqrt(self.B0**2 - 2.*constants.mu0*self.n0*constants.q_e*self.T_i/(1. 
+ np.exp((r - self.R_p)/self.delta_p))) + + def __init__(self, test, verbose): + self.test = test + self.verbose = verbose or self.test + + self.Lr = self.LR + self.Lz = self.LZ + + self.DR = self.LR/self.NR + self.DZ = self.LZ/self.NZ + + # Write A to OpenPMD for a uniform B field to exercise file based loader + if comm.rank == 0: + mvec = np.array([0]) + rvec = np.linspace(0,2*self.LR, num=2*self.NR) + zvec = np.linspace(-self.LZ, self.LZ, num=2*self.NZ) + MM, RM, ZM = np.meshgrid(mvec, rvec, zvec, indexing='ij') + + # Write uniform compression dataset to OpenPMD to exercise reading openPMD data + # for the time varying external fields + Ar_data = np.zeros_like(RM) + Az_data = np.zeros_like(RM) + + # Zero padded outside of domain + At_data = 0.5*RM*self.dB + + # Write vector potential to file to exercise field loading via + series = io.Series("Afield.h5", io.Access.create) + + it = series.iterations[0] + + A = it.meshes["A"] + A.geometry = io.Geometry.thetaMode + A.geometry_parameters = "m=0" + A.grid_spacing = [self.DR, self.DZ] + A.grid_global_offset = [0.0, -self.LZ] + A.grid_unit_SI = 1.0 + A.axis_labels = ["r", "z"] + A.data_order = "C" + A.unit_dimension = {io.Unit_Dimension.M: 1.0, + io.Unit_Dimension.T: -2.0, + io.Unit_Dimension.I: -1.0, + io.Unit_Dimension.L: -1.0} + + Ar = A["r"] + At = A["t"] + Az = A["z"] + + Ar.position = [0., 0.] + At.position = [0., 0.] + Az.position = [0., 0.] 
+ + Ar_dataset = io.Dataset( + Ar_data.dtype, + Ar_data.shape + ) + + At_dataset = io.Dataset( + At_data.dtype, + At_data.shape + ) + + Az_dataset = io.Dataset( + Az_data.dtype, + Az_data.shape + ) + + Ar.reset_dataset(Ar_dataset) + At.reset_dataset(At_dataset) + Az.reset_dataset(Az_dataset) + + Ar.store_chunk(Ar_data) + At.store_chunk(At_data) + Az.store_chunk(Az_data) + + series.flush() + series.close() + + comm.Barrier() + + # calculate various plasma parameters based on the simulation input + self.get_plasma_quantities() + + self.dt = self.DT * self.t_ci + + # run very low resolution as a CI test + if self.test: + self.total_steps = 20 + self.diag_steps = self.total_steps // 5 + self.NR = 64 + self.NZ = 128 + else: + self.total_steps = int(self.LT / self.DT) + self.diag_steps = 100 #self.total_steps // 200 + + # dump all the current attributes to a dill pickle file + if comm.rank == 0: + with open("sim_parameters.dpkl", "wb") as f: + dill.dump(self, f) + + # print out plasma parameters + if comm.rank == 0: + print( + f"Initializing simulation with input parameters:\n" + f"\tTi = {self.T_i:.1f} eV\n" + f"\tn0 = {self.n0:.1e} m^-3\n" + f"\tB0 = {self.B0:.2f} T\n", + f"\tDR = {self.DR/self.l_i:.3f} c/w_pi\n" + f"\tDZ = {self.DZ/self.l_i:.3f} c/w_pi\n" + ) + print( + f"Plasma parameters:\n" + f"\tl_i = {self.l_i:.1e} m\n" + f"\tt_ci = {self.t_ci:.1e} s\n" + f"\tv_ti = {self.vi_th:.1e} m/s\n" + f"\tvA = {self.vA:.1e} m/s\n" + ) + print( + f"Numerical parameters:\n" + f"\tdz = {self.Lz/self.NZ:.1e} m\n" + f"\tdt = {self.dt:.1e} s\n" + f"\tdiag steps = {self.diag_steps:d}\n" + f"\ttotal steps = {self.total_steps:d}\n" + ) + + self.setup_run() + + def get_plasma_quantities(self): + """Calculate various plasma parameters based on the simulation input.""" + + # Ion mass (kg) + self.M = constants.m_p + + # Cyclotron angular frequency (rad/s) and period (s) + self.w_ci = constants.q_e * abs(self.B0) / self.M + self.t_ci = 2.0 * np.pi / self.w_ci + + # Ion plasma frequency 
(Hz) + self.w_pi = np.sqrt(constants.q_e**2 * self.n0 / (self.M * constants.ep0)) + + # Ion skin depth (m) + self.l_i = constants.c / self.w_pi + + # # Alfven speed (m/s): vA = B / sqrt(mu0 * n * (M + m)) = c * omega_ci / w_pi + self.vA = abs(self.B0) / np.sqrt( + constants.mu0 * self.n0 * (constants.m_e + self.M) + ) + + # calculate thermal speeds + self.vi_th = np.sqrt(self.T_i * constants.q_e / self.M) + + # Ion Larmor radius (m) + self.rho_i = self.vi_th / self.w_ci + + def load_fields(self): + Br = fields.BxFPExternalWrapper(include_ghosts=False) + Bt = fields.ByFPExternalWrapper(include_ghosts=False) + Bz = fields.BzFPExternalWrapper(include_ghosts=False) + + Br[:,:] = 0. + Bt[:,:] = 0. + + RM, ZM = np.meshgrid(Bz.mesh('r'), Bz.mesh('z'), indexing='ij') + + Bz[:,:] = self.Bz(RM)*(RM <= self.R_c) + comm.Barrier() + + def setup_run(self): + """Setup simulation components.""" + + ####################################################################### + # Set geometry and boundary conditions # + ####################################################################### + + # Create grid + self.grid = picmi.CylindricalGrid( + number_of_cells=[self.NR, self.NZ], + lower_bound=[0., -self.Lz / 2.0], + upper_bound=[self.Lr, self.Lz / 2.0], + lower_boundary_conditions=["none", "periodic"], + upper_boundary_conditions=["dirichlet", "periodic"], + lower_boundary_conditions_particles=["none", "periodic"], + upper_boundary_conditions_particles=["absorbing", "periodic"], + warpx_max_grid_size=self.NZ, + ) + simulation.time_step_size = self.dt + simulation.max_steps = self.total_steps + simulation.current_deposition_algo = "direct" + simulation.particle_shape = 1 + simulation.use_filter = True + simulation.verbose = self.verbose + + ####################################################################### + # Field solver and external field # + ####################################################################### + # External Field definition. 
Sigmoid starting around 2.5 us + A_ext = { + 'uniform': { + "read_from_file": True, + "path": "Afield.h5", + "A_time_external_function": "1/(1+exp(5*(1-(t-t0_ramp)*sqrt(2)/tau_ramp)))", + } + } + + self.solver = picmi.HybridPICSolver( + grid=self.grid, + gamma=1.0, + Te=self.T_e, + n0=self.n0, + n_floor=0.05*self.n0, + plasma_resistivity="if(rho<=rho_floor,eta_v,eta_p)", + plasma_hyper_resistivity=1e-8, + substeps=self.substeps, + A_external=A_ext, + tau_ramp=20e-6, + t0_ramp=5e-6, + rho_floor=0.01*self.n0*constants.q_e, + eta_p=1e-8, + eta_v=1e-3 + ) + simulation.solver = self.solver + + # Add field loader callback + B_ext = picmi.LoadInitialFieldFromPython( + load_from_python=self.load_fields, + warpx_do_divb_cleaning_external=True, + load_B=True, + load_E=False + ) + simulation.add_applied_field(B_ext) + + ####################################################################### + # Particle types setup # + ####################################################################### + r_omega = '(sqrt(x*x+y*y)*q_e*B0/m_p)' + dlnndr = '((-1/delta_p)/(1+exp(-(sqrt(x*x+y*y)-R_p)/delta_p)))' + vth = f'0.5*(-{r_omega}+sqrt({r_omega}*{r_omega}+4*q_e*T_i*{dlnndr}/m_p))' + + momentum_expr = [ + f'y*{vth}', + f'-x*{vth}', + '0' + ] + + self.ions = picmi.Species( + name="ions", + charge="q_e", + mass=self.M, + initial_distribution=picmi.AnalyticDistribution( + density_expression=f"n0_p/(1+exp((sqrt(x*x+y*y)-R_p)/delta_p))", + momentum_expressions=momentum_expr, + warpx_momentum_spread_expressions=[f'{str(self.vi_th)}']*3, + warpx_density_min=0.01*self.n0, + R_p=self.R_p, + delta_p=self.delta_p, + n0_p=self.n0, + B0=self.B0, + T_i=self.T_i + ), + ) + simulation.add_species( + self.ions, + layout=picmi.PseudoRandomLayout( + grid=self.grid, n_macroparticles_per_cell=self.NPPC + ), + ) + + ####################################################################### + # Add diagnostics # + ####################################################################### + + # 
callbacks.installafterEsolve(self.check_fields) + + # particle_diag = picmi.ParticleDiagnostic( + # name="particles", + # period=self.diag_steps, + # species=[self.ions], + # data_list=["ux", "uy", "uz", "x", "z", "weighting"], + # warpx_format='openpmd', + # warpx_openpmd_backend='h5', + # ) + # simulation.add_diagnostic(particle_diag) + field_diag = picmi.FieldDiagnostic( + name="fields", + grid=self.grid, + period=self.diag_steps, + data_list=["B", "E", "rho", "divB", "T_ions", "J", "J_displacement"], + warpx_format='openpmd', + warpx_openpmd_backend='h5', + ) + simulation.add_diagnostic(field_diag) + + ####################################################################### + # Initialize # + ####################################################################### + + if comm.rank == 0: + if Path.exists(Path("diags")): + shutil.rmtree("diags") + Path("diags/fields").mkdir(parents=True, exist_ok=True) + + # Initialize inputs and WarpX instance + simulation.initialize_inputs() + simulation.initialize_warpx() + + # def check_fields(self): + # step = simulation.extension.warpx.getistep(lev=0) - 1 + + # if not (step == 1 or step % self.diag_steps == 0): + # return + + # rho = fields.RhoFPWrapper(include_ghosts=False)[:, :] + # Jiy = fields.JyFPWrapper(include_ghosts=False)[...] / self.J0 + # Jy = fields.JyFPPlasmaWrapper(include_ghosts=False)[...] / self.J0 + # Bx = fields.BxFPWrapper(include_ghosts=False)[...] / self.B0 + # By = fields.ByFPWrapper(include_ghosts=False)[...] / self.B0 + # Bz = fields.BzFPWrapper(include_ghosts=False)[...] 
/ self.B0 + + # if libwarpx.amr.ParallelDescriptor.MyProc() != 0: + # return + + # # save the fields to file + # with open(f"diags/fields/fields_{step:06d}.npz", "wb") as f: + # np.savez(f, rho=rho, Jiy=Jiy, Jy=Jy, Bx=Bx, By=By, Bz=Bz) + + +########################## +# parse input parameters +########################## + +parser = argparse.ArgumentParser() +parser.add_argument( + "-t", + "--test", + help="toggle whether this script is run as a short CI test", + action="store_true", +) +parser.add_argument( + "-v", + "--verbose", + help="Verbose output", + action="store_true", +) +args, left = parser.parse_known_args() +sys.argv = sys.argv[:1] + left + +run = PlasmaCylinderCompression(test=args.test, verbose=args.verbose) +simulation.step() diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 41600399ff7..397c65f09e3 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1783,8 +1783,30 @@ class HybridPICSolver(picmistandard.base._ClassWithInit): Jx/y/z_external_function: str Function of space and time specifying external (non-plasma) currents. - Ax/y/z_external_function: str + A_external: dict Function of space and time specifying external (non-plasma) vector potential fields. + It is expected that a nested dictionary will be passed + into picmi for each field that has different timings + e.g. + A_external = { + '<field name>': { + 'Ax_external_function': <function>, + 'Ay_external_function': <function>, + 'Az_external_function': <function>, + 'A_time_external_function': <function> + }, + '<field name 2>': {...} + } + + or if fields are to be loaded from an OpenPMD file + A_external = { + '<field name>': { + 'read_from_file': True, + 'path': <path to file>, + 'A_time_external_function': <function> + }, + '<field name 2>': {...} + } """ def __init__( @@ -1820,19 +1842,6 @@ def __init__( self.Jz_external_function = Jz_external_function self.add_external_fields = None - - # It is expected that a nested dicitonary will be passed - # into picmi for each field that has different timings - # e.g. 
- # A_external = { - # '': { - # 'Ax_external_function': ..., - # 'Ax_external_function': ..., - # 'Ax_external_function': ..., - # 'A_time_external_function': ... - # }, - # ': {...}' - # } self.A_external = A_external if A_external is not None: @@ -1895,24 +1904,34 @@ def solver_initialize_inputs(self): ), ) for field_name, field_dict in self.A_external.items(): - pywarpx.external_vector_potential.__setattr__( - f"{field_name}.Ax_external_grid_function(x,y,z)", - pywarpx.my_constants.mangle_expression( - field_dict["Ax_external_function"], self.mangle_dict - ), - ) - pywarpx.external_vector_potential.__setattr__( - f"{field_name}.Ay_external_grid_function(x,y,z)", - pywarpx.my_constants.mangle_expression( - field_dict["Ay_external_function"], self.mangle_dict - ), - ) - pywarpx.external_vector_potential.__setattr__( - f"{field_name}.Az_external_grid_function(x,y,z)", - pywarpx.my_constants.mangle_expression( - field_dict["Az_external_function"], self.mangle_dict - ), - ) + if "read_from_file" in field_dict.keys() and field_dict["read_from_file"]: + pywarpx.external_vector_potential.__setattr__( + f"{field_name}.read_from_file", + field_dict["read_from_file"] + ) + pywarpx.external_vector_potential.__setattr__( + f"{field_name}.path", + field_dict["path"] + ) + else: + pywarpx.external_vector_potential.__setattr__( + f"{field_name}.Ax_external_grid_function(x,y,z)", + pywarpx.my_constants.mangle_expression( + field_dict["Ax_external_function"], self.mangle_dict + ), + ) + pywarpx.external_vector_potential.__setattr__( + f"{field_name}.Ay_external_grid_function(x,y,z)", + pywarpx.my_constants.mangle_expression( + field_dict["Ay_external_function"], self.mangle_dict + ), + ) + pywarpx.external_vector_potential.__setattr__( + f"{field_name}.Az_external_grid_function(x,y,z)", + pywarpx.my_constants.mangle_expression( + field_dict["Az_external_function"], self.mangle_dict + ), + ) pywarpx.external_vector_potential.__setattr__( 
f"{field_name}.A_time_external_function(t)", pywarpx.my_constants.mangle_expression( diff --git a/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp b/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp index 445ad944150..cbed59ba1cd 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp @@ -166,12 +166,12 @@ void FiniteDifferenceSolver::ComputeCurlACylindrical ( } }, - // Jz calculation + // Bz calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ if (cov_ptr.isCovered(2, EB::CoverTopology::face, i, j, 0)) { return; } Real const r = rmin + (i + 0.5_rt)*dr; // r on a cell-centered grid (Bz is cell-centered in r) - Bz(i, j, 0, 0) = - ( - T_Algo::UpwardDrr_over_r(At, r, dr, coefs_r, n_coefs_r, i, j, 0, 0)); + Bz(i, j, 0, 0) = T_Algo::UpwardDrr_over_r(At, r, dr, coefs_r, n_coefs_r, i, j, 0, 0); for (int m=1 ; mFillBoundary(warpx.Geom(lev).periodicity()); B_ext[lev][Direction{idir}]->FillBoundary(warpx.Geom(lev).periodicity()); } + + if (EB::enabled()) { + ZeroFieldinEB(B_ext[lev], EB::CoverTopology::face, lev); + ZeroFieldinEB(E_ext[lev], EB::CoverTopology::edge, lev); + } } } amrex::Gpu::streamSynchronize(); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 129ce706b92..14a3e3c8eef 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -578,9 +578,12 @@ void HybridPICModel::FieldPush ( // Calculate the E-field from Ohm's law HybridPICSolveE(Efield, Jfield, Bfield, rhofield, true); warpx.FillBoundaryE(ng, nodal_sync); + warpx.ApplyEfieldBoundary(0, PatchType::fine); + // Push forward the B-field using Faraday's law warpx.EvolveB(dt, dt_type); warpx.FillBoundaryB(ng, nodal_sync); + warpx.ApplyBfieldBoundary(0, PatchType::fine, 
dt_type); } void diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 08e59482841..301cecc16b2 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -567,18 +567,20 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Er_stag, coarsen, i, j, 0, 0); + Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; + Real jr_val = 0._rt; if (solve_for_Faraday && resistivity_has_J_dependence) { - const Real jr_val = Interp(Jr, Jr_stag, Er_stag, coarsen, i, j, 0, 0); + jr_val = Interp(Jr, Jr_stag, Er_stag, coarsen, i, j, 0, 0); const Real jt_val = Interp(Jt, Jt_stag, Er_stag, coarsen, i, j, 0, 0); const Real jz_val = Interp(Jz, Jz_stag, Er_stag, coarsen, i, j, 0, 0); jtot_val = std::sqrt(jr_val*jr_val + jt_val*jt_val + jz_val*jz_val); } // safety condition since we divide by rho_val later - if (rho_val < rho_floor) { rho_val = rho_floor; } + if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } // Get the gradient of the electron pressure if the longitudinal part of // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 @@ -588,7 +590,11 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // interpolate the nodal neE values to the Yee grid auto enE_r = Interp(enE, nodal, Er_stag, coarsen, i, j, 0, 0); - Er(i, j, 0) = (enE_r - grad_Pe) / rho_val; + if (rho_val >= rho_val_limited) { + Er(i, j, 0) = (enE_r - grad_Pe) / rho_val_limited; + } else { + Ez(i, j, 0) = 0._rt; + } // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Er(i, j, 0) += eta(rho_val, jtot_val) * Jr(i, j, 0); } @@ -597,11 +603,12 @@ void 
FiniteDifferenceSolver::HybridPICSolveECylindrical ( // r on cell-centered point (Jr is cell-centered in r) Real const r = rmin + (i + 0.5_rt)*dr; - auto nabla2Jr = T_Algo::Dr_rDr_over_r(Jr, r, dr, coefs_r, n_coefs_r, i, j, 0, 0); + auto nabla2Jr = T_Algo::Dr_rDr_over_r(Jr, r, dr, coefs_r, n_coefs_r, i, j, 0, 0) + + T_Algo::Dzz(Jr, coefs_z, n_coefs_z, i, j, 0, 0) - jr_val/(r*r); Er(i, j, 0) -= eta_h * nabla2Jr; } - if (include_external_fields) { + if (include_external_fields && rho_val >= rho_floor) { Er(i, j, 0) -= Er_ext(i, j, 0); } }, @@ -619,19 +626,21 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( } // Interpolate to get the appropriate charge density in space - Real rho_val = Interp(rho, nodal, Er_stag, coarsen, i, j, 0, 0); + Real rho_val = Interp(rho, nodal, Et_stag, coarsen, i, j, 0, 0); + Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; + Real jt_val = 0._rt; if (solve_for_Faraday && resistivity_has_J_dependence) { const Real jr_val = Interp(Jr, Jr_stag, Et_stag, coarsen, i, j, 0, 0); - const Real jt_val = Interp(Jt, Jt_stag, Et_stag, coarsen, i, j, 0, 0); + jt_val = Interp(Jt, Jt_stag, Et_stag, coarsen, i, j, 0, 0); const Real jz_val = Interp(Jz, Jz_stag, Et_stag, coarsen, i, j, 0, 0); jtot_val = std::sqrt(jr_val*jr_val + jt_val*jt_val + jz_val*jz_val); } // safety condition since we divide by rho_val later - if (rho_val < rho_floor) { rho_val = rho_floor; } + if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } // Get the gradient of the electron pressure // -> d/dt = 0 for m = 0 @@ -640,14 +649,29 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // interpolate the nodal neE values to the Yee grid auto enE_t = Interp(enE, nodal, Et_stag, coarsen, i, j, 0, 1); - Et(i, j, 0) = (enE_t - grad_Pe) / rho_val; + if (rho_val >= rho_val_limited) { + Et(i, j, 0) = (enE_t - grad_Pe) / rho_val_limited; + } else { + Et(i, j, 0) = 0._rt; + } // Add 
resistivity only if E field value is used to update B if (solve_for_Faraday) { Et(i, j, 0) += eta(rho_val, jtot_val) * Jt(i, j, 0); } - // Note: Hyper-resisitivity should be revisited here when modal decomposition is implemented + if (include_hyper_resistivity_term) { + // r on nodal point (Jt is nodal in r) + Real const r = rmin + i*dr; + + // Do not apply hyper-resistivity at r=0 + auto nabla2Jt = 0._rt; + if (r > 0.5_rt*dr) { + nabla2Jt = T_Algo::Dr_rDr_over_r(Jt, r, dr, coefs_r, n_coefs_r, i, j, 0, 0) + + T_Algo::Dzz(Jt, coefs_z, n_coefs_z, i, j, 0, 0) - jt_val/(r*r); + } + Et(i, j, 0) -= eta_h * nabla2Jt; + } - if (include_external_fields) { + if (include_external_fields && rho_val >= rho_floor) { Et(i, j, 0) -= Et_ext(i, j, 0); } }, @@ -658,6 +682,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, 0, 0); + Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; @@ -669,7 +694,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( } // safety condition since we divide by rho_val later - if (rho_val < rho_floor) { rho_val = rho_floor; } + if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } // Get the gradient of the electron pressure if the longitudinal part of // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 @@ -679,17 +704,29 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // interpolate the nodal neE values to the Yee grid auto enE_z = Interp(enE, nodal, Ez_stag, coarsen, i, j, 0, 2); - Ez(i, j, 0) = (enE_z - grad_Pe) / rho_val; + if (rho_val >= rho_val_limited) { + Ez(i, j, 0) = (enE_z - grad_Pe) / rho_val_limited; + } else { + Ez(i, j, 0) = 0._rt; + } // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Ez(i, j, 0) += eta(rho_val, jtot_val) * Jz(i, j, 0); } if 
(include_hyper_resistivity_term) { - auto nabla2Jz = T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, 0, 0); + // r on nodal point (Jz is nodal in r) + Real const r = rmin + i*dr; + + auto nabla2Jz = 0._rt; + if (r > 0.5_rt*dr) { + nabla2Jz = T_Algo::Dr_rDr_over_r(Jz, r, dr, coefs_r, n_coefs_r, i, j, 0, 0) + + T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, 0, 0); + } + Ez(i, j, 0) -= eta_h * nabla2Jz; } - if (include_external_fields) { + if (include_external_fields && rho_val >= rho_floor) { Ez(i, j, 0) -= Ez_ext(i, j, 0); } } @@ -900,6 +937,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Ex_stag, coarsen, i, j, k, 0); + Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; @@ -911,7 +949,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( } // safety condition since we divide by rho_val later - if (rho_val < rho_floor) { rho_val = rho_floor; } + if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } // Get the gradient of the electron pressure if the longitudinal part of // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 @@ -921,17 +959,19 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // interpolate the nodal neE values to the Yee grid auto enE_x = Interp(enE, nodal, Ex_stag, coarsen, i, j, k, 0); - Ex(i, j, k) = (enE_x - grad_Pe) / rho_val; + Ex(i, j, k) = (enE_x - grad_Pe) / rho_val_limited; // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Ex(i, j, k) += eta(rho_val, jtot_val) * Jx(i, j, k); } if (include_hyper_resistivity_term) { - auto nabla2Jx = T_Algo::Dxx(Jx, coefs_x, n_coefs_x, i, j, k); + auto nabla2Jx = T_Algo::Dxx(Jx, coefs_x, n_coefs_x, i, j, k) + + T_Algo::Dyy(Jx, coefs_y, n_coefs_y, i, j, k) + + T_Algo::Dzz(Jx, coefs_z, n_coefs_z, i, j, k); Ex(i, j, k) -= eta_h * nabla2Jx; } - if 
(include_external_fields) { + if (include_external_fields && rho_val >= rho_floor) { Ex(i, j, k) -= Ex_ext(i, j, k); } }, @@ -942,6 +982,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Ey_stag, coarsen, i, j, k, 0); + Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; @@ -953,7 +994,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( } // safety condition since we divide by rho_val later - if (rho_val < rho_floor) { rho_val = rho_floor; } + if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } // Get the gradient of the electron pressure if the longitudinal part of // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 @@ -963,17 +1004,19 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // interpolate the nodal neE values to the Yee grid auto enE_y = Interp(enE, nodal, Ey_stag, coarsen, i, j, k, 1); - Ey(i, j, k) = (enE_y - grad_Pe) / rho_val; + Ey(i, j, k) = (enE_y - grad_Pe) / rho_val_limited; // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Ey(i, j, k) += eta(rho_val, jtot_val) * Jy(i, j, k); } if (include_hyper_resistivity_term) { - auto nabla2Jy = T_Algo::Dyy(Jy, coefs_y, n_coefs_y, i, j, k); + auto nabla2Jy = T_Algo::Dxx(Jy, coefs_x, n_coefs_x, i, j, k) + + T_Algo::Dyy(Jy, coefs_y, n_coefs_y, i, j, k) + + T_Algo::Dzz(Jy, coefs_z, n_coefs_z, i, j, k); Ey(i, j, k) -= eta_h * nabla2Jy; } - if (include_external_fields) { + if (include_external_fields && rho_val >= rho_floor) { Ey(i, j, k) -= Ey_ext(i, j, k); } }, @@ -984,6 +1027,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, k, 0); + Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering 
to match E field Real jtot_val = 0._rt; @@ -995,7 +1039,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( } // safety condition since we divide by rho_val later - if (rho_val < rho_floor) { rho_val = rho_floor; } + if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } // Get the gradient of the electron pressure if the longitudinal part of // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 @@ -1005,17 +1049,19 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // interpolate the nodal neE values to the Yee grid auto enE_z = Interp(enE, nodal, Ez_stag, coarsen, i, j, k, 2); - Ez(i, j, k) = (enE_z - grad_Pe) / rho_val; + Ez(i, j, k) = (enE_z - grad_Pe) / rho_val_limited; // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Ez(i, j, k) += eta(rho_val, jtot_val) * Jz(i, j, k); } if (include_hyper_resistivity_term) { - auto nabla2Jz = T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, k); + auto nabla2Jz = T_Algo::Dxx(Jz, coefs_x, n_coefs_x, i, j, k) + + T_Algo::Dyy(Jz, coefs_y, n_coefs_y, i, j, k) + + T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, k); Ez(i, j, k) -= eta_h * nabla2Jz; } - if (include_external_fields) { + if (include_external_fields && rho_val >= rho_floor) { Ez(i, j, k) -= Ez_ext(i, j, k); } } diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index 243856b2454..7ba79b1cc98 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -150,7 +150,7 @@ void WarpX::HybridPICEvolveFields () } if (add_external_fields) { - // Get the external fields + // Get the external fields at E^{n+1/2} m_hybrid_pic_model->m_external_vector_potential->UpdateHybridExternalFields( gett_old(0) + 0.5_rt*dt[0], 0.5_rt*dt[0]); @@ -209,6 +209,7 @@ void WarpX::HybridPICEvolveFields () false); FillBoundaryE(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); + // 
ApplyEfieldBoundary(0, PatchType::fine); // Handle field splitting for Hybrid field push if (add_external_fields) { diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 538bd89dd83..dc26d3094e3 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -734,7 +734,7 @@ WarpX::ReadParameters () use_kspace_filter = use_filter; use_filter = false; } - else // FDTD + else if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::HybridPIC) { // Filter currently not working with FDTD solver in RZ geometry along R // (see https://github.com/ECP-WarpX/WarpX/issues/1943) From 0f11b42ddf2ca95cab350f45becb3ec2b9c9f1d0 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Tue, 4 Feb 2025 10:41:45 -0800 Subject: [PATCH 48/86] Changing evaluation of current to break dependence on standard resistivity flags. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- .../HybridPICSolveE.cpp | 21 +++++++------------ 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 301cecc16b2..0b0bef6d430 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -571,9 +571,8 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; - Real jr_val = 0._rt; if (solve_for_Faraday && resistivity_has_J_dependence) { - jr_val = Interp(Jr, Jr_stag, Er_stag, coarsen, i, j, 0, 0); + const Real jr_val = Interp(Jr, Jr_stag, Er_stag, coarsen, i, j, 0, 0); const Real jt_val = Interp(Jt, Jt_stag, Er_stag, coarsen, i, j, 0, 0); const Real jz_val = Interp(Jz, Jz_stag, Er_stag, coarsen, i, j, 0, 0); jtot_val = std::sqrt(jr_val*jr_val + jt_val*jt_val + jz_val*jz_val); @@ -601,7 +600,8 @@ void 
FiniteDifferenceSolver::HybridPICSolveECylindrical ( if (include_hyper_resistivity_term) { // r on cell-centered point (Jr is cell-centered in r) - Real const r = rmin + (i + 0.5_rt)*dr; + const Real r = rmin + (i + 0.5_rt)*dr; + const Real jr_val = Interp(Jr, Jr_stag, Er_stag, coarsen, i, j, 0, 0); auto nabla2Jr = T_Algo::Dr_rDr_over_r(Jr, r, dr, coefs_r, n_coefs_r, i, j, 0, 0) + T_Algo::Dzz(Jr, coefs_z, n_coefs_z, i, j, 0, 0) - jr_val/(r*r); @@ -631,10 +631,9 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; - Real jt_val = 0._rt; if (solve_for_Faraday && resistivity_has_J_dependence) { const Real jr_val = Interp(Jr, Jr_stag, Et_stag, coarsen, i, j, 0, 0); - jt_val = Interp(Jt, Jt_stag, Et_stag, coarsen, i, j, 0, 0); + const Real jt_val = Interp(Jt, Jt_stag, Et_stag, coarsen, i, j, 0, 0); const Real jz_val = Interp(Jz, Jz_stag, Et_stag, coarsen, i, j, 0, 0); jtot_val = std::sqrt(jr_val*jr_val + jt_val*jt_val + jz_val*jz_val); } @@ -659,15 +658,9 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( if (solve_for_Faraday) { Et(i, j, 0) += eta(rho_val, jtot_val) * Jt(i, j, 0); } if (include_hyper_resistivity_term) { - // r on nodal point (Jt is nodal in r) - Real const r = rmin + i*dr; - - // Do not apply hyper-resistivity at r=0 - auto nabla2Jt = 0._rt; - if (r > 0.5_rt*dr) { - nabla2Jt = T_Algo::Dr_rDr_over_r(Jt, r, dr, coefs_r, n_coefs_r, i, j, 0, 0) - + T_Algo::Dzz(Jt, coefs_z, n_coefs_z, i, j, 0, 0) - jt_val/(r*r); - } + const Real jt_val = Interp(Jt, Jt_stag, Et_stag, coarsen, i, j, 0, 0); + auto nabla2Jt = T_Algo::Dr_rDr_over_r(Jt, r, dr, coefs_r, n_coefs_r, i, j, 0, 0) + + T_Algo::Dzz(Jt, coefs_z, n_coefs_z, i, j, 0, 0) - jt_val/(r*r); Et(i, j, 0) -= eta_h * nabla2Jt; } From e55ffdf80118a66822458a613730b27f317f5efb Mon Sep 17 00:00:00 2001 From: "S. 
Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Tue, 4 Feb 2025 15:19:23 -0800 Subject: [PATCH 49/86] Changing Ez calculation for hyper resistivity to allow for z component of vector laplacian on axis Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- .../FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 0b0bef6d430..99fbd338025 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -710,10 +710,9 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // r on nodal point (Jz is nodal in r) Real const r = rmin + i*dr; - auto nabla2Jz = 0._rt; + auto nabla2Jz = T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, 0, 0); if (r > 0.5_rt*dr) { - nabla2Jz = T_Algo::Dr_rDr_over_r(Jz, r, dr, coefs_r, n_coefs_r, i, j, 0, 0) - + T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, 0, 0); + nabla2Jz += T_Algo::Dr_rDr_over_r(Jz, r, dr, coefs_r, n_coefs_r, i, j, 0, 0); } Ez(i, j, 0) -= eta_h * nabla2Jz; From 8a09559df44d91e699f973feee90cda8856f2b49 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 5 Feb 2025 23:42:45 +0000 Subject: [PATCH 50/86] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../CMakeLists.txt | 2 +- ...d_ohm_solver_cylinder_compression_picmi.py | 154 +++++++++--------- ...z_ohm_solver_cylinder_compression_picmi.py | 140 ++++++++-------- Python/pywarpx/picmi.py | 11 +- .../HybridPICSolveE.cpp | 2 +- 5 files changed, 153 insertions(+), 156 deletions(-) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt b/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt index 93ad79b3b05..d2ad31947dc
100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt +++ b/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt @@ -21,4 +21,4 @@ add_warpx_test( diags/diag1000100 # output OFF # dependency ) -label_warpx_test(test_rz_ohm_solver_cylinder_compression_picmi slow) \ No newline at end of file +label_warpx_test(test_rz_ohm_solver_cylinder_compression_picmi slow) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py index 19521ad9fd9..672ecf73290 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py @@ -14,13 +14,10 @@ import dill import numpy as np -from mpi4py import MPI as mpi -import pandas as pd -from scipy.interpolate import PchipInterpolator - import openpmd_api as io +from mpi4py import MPI as mpi -from pywarpx import callbacks, fields, libwarpx, picmi, amrex +from pywarpx import fields, picmi # amrex.throw_exception = 1 # amrex.signal_handling = 0 @@ -29,17 +26,21 @@ comm = mpi.COMM_WORLD -simulation = picmi.Simulation(warpx_serialize_initial_conditions=True, verbose=False, warpx_amrex_use_gpu_aware_mpi=True) +simulation = picmi.Simulation( + warpx_serialize_initial_conditions=True, + verbose=False, + warpx_amrex_use_gpu_aware_mpi=True, +) class PlasmaCylinderCompression(object): # B0 is chosen with all other quantities scaled by it n0 = 1e20 - T_i = 10 # eV + T_i = 10 # eV T_e = 0 - p0 = n0*constants.q_e*T_i + p0 = n0 * constants.q_e * T_i - B0 = np.sqrt(2*constants.mu0*p0) # Initial magnetic field strength (T) + B0 = np.sqrt(2 * constants.mu0 * p0) # Initial magnetic field strength (T) # Do a 2x uniform B-field compression dB = B0 @@ -52,8 +53,8 @@ class PlasmaCylinderCompression(object): delta_p = 
0.025 # Domain parameters - LX = 2.*R_c*1.05 # m - LY = 2.*R_c*1.05 + LX = 2.0 * R_c * 1.05 # m + LY = 2.0 * R_c * 1.05 LZ = 0.5 # m LT = 20 # ion cyclotron periods @@ -71,7 +72,15 @@ class PlasmaCylinderCompression(object): substeps = 20 def Bz(self, r): - return np.sqrt(self.B0**2 - 2.*constants.mu0*self.n0*constants.q_e*self.T_i/(1. + np.exp((r - self.R_p)/self.delta_p))) + return np.sqrt( + self.B0**2 + - 2.0 + * constants.mu0 + * self.n0 + * constants.q_e + * self.T_i + / (1.0 + np.exp((r - self.R_p) / self.delta_p)) + ) def __init__(self, test, verbose): self.test = test @@ -81,23 +90,22 @@ def __init__(self, test, verbose): self.Ly = self.LY self.Lz = self.LZ - self.DX = self.LX/self.NX - self.DY = self.LY/self.NY - self.DZ = self.LZ/self.NZ + self.DX = self.LX / self.NX + self.DY = self.LY / self.NY + self.DZ = self.LZ / self.NZ if comm.rank == 0: # Write uniform compression dataset to OpenPMD to exercise reading openPMD data # for the time varying external fields - xvec = np.linspace(-self.LX, self.LX, num=2*self.NX) - yvec = np.linspace(-self.LY,self.LY, num=2*self.NY) - zvec = np.linspace(-self.LZ, self.LZ, num=2*self.NZ) - XM, YM, ZM = np.meshgrid(xvec, yvec, zvec, indexing='ij') + xvec = np.linspace(-self.LX, self.LX, num=2 * self.NX) + yvec = np.linspace(-self.LY, self.LY, num=2 * self.NY) + zvec = np.linspace(-self.LZ, self.LZ, num=2 * self.NZ) + XM, YM, ZM = np.meshgrid(xvec, yvec, zvec, indexing="ij") RM = np.sqrt(XM**2 + YM**2) - - Ax_data = -0.5*YM*self.dB - Ay_data = 0.5*XM*self.dB + Ax_data = -0.5 * YM * self.dB + Ay_data = 0.5 * XM * self.dB Az_data = np.zeros_like(RM) # Write vector potential to file to exercise field loading via OPenPMD @@ -111,34 +119,27 @@ def __init__(self, test, verbose): A.grid_unit_SI = 1.0 A.axis_labels = ["x", "y", "z"] A.data_order = "C" - A.unit_dimension = {io.Unit_Dimension.M: 1.0, - io.Unit_Dimension.T: -2.0, - io.Unit_Dimension.I: -1.0, - io.Unit_Dimension.L: -1.0} + A.unit_dimension = { + io.Unit_Dimension.M: 
1.0, + io.Unit_Dimension.T: -2.0, + io.Unit_Dimension.I: -1.0, + io.Unit_Dimension.L: -1.0, + } Ax = A["x"] Ay = A["y"] Az = A["z"] - Ax.position = [0., 0.] - Ay.position = [0., 0.] - Az.position = [0., 0.] - - Ax_dataset = io.Dataset( - Ax_data.dtype, - Ax_data.shape - ) - - Ay_dataset = io.Dataset( - Ay_data.dtype, - Ay_data.shape - ) - - Az_dataset = io.Dataset( - Az_data.dtype, - Az_data.shape - ) - + Ax.position = [0.0, 0.0] + Ay.position = [0.0, 0.0] + Az.position = [0.0, 0.0] + + Ax_dataset = io.Dataset(Ax_data.dtype, Ax_data.shape) + + Ay_dataset = io.Dataset(Ay_data.dtype, Ay_data.shape) + + Az_dataset = io.Dataset(Az_data.dtype, Az_data.shape) + Ax.reset_dataset(Ax_dataset) Ay.reset_dataset(Ay_dataset) Az.reset_dataset(Az_dataset) @@ -165,7 +166,7 @@ def __init__(self, test, verbose): self.NZ = 128 else: self.total_steps = int(self.LT / self.DT) - self.diag_steps = 100 #self.total_steps // 200 + self.diag_steps = 100 # self.total_steps // 200 # dump all the current attributes to a dill pickle file if comm.rank == 0: @@ -179,8 +180,8 @@ def __init__(self, test, verbose): f"\tTi = {self.T_i:.1f} eV\n" f"\tn0 = {self.n0:.1e} m^-3\n" f"\tB0 = {self.B0:.2f} T\n", - f"\tDX/DY = {self.DX/self.l_i:.3f} c/w_pi\n" - f"\tDZ = {self.DZ/self.l_i:.3f} c/w_pi\n" + f"\tDX/DY = {self.DX / self.l_i:.3f} c/w_pi\n" + f"\tDZ = {self.DZ / self.l_i:.3f} c/w_pi\n", ) print( f"Plasma parameters:\n" @@ -191,7 +192,7 @@ def __init__(self, test, verbose): ) print( f"Numerical parameters:\n" - f"\tdz = {self.Lz/self.NZ:.1e} m\n" + f"\tdz = {self.Lz / self.NZ:.1e} m\n" f"\tdt = {self.dt:.1e} s\n" f"\tdiag steps = {self.diag_steps:d}\n" f"\ttotal steps = {self.total_steps:d}\n" @@ -231,14 +232,16 @@ def load_fields(self): By = fields.ByFPExternalWrapper(include_ghosts=False) Bz = fields.BzFPExternalWrapper(include_ghosts=False) - Bx[:,:] = 0. - By[:,:] = 0. 
- - XM, YM, ZM = np.meshgrid(Bz.mesh('x'), Bz.mesh('y'), Bz.mesh('z'), indexing='ij') - + Bx[:, :] = 0.0 + By[:, :] = 0.0 + + XM, YM, ZM = np.meshgrid( + Bz.mesh("x"), Bz.mesh("y"), Bz.mesh("z"), indexing="ij" + ) + RM = np.sqrt(XM**2 + YM**2) - Bz[:,:] = self.Bz(RM) + Bz[:, :] = self.Bz(RM) comm.Barrier() def setup_run(self): @@ -251,8 +254,8 @@ def setup_run(self): # Create grid self.grid = picmi.Cartesian3DGrid( number_of_cells=[self.NX, self.NY, self.NZ], - lower_bound=[-0.5*self.Lx, -0.5*self.Ly, -0.5*self.Lz], - upper_bound=[ 0.5*self.Lx, 0.5*self.Ly, 0.5*self.Lz], + lower_bound=[-0.5 * self.Lx, -0.5 * self.Ly, -0.5 * self.Lz], + upper_bound=[0.5 * self.Lx, 0.5 * self.Ly, 0.5 * self.Lz], lower_boundary_conditions=["dirichlet", "dirichlet", "periodic"], upper_boundary_conditions=["dirichlet", "dirichlet", "periodic"], lower_boundary_conditions_particles=["absorbing", "absorbing", "periodic"], @@ -271,7 +274,7 @@ def setup_run(self): ####################################################################### # External Field definition. 
Sigmoid starting around 2.5 us A_ext = { - 'uniform': { + "uniform": { "read_from_file": True, "path": "Afield.h5", "A_time_external_function": "1/(1+exp(5*(1-(t-t0_ramp)*sqrt(2)/tau_ramp)))", @@ -283,22 +286,21 @@ def setup_run(self): gamma=1.0, Te=self.T_e, n0=self.n0, - n_floor=0.05*self.n0, + n_floor=0.05 * self.n0, plasma_resistivity="if(rho<=rho_floor,eta_v,eta_p)", plasma_hyper_resistivity=1e-8, substeps=self.substeps, A_external=A_ext, tau_ramp=20e-6, t0_ramp=5e-6, - rho_floor=0.01*self.n0*constants.q_e, + rho_floor=0.01 * self.n0 * constants.q_e, eta_p=1e-8, - eta_v=1e-3 + eta_v=1e-3, ) simulation.solver = self.solver simulation.embedded_boundary = picmi.EmbeddedBoundary( - implicit_function="(x**2+y**2-R_w**2)", - R_w=self.R_c + implicit_function="(x**2+y**2-R_w**2)", R_w=self.R_c ) # Add field loader callback @@ -306,37 +308,33 @@ def setup_run(self): load_from_python=self.load_fields, warpx_do_divb_cleaning_external=True, load_B=True, - load_E=False + load_E=False, ) simulation.add_applied_field(B_ext) ####################################################################### # Particle types setup # ####################################################################### - r_omega = '(sqrt(x*x+y*y)*q_e*B0/m_p)' - dlnndr = '((-1/delta_p)/(1+exp(-(sqrt(x*x+y*y)-R_p)/delta_p)))' - vth = f'0.5*(-{r_omega}+sqrt({r_omega}*{r_omega}+4*q_e*T_i*{dlnndr}/m_p))' + r_omega = "(sqrt(x*x+y*y)*q_e*B0/m_p)" + dlnndr = "((-1/delta_p)/(1+exp(-(sqrt(x*x+y*y)-R_p)/delta_p)))" + vth = f"0.5*(-{r_omega}+sqrt({r_omega}*{r_omega}+4*q_e*T_i*{dlnndr}/m_p))" - momentum_expr = [ - f'y*{vth}', - f'-x*{vth}', - '0' - ] + momentum_expr = [f"y*{vth}", f"-x*{vth}", "0"] self.ions = picmi.Species( name="ions", charge="q_e", mass=self.M, initial_distribution=picmi.AnalyticDistribution( - density_expression=f"n0_p/(1+exp((sqrt(x*x+y*y)-R_p)/delta_p))", + density_expression="n0_p/(1+exp((sqrt(x*x+y*y)-R_p)/delta_p))", momentum_expressions=momentum_expr, - 
warpx_momentum_spread_expressions=[f'{str(self.vi_th)}']*3, - warpx_density_min=0.01*self.n0, + warpx_momentum_spread_expressions=[f"{str(self.vi_th)}"] * 3, + warpx_density_min=0.01 * self.n0, R_p=self.R_p, delta_p=self.delta_p, n0_p=self.n0, B0=self.B0, - T_i=self.T_i + T_i=self.T_i, ), ) simulation.add_species( @@ -366,8 +364,8 @@ def setup_run(self): grid=self.grid, period=self.diag_steps, data_list=["B", "E", "rho", "divB", "T_ions", "J", "J_displacement"], - warpx_format='openpmd', - warpx_openpmd_backend='h5', + warpx_format="openpmd", + warpx_openpmd_backend="h5", ) simulation.add_diagnostic(field_diag) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py index 3fe82c4d71a..461943842ad 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py @@ -3,7 +3,7 @@ # --- Test script for the kinetic-fluid hybrid model in WarpX wherein ions are # --- treated as kinetic particles and electrons as an isothermal, inertialess # --- background fluid. The script demonstrates the use of this model to -# --- simulate adiabatic compression of a plasma cylinder initialized from an +# --- simulate adiabatic compression of a plasma cylinder initialized from an # --- analytical Grad-Shafranov solution. 
import argparse @@ -13,13 +13,10 @@ import dill import numpy as np -from mpi4py import MPI as mpi -import pandas as pd -from scipy.interpolate import PchipInterpolator - import openpmd_api as io +from mpi4py import MPI as mpi -from pywarpx import callbacks, fields, libwarpx, picmi, amrex +from pywarpx import fields, picmi # amrex.throw_exception = 1 # amrex.signal_handling = 0 @@ -28,17 +25,21 @@ comm = mpi.COMM_WORLD -simulation = picmi.Simulation(warpx_serialize_initial_conditions=True, verbose=False, warpx_amrex_use_gpu_aware_mpi=True) +simulation = picmi.Simulation( + warpx_serialize_initial_conditions=True, + verbose=False, + warpx_amrex_use_gpu_aware_mpi=True, +) class PlasmaCylinderCompression(object): # B0 is chosen with all other quantities scaled by it n0 = 1e20 - T_i = 10 # eV + T_i = 10 # eV T_e = 0 - p0 = n0*constants.q_e*T_i + p0 = n0 * constants.q_e * T_i - B0 = np.sqrt(2*constants.mu0*p0) # External magnetic field strength (T) + B0 = np.sqrt(2 * constants.mu0 * p0) # External magnetic field strength (T) # Do a 2x uniform B-field compression dB = B0 @@ -52,7 +53,7 @@ class PlasmaCylinderCompression(object): # Domain parameters LR = R_c # m - LZ = 0.25*R_c # m + LZ = 0.25 * R_c # m LT = 20 # ion cyclotron periods DT = 1e-3 # ion cyclotron periods @@ -68,7 +69,15 @@ class PlasmaCylinderCompression(object): substeps = 25 def Bz(self, r): - return np.sqrt(self.B0**2 - 2.*constants.mu0*self.n0*constants.q_e*self.T_i/(1. 
+ np.exp((r - self.R_p)/self.delta_p))) + return np.sqrt( + self.B0**2 + - 2.0 + * constants.mu0 + * self.n0 + * constants.q_e + * self.T_i + / (1.0 + np.exp((r - self.R_p) / self.delta_p)) + ) def __init__(self, test, verbose): self.test = test @@ -77,15 +86,15 @@ def __init__(self, test, verbose): self.Lr = self.LR self.Lz = self.LZ - self.DR = self.LR/self.NR - self.DZ = self.LZ/self.NZ + self.DR = self.LR / self.NR + self.DZ = self.LZ / self.NZ # Write A to OpenPMD for a uniform B field to exercise file based loader if comm.rank == 0: mvec = np.array([0]) - rvec = np.linspace(0,2*self.LR, num=2*self.NR) - zvec = np.linspace(-self.LZ, self.LZ, num=2*self.NZ) - MM, RM, ZM = np.meshgrid(mvec, rvec, zvec, indexing='ij') + rvec = np.linspace(0, 2 * self.LR, num=2 * self.NR) + zvec = np.linspace(-self.LZ, self.LZ, num=2 * self.NZ) + MM, RM, ZM = np.meshgrid(mvec, rvec, zvec, indexing="ij") # Write uniform compression dataset to OpenPMD to exercise reading openPMD data # for the time varying external fields @@ -93,9 +102,9 @@ def __init__(self, test, verbose): Az_data = np.zeros_like(RM) # Zero padded outside of domain - At_data = 0.5*RM*self.dB + At_data = 0.5 * RM * self.dB - # Write vector potential to file to exercise field loading via + # Write vector potential to file to exercise field loading via series = io.Series("Afield.h5", io.Access.create) it = series.iterations[0] @@ -108,34 +117,27 @@ def __init__(self, test, verbose): A.grid_unit_SI = 1.0 A.axis_labels = ["r", "z"] A.data_order = "C" - A.unit_dimension = {io.Unit_Dimension.M: 1.0, - io.Unit_Dimension.T: -2.0, - io.Unit_Dimension.I: -1.0, - io.Unit_Dimension.L: -1.0} + A.unit_dimension = { + io.Unit_Dimension.M: 1.0, + io.Unit_Dimension.T: -2.0, + io.Unit_Dimension.I: -1.0, + io.Unit_Dimension.L: -1.0, + } Ar = A["r"] At = A["t"] Az = A["z"] - Ar.position = [0., 0.] - At.position = [0., 0.] - Az.position = [0., 0.] 
- - Ar_dataset = io.Dataset( - Ar_data.dtype, - Ar_data.shape - ) - - At_dataset = io.Dataset( - At_data.dtype, - At_data.shape - ) - - Az_dataset = io.Dataset( - Az_data.dtype, - Az_data.shape - ) - + Ar.position = [0.0, 0.0] + At.position = [0.0, 0.0] + Az.position = [0.0, 0.0] + + Ar_dataset = io.Dataset(Ar_data.dtype, Ar_data.shape) + + At_dataset = io.Dataset(At_data.dtype, At_data.shape) + + Az_dataset = io.Dataset(Az_data.dtype, Az_data.shape) + Ar.reset_dataset(Ar_dataset) At.reset_dataset(At_dataset) Az.reset_dataset(Az_dataset) @@ -162,7 +164,7 @@ def __init__(self, test, verbose): self.NZ = 128 else: self.total_steps = int(self.LT / self.DT) - self.diag_steps = 100 #self.total_steps // 200 + self.diag_steps = 100 # self.total_steps // 200 # dump all the current attributes to a dill pickle file if comm.rank == 0: @@ -176,8 +178,8 @@ def __init__(self, test, verbose): f"\tTi = {self.T_i:.1f} eV\n" f"\tn0 = {self.n0:.1e} m^-3\n" f"\tB0 = {self.B0:.2f} T\n", - f"\tDR = {self.DR/self.l_i:.3f} c/w_pi\n" - f"\tDZ = {self.DZ/self.l_i:.3f} c/w_pi\n" + f"\tDR = {self.DR / self.l_i:.3f} c/w_pi\n" + f"\tDZ = {self.DZ / self.l_i:.3f} c/w_pi\n", ) print( f"Plasma parameters:\n" @@ -188,7 +190,7 @@ def __init__(self, test, verbose): ) print( f"Numerical parameters:\n" - f"\tdz = {self.Lz/self.NZ:.1e} m\n" + f"\tdz = {self.Lz / self.NZ:.1e} m\n" f"\tdt = {self.dt:.1e} s\n" f"\tdiag steps = {self.diag_steps:d}\n" f"\ttotal steps = {self.total_steps:d}\n" @@ -228,12 +230,12 @@ def load_fields(self): Bt = fields.ByFPExternalWrapper(include_ghosts=False) Bz = fields.BzFPExternalWrapper(include_ghosts=False) - Br[:,:] = 0. - Bt[:,:] = 0. 
- - RM, ZM = np.meshgrid(Bz.mesh('r'), Bz.mesh('z'), indexing='ij') + Br[:, :] = 0.0 + Bt[:, :] = 0.0 + + RM, ZM = np.meshgrid(Bz.mesh("r"), Bz.mesh("z"), indexing="ij") - Bz[:,:] = self.Bz(RM)*(RM <= self.R_c) + Bz[:, :] = self.Bz(RM) * (RM <= self.R_c) comm.Barrier() def setup_run(self): @@ -246,7 +248,7 @@ def setup_run(self): # Create grid self.grid = picmi.CylindricalGrid( number_of_cells=[self.NR, self.NZ], - lower_bound=[0., -self.Lz / 2.0], + lower_bound=[0.0, -self.Lz / 2.0], upper_bound=[self.Lr, self.Lz / 2.0], lower_boundary_conditions=["none", "periodic"], upper_boundary_conditions=["dirichlet", "periodic"], @@ -266,7 +268,7 @@ def setup_run(self): ####################################################################### # External Field definition. Sigmoid starting around 2.5 us A_ext = { - 'uniform': { + "uniform": { "read_from_file": True, "path": "Afield.h5", "A_time_external_function": "1/(1+exp(5*(1-(t-t0_ramp)*sqrt(2)/tau_ramp)))", @@ -278,16 +280,16 @@ def setup_run(self): gamma=1.0, Te=self.T_e, n0=self.n0, - n_floor=0.05*self.n0, + n_floor=0.05 * self.n0, plasma_resistivity="if(rho<=rho_floor,eta_v,eta_p)", plasma_hyper_resistivity=1e-8, substeps=self.substeps, A_external=A_ext, tau_ramp=20e-6, t0_ramp=5e-6, - rho_floor=0.01*self.n0*constants.q_e, + rho_floor=0.01 * self.n0 * constants.q_e, eta_p=1e-8, - eta_v=1e-3 + eta_v=1e-3, ) simulation.solver = self.solver @@ -296,37 +298,33 @@ def setup_run(self): load_from_python=self.load_fields, warpx_do_divb_cleaning_external=True, load_B=True, - load_E=False + load_E=False, ) simulation.add_applied_field(B_ext) ####################################################################### # Particle types setup # ####################################################################### - r_omega = '(sqrt(x*x+y*y)*q_e*B0/m_p)' - dlnndr = '((-1/delta_p)/(1+exp(-(sqrt(x*x+y*y)-R_p)/delta_p)))' - vth = f'0.5*(-{r_omega}+sqrt({r_omega}*{r_omega}+4*q_e*T_i*{dlnndr}/m_p))' + r_omega = "(sqrt(x*x+y*y)*q_e*B0/m_p)" + 
dlnndr = "((-1/delta_p)/(1+exp(-(sqrt(x*x+y*y)-R_p)/delta_p)))" + vth = f"0.5*(-{r_omega}+sqrt({r_omega}*{r_omega}+4*q_e*T_i*{dlnndr}/m_p))" - momentum_expr = [ - f'y*{vth}', - f'-x*{vth}', - '0' - ] + momentum_expr = [f"y*{vth}", f"-x*{vth}", "0"] self.ions = picmi.Species( name="ions", charge="q_e", mass=self.M, initial_distribution=picmi.AnalyticDistribution( - density_expression=f"n0_p/(1+exp((sqrt(x*x+y*y)-R_p)/delta_p))", + density_expression="n0_p/(1+exp((sqrt(x*x+y*y)-R_p)/delta_p))", momentum_expressions=momentum_expr, - warpx_momentum_spread_expressions=[f'{str(self.vi_th)}']*3, - warpx_density_min=0.01*self.n0, + warpx_momentum_spread_expressions=[f"{str(self.vi_th)}"] * 3, + warpx_density_min=0.01 * self.n0, R_p=self.R_p, delta_p=self.delta_p, n0_p=self.n0, B0=self.B0, - T_i=self.T_i + T_i=self.T_i, ), ) simulation.add_species( @@ -356,8 +354,8 @@ def setup_run(self): grid=self.grid, period=self.diag_steps, data_list=["B", "E", "rho", "divB", "T_ions", "J", "J_displacement"], - warpx_format='openpmd', - warpx_openpmd_backend='h5', + warpx_format="openpmd", + warpx_openpmd_backend="h5", ) simulation.add_diagnostic(field_diag) diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index c40dad41ba7..93c13e5c44a 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1977,14 +1977,15 @@ def solver_initialize_inputs(self): ), ) for field_name, field_dict in self.A_external.items(): - if "read_from_file" in field_dict.keys() and field_dict["read_from_file"]: + if ( + "read_from_file" in field_dict.keys() + and field_dict["read_from_file"] + ): pywarpx.external_vector_potential.__setattr__( - f"{field_name}.read_from_file", - field_dict["read_from_file"] + f"{field_name}.read_from_file", field_dict["read_from_file"] ) pywarpx.external_vector_potential.__setattr__( - f"{field_name}.path", - field_dict["path"] + f"{field_name}.path", field_dict["path"] ) else: pywarpx.external_vector_potential.__setattr__( diff --git 
a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 3d40ef7fb68..4dd14b60539 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -763,7 +763,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( if (r > 0.5_rt*dr) { nabla2Jz += T_Algo::Dr_rDr_over_r(Jz, r, dr, coefs_r, n_coefs_r, i, j, 0, 0); } - + Ez(i, j, 0) -= eta_h * nabla2Jz; } From b41e7174e4dc7b43e7a5c50adc853e4df0a171d4 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 5 Feb 2025 16:17:18 -0800 Subject: [PATCH 51/86] Adding benchmarks and cleaning up CI test a bit. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- .../CMakeLists.txt | 10 +-- .../analysis_default_regression.py | 1 + ...d_ohm_solver_cylinder_compression_picmi.py | 67 +++++-------------- ...z_ohm_solver_cylinder_compression_picmi.py | 64 +++++------------- ...ohm_solver_cylinder_compression_picmi.json | 28 ++++++++ ...ohm_solver_cylinder_compression_picmi.json | 28 ++++++++ 6 files changed, 95 insertions(+), 103 deletions(-) create mode 120000 Examples/Tests/ohm_solver_cylinder_compression/analysis_default_regression.py create mode 100644 Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json create mode 100644 Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json diff --git a/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt b/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt index 93ad79b3b05..0aa6da5af97 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt +++ b/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 3 # dims 2 # nprocs "inputs_test_3d_ohm_solver_cylinder_compression_picmi.py --test" # inputs - analysis_3d.py # 
analysis - diags/diag1000100 # output + analysis_default_regression.py # analysis + diags/diag1000020 # output OFF # dependency ) label_warpx_test(test_3d_ohm_solver_cylinder_compression_picmi slow) @@ -16,9 +16,9 @@ add_warpx_test( test_rz_ohm_solver_cylinder_compression_picmi # name RZ # dims 2 # nprocs - "inputs_test_3d_ohm_solver_cylinder_compression_picmi.py --test" # inputs - analysis_rz.py # analysis - diags/diag1000100 # output + "inputs_test_rz_ohm_solver_cylinder_compression_picmi.py --test" # inputs + analysis_default_regression.py # analysis + diags/diag1000020 # output OFF # dependency ) label_warpx_test(test_rz_ohm_solver_cylinder_compression_picmi slow) \ No newline at end of file diff --git a/Examples/Tests/ohm_solver_cylinder_compression/analysis_default_regression.py b/Examples/Tests/ohm_solver_cylinder_compression/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/ohm_solver_cylinder_compression/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py index 19521ad9fd9..a86f26866c2 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py @@ -12,24 +12,18 @@ import sys from pathlib import Path -import dill import numpy as np from mpi4py import MPI as mpi -import pandas as pd -from scipy.interpolate import PchipInterpolator import openpmd_api as io -from pywarpx import callbacks, fields, libwarpx, picmi, amrex - -# amrex.throw_exception = 1 -# amrex.signal_handling = 0 +from pywarpx import fields, picmi constants = picmi.constants comm = 
mpi.COMM_WORLD -simulation = picmi.Simulation(warpx_serialize_initial_conditions=True, verbose=False, warpx_amrex_use_gpu_aware_mpi=True) +simulation = picmi.Simulation(warpx_serialize_initial_conditions=True, verbose=False) class PlasmaCylinderCompression(object): @@ -161,16 +155,12 @@ def __init__(self, test, verbose): if self.test: self.total_steps = 20 self.diag_steps = self.total_steps // 5 - self.NR = 64 - self.NZ = 128 + self.NX = 128 + self.NY = 128 + self.NZ = 64 else: self.total_steps = int(self.LT / self.DT) - self.diag_steps = 100 #self.total_steps // 200 - - # dump all the current attributes to a dill pickle file - if comm.rank == 0: - with open("sim_parameters.dpkl", "wb") as f: - dill.dump(self, f) + self.diag_steps = 100 # print out plasma parameters if comm.rank == 0: @@ -350,24 +340,21 @@ def setup_run(self): # Add diagnostics # ####################################################################### - # callbacks.installafterEsolve(self.check_fields) - - # particle_diag = picmi.ParticleDiagnostic( - # name="particles", - # period=self.diag_steps, - # species=[self.ions], - # data_list=["ux", "uy", "uz", "x", "z", "weighting"], - # warpx_format='openpmd', - # warpx_openpmd_backend='h5', - # ) - # simulation.add_diagnostic(particle_diag) + if self.test: + particle_diag = picmi.ParticleDiagnostic( + name="diag1", + period=self.diag_steps, + species=[self.ions], + data_list=["ux", "uy", "uz", "x", "z", "weighting"], + warpx_format='plotfile', + ) + simulation.add_diagnostic(particle_diag) field_diag = picmi.FieldDiagnostic( - name="fields", + name="diag1", grid=self.grid, period=self.diag_steps, data_list=["B", "E", "rho", "divB", "T_ions", "J", "J_displacement"], - warpx_format='openpmd', - warpx_openpmd_backend='h5', + warpx_format='plotfile', ) simulation.add_diagnostic(field_diag) @@ -384,26 +371,6 @@ def setup_run(self): simulation.initialize_inputs() simulation.initialize_warpx() - # def check_fields(self): - # step = 
simulation.extension.warpx.getistep(lev=0) - 1 - - # if not (step == 1 or step % self.diag_steps == 0): - # return - - # rho = fields.RhoFPWrapper(include_ghosts=False)[:, :] - # Jiy = fields.JyFPWrapper(include_ghosts=False)[...] / self.J0 - # Jy = fields.JyFPPlasmaWrapper(include_ghosts=False)[...] / self.J0 - # Bx = fields.BxFPWrapper(include_ghosts=False)[...] / self.B0 - # By = fields.ByFPWrapper(include_ghosts=False)[...] / self.B0 - # Bz = fields.BzFPWrapper(include_ghosts=False)[...] / self.B0 - - # if libwarpx.amr.ParallelDescriptor.MyProc() != 0: - # return - - # # save the fields to file - # with open(f"diags/fields/fields_{step:06d}.npz", "wb") as f: - # np.savez(f, rho=rho, Jiy=Jiy, Jy=Jy, Bx=Bx, By=By, Bz=Bz) - ########################## # parse input parameters diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py index 3fe82c4d71a..2c075a3569f 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py @@ -19,16 +19,13 @@ import openpmd_api as io -from pywarpx import callbacks, fields, libwarpx, picmi, amrex - -# amrex.throw_exception = 1 -# amrex.signal_handling = 0 +from pywarpx import fields, picmi constants = picmi.constants comm = mpi.COMM_WORLD -simulation = picmi.Simulation(warpx_serialize_initial_conditions=True, verbose=False, warpx_amrex_use_gpu_aware_mpi=True) +simulation = picmi.Simulation(warpx_serialize_initial_conditions=True, verbose=False) class PlasmaCylinderCompression(object): @@ -62,7 +59,7 @@ class PlasmaCylinderCompression(object): NZ = 32 # Starting number of particles per cell - NPPC = 500 + NPPC = 100 # Number of substeps used to update B substeps = 25 @@ -159,15 +156,10 @@ def __init__(self, test, 
verbose): self.total_steps = 20 self.diag_steps = self.total_steps // 5 self.NR = 64 - self.NZ = 128 + self.NZ = 16 else: self.total_steps = int(self.LT / self.DT) - self.diag_steps = 100 #self.total_steps // 200 - - # dump all the current attributes to a dill pickle file - if comm.rank == 0: - with open("sim_parameters.dpkl", "wb") as f: - dill.dump(self, f) + self.diag_steps = 100 # print out plasma parameters if comm.rank == 0: @@ -340,24 +332,21 @@ def setup_run(self): # Add diagnostics # ####################################################################### - # callbacks.installafterEsolve(self.check_fields) - - # particle_diag = picmi.ParticleDiagnostic( - # name="particles", - # period=self.diag_steps, - # species=[self.ions], - # data_list=["ux", "uy", "uz", "x", "z", "weighting"], - # warpx_format='openpmd', - # warpx_openpmd_backend='h5', - # ) - # simulation.add_diagnostic(particle_diag) + if self.test: + particle_diag = picmi.ParticleDiagnostic( + name="diag1", + period=self.diag_steps, + species=[self.ions], + data_list=["ux", "uy", "uz", "x", "z", "weighting"], + warpx_format='plotfile', + ) + simulation.add_diagnostic(particle_diag) field_diag = picmi.FieldDiagnostic( - name="fields", + name="diag1", grid=self.grid, period=self.diag_steps, data_list=["B", "E", "rho", "divB", "T_ions", "J", "J_displacement"], - warpx_format='openpmd', - warpx_openpmd_backend='h5', + warpx_format='plotfile', ) simulation.add_diagnostic(field_diag) @@ -374,27 +363,6 @@ def setup_run(self): simulation.initialize_inputs() simulation.initialize_warpx() - # def check_fields(self): - # step = simulation.extension.warpx.getistep(lev=0) - 1 - - # if not (step == 1 or step % self.diag_steps == 0): - # return - - # rho = fields.RhoFPWrapper(include_ghosts=False)[:, :] - # Jiy = fields.JyFPWrapper(include_ghosts=False)[...] / self.J0 - # Jy = fields.JyFPPlasmaWrapper(include_ghosts=False)[...] / self.J0 - # Bx = fields.BxFPWrapper(include_ghosts=False)[...] 
/ self.B0 - # By = fields.ByFPWrapper(include_ghosts=False)[...] / self.B0 - # Bz = fields.BzFPWrapper(include_ghosts=False)[...] / self.B0 - - # if libwarpx.amr.ParallelDescriptor.MyProc() != 0: - # return - - # # save the fields to file - # with open(f"diags/fields/fields_{step:06d}.npz", "wb") as f: - # np.savez(f, rho=rho, Jiy=Jiy, Jy=Jy, Bx=Bx, By=By, Bz=Bz) - - ########################## # parse input parameters ########################## diff --git a/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json b/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json new file mode 100644 index 00000000000..6d7c340872e --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json @@ -0,0 +1,28 @@ +{ + "ions": { + "particle_momentum_x": 3.128227749711046e-18, + "particle_momentum_y": 3.1249880463984e-18, + "particle_momentum_z": 3.016641338454627e-18, + "particle_position_x": 13625.821554080689, + "particle_position_y": 2285.883651970563, + "particle_theta": 114866.80388469782, + "particle_weight": 2.5250688831450644e+18 + }, + "lev=0": { + "Br": 0.008896568565884552, + "Bt": 0.011688602810050318, + "Bz": 11.684975894837464, + "Er": 124145.17803384457, + "Et": 3329.8840869367214, + "Ez": 129.6252379274751, + "T_ions": 7914.7337616377135, + "divB": 2.584189295657386e-12, + "jr": 7751878.488355549, + "jr_displacement": 7706240.764620148, + "jt": 21429184.190657146, + "jt_displacement": 51771964.95048943, + "jz": 5806260.486605658, + "jz_displacement": 5965142.135938229, + "rho": 7968.093059919787 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json b/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json new file mode 100644 index 00000000000..3e3cf2a6087 --- /dev/null +++ 
b/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json @@ -0,0 +1,28 @@ +{ + "ions": { + "particle_momentum_x": 3.1282277497110458e-18, + "particle_momentum_y": 3.1249880463984007e-18, + "particle_momentum_z": 3.0166413384546275e-18, + "particle_position_x": 13625.821554080689, + "particle_position_y": 2285.8836519705624, + "particle_theta": 114866.80388469782, + "particle_weight": 2.5250688831450644e+18 + }, + "lev=0": { + "Br": 0.008896568565884543, + "Bt": 0.011688602810050335, + "Bz": 11.684975894837464, + "Er": 124145.17803384454, + "Et": 3329.8840869367186, + "Ez": 129.62523792747487, + "T_ions": 7914.7337616377135, + "divB": 2.586842458896982e-12, + "jr": 7751878.488355547, + "jr_displacement": 7706240.764620148, + "jt": 21429184.19065714, + "jt_displacement": 51771964.95048943, + "jz": 5806260.486605657, + "jz_displacement": 5965142.135938228, + "rho": 7968.093059919787 + } +} \ No newline at end of file From d151e962553c92e72a9bedb291367e6a0e8d1e84 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 6 Feb 2025 00:19:55 +0000 Subject: [PATCH 52/86] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../inputs_test_3d_ohm_solver_cylinder_compression_picmi.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py index c76e85d4e50..7dc7041277a 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py @@ -13,8 +13,6 @@ from pathlib import Path import numpy as np -from mpi4py import MPI as mpi - import openpmd_api as io from 
mpi4py import MPI as mpi @@ -344,7 +342,7 @@ def setup_run(self): period=self.diag_steps, species=[self.ions], data_list=["ux", "uy", "uz", "x", "z", "weighting"], - warpx_format='plotfile', + warpx_format="plotfile", ) simulation.add_diagnostic(particle_diag) field_diag = picmi.FieldDiagnostic( @@ -352,7 +350,7 @@ def setup_run(self): grid=self.grid, period=self.diag_steps, data_list=["B", "E", "rho", "divB", "T_ions", "J", "J_displacement"], - warpx_format='plotfile', + warpx_format="plotfile", ) simulation.add_diagnostic(field_diag) From ea55f193740e3326c2881085ce602d6cb80339e7 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 5 Feb 2025 16:25:51 -0800 Subject: [PATCH 53/86] More CI changes. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- ...z_ohm_solver_cylinder_compression_picmi.py | 28 ------------------- 1 file changed, 28 deletions(-) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py index a4c1eab8925..5f6bb48f22d 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py @@ -17,26 +17,12 @@ from mpi4py import MPI as mpi from pywarpx import fields, picmi -<<<<<<< HEAD -======= - -# amrex.throw_exception = 1 -# amrex.signal_handling = 0 ->>>>>>> 8a09559df44d91e699f973feee90cda8856f2b49 constants = picmi.constants comm = mpi.COMM_WORLD -<<<<<<< HEAD simulation = picmi.Simulation(warpx_serialize_initial_conditions=True, verbose=False) -======= -simulation = picmi.Simulation( - warpx_serialize_initial_conditions=True, - verbose=False, - warpx_amrex_use_gpu_aware_mpi=True, -) ->>>>>>> 8a09559df44d91e699f973feee90cda8856f2b49 class 
PlasmaCylinderCompression(object): @@ -171,16 +157,7 @@ def __init__(self, test, verbose): self.NZ = 16 else: self.total_steps = int(self.LT / self.DT) -<<<<<<< HEAD self.diag_steps = 100 -======= - self.diag_steps = 100 # self.total_steps // 200 - - # dump all the current attributes to a dill pickle file - if comm.rank == 0: - with open("sim_parameters.dpkl", "wb") as f: - dill.dump(self, f) ->>>>>>> 8a09559df44d91e699f973feee90cda8856f2b49 # print out plasma parameters if comm.rank == 0: @@ -363,12 +340,7 @@ def setup_run(self): grid=self.grid, period=self.diag_steps, data_list=["B", "E", "rho", "divB", "T_ions", "J", "J_displacement"], -<<<<<<< HEAD warpx_format='plotfile', -======= - warpx_format="openpmd", - warpx_openpmd_backend="h5", ->>>>>>> 8a09559df44d91e699f973feee90cda8856f2b49 ) simulation.add_diagnostic(field_diag) From 404cdc883f11c11967c90db32f6d26a6029fdeb8 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 6 Feb 2025 00:26:41 +0000 Subject: [PATCH 54/86] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../inputs_test_rz_ohm_solver_cylinder_compression_picmi.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py index 5f6bb48f22d..32299cef680 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py @@ -11,7 +11,6 @@ import sys from pathlib import Path -import dill import numpy as np import openpmd_api as io from mpi4py import MPI as mpi @@ -332,7 +331,7 @@ def setup_run(self): period=self.diag_steps, species=[self.ions], data_list=["ux", 
"uy", "uz", "x", "z", "weighting"], - warpx_format='plotfile', + warpx_format="plotfile", ) simulation.add_diagnostic(particle_diag) field_diag = picmi.FieldDiagnostic( @@ -340,7 +339,7 @@ def setup_run(self): grid=self.grid, period=self.diag_steps, data_list=["B", "E", "rho", "divB", "T_ions", "J", "J_displacement"], - warpx_format='plotfile', + warpx_format="plotfile", ) simulation.add_diagnostic(field_diag) @@ -357,6 +356,7 @@ def setup_run(self): simulation.initialize_inputs() simulation.initialize_warpx() + ########################## # parse input parameters ########################## From aff8938eed7b950dd59dfe82b86d995248dd318e Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 5 Feb 2025 16:27:21 -0800 Subject: [PATCH 55/86] Changing description in CI test input file for 3d case. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- .../inputs_test_3d_ohm_solver_cylinder_compression_picmi.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py index 7dc7041277a..2102ba66300 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py @@ -3,9 +3,8 @@ # --- Test script for the kinetic-fluid hybrid model in WarpX wherein ions are # --- treated as kinetic particles and electrons as an isothermal, inertialess # --- background fluid. The script demonstrates the use of this model to -# --- simulate magnetic reconnection in a force-free sheet. The setup is based -# --- on the problem described in Le et al. (2016) -# --- https://aip.scitation.org/doi/10.1063/1.4943893. 
+# --- simulate adiabatic compression of a plasma cylinder initialized from an +# --- analytical Grad-Shafranov solution. import argparse import shutil From ef83768a2723b5f839fac88b495bc7052d10a263 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 5 Feb 2025 16:54:49 -0800 Subject: [PATCH 56/86] Adding usage documentation to Parameters.rst Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- Docs/source/usage/parameters.rst | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index aaba7130b87..242b781111b 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -2535,6 +2535,24 @@ Maxwell solver: kinetic-fluid hybrid * ``hybrid_pic_model.substeps`` (`int`) optional (default ``10``) If ``algo.maxwell_solver`` is set to ``hybrid``, this sets the number of sub-steps to take during the B-field update. +* ``hybid_pic_model.add_external_fields`` (`bool`) optional (default ``false``) + If ``algo.maxwell_solver`` is set to ``hybrid``, this sets the hybrid solver to use split external fields defined in external_vector_potential inputs. + +* ``external_vector_potential.fields`` (list of `str`) optional (default ``empty``) + If ``hybid_pic_model.add_external_fields`` is set to ``true``, this adds a list names for external time varying vector potentials to be added to hybrid solver. + +* ``external_vector_potential..read_from_file`` (`bool`) optional (default ``false``) + If ``hybid_pic_model.add_external_fields`` is set to ``true``, this flag determines whether to load an external field or use an implcit function to evaluate teh time varying field. + +* ``external_vector_potential..path`` (`str`) optional (default ``""``) + If ``external_vector_potential..read_from_file`` is set to ``true``, sets the path to an OpenPMD file that can be loaded externally. 
+ +* ``external_vector_potential..A[x,y,z]_external_grid_function(x,y,z)`` (`str`) optional (default ``"0"``) + If ``external_vector_potential..read_from_file`` is set to ``false``, Sets the external vector potential to be populated by an implicit function (on the grid). + +* ``external_vector_potential..A_time_external_grid_function(t)`` (`str`) optional (default ``"1"``) + This sets the relative strngth of the external vector potential by a an implicit time function, which can compute the external B fields and E fields based on the time derivative of the function. + .. note:: Based on results from :cite:t:`param-Stanier2020` it is recommended to use From 0aa4144c9a718fdc2a2c83b9dc193cacaef2483c Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Thu, 6 Feb 2025 14:46:59 -0800 Subject: [PATCH 57/86] Adding usage documentation, holmstrom vacuum flag, and updated CI tests and benchmarks. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- Docs/source/refs.bib | 10 ++++ Docs/source/usage/parameters.rst | 9 ++-- ...d_ohm_solver_cylinder_compression_picmi.py | 15 +++--- ...z_ohm_solver_cylinder_compression_picmi.py | 5 +- Python/pywarpx/picmi.py | 8 +++ ...ohm_solver_cylinder_compression_picmi.json | 44 ++++++++-------- ...ohm_solver_cylinder_compression_picmi.json | 44 ++++++++-------- .../HybridPICModel/HybridPICModel.H | 2 + .../HybridPICModel/HybridPICModel.cpp | 2 + .../HybridPICSolveE.cpp | 52 ++++++++++++------- 10 files changed, 117 insertions(+), 74 deletions(-) diff --git a/Docs/source/refs.bib b/Docs/source/refs.bib index d6c81c34404..9d3bbd709d7 100644 --- a/Docs/source/refs.bib +++ b/Docs/source/refs.bib @@ -507,3 +507,13 @@ @article{Rhee1987 url = {https://doi.org/10.1063/1.1139314}, eprint = {https://pubs.aip.org/aip/rsi/article-pdf/58/2/240/19154912/240\_1\_online.pdf}, } + +@misc{holmstrom2013handlingvacuumregionshybrid, + title={Handling vacuum regions in a hybrid plasma solver}, + 
author={M. Holmstrom}, + year={2013}, + eprint={1301.0272}, + archivePrefix={arXiv}, + primaryClass={physics.space-ph}, + url={https://arxiv.org/abs/1301.0272}, +} \ No newline at end of file diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 242b781111b..2bff856b4b7 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -2535,6 +2535,9 @@ Maxwell solver: kinetic-fluid hybrid * ``hybrid_pic_model.substeps`` (`int`) optional (default ``10``) If ``algo.maxwell_solver`` is set to ``hybrid``, this sets the number of sub-steps to take during the B-field update. +* ``hybrid_pic_model.holmstrom_vacuum_region`` (`bool`) optional (default ``false``) + If ``algo.maxwell_solver`` is set to ``hybrid``, this sets the vacuum region handling of the generalized Ohm's Law to suppress vacuum fluctuations. :cite:t:`param-holmstrom2013handlingvacuumregionshybrid`. + * ``hybid_pic_model.add_external_fields`` (`bool`) optional (default ``false``) If ``algo.maxwell_solver`` is set to ``hybrid``, this sets the hybrid solver to use split external fields defined in external_vector_potential inputs. @@ -2545,13 +2548,13 @@ Maxwell solver: kinetic-fluid hybrid If ``hybid_pic_model.add_external_fields`` is set to ``true``, this flag determines whether to load an external field or use an implcit function to evaluate teh time varying field. * ``external_vector_potential..path`` (`str`) optional (default ``""``) - If ``external_vector_potential..read_from_file`` is set to ``true``, sets the path to an OpenPMD file that can be loaded externally. + If ``external_vector_potential..read_from_file`` is set to ``true``, sets the path to an OpenPMD file that can be loaded externally in :math:`weber/m`. 
* ``external_vector_potential..A[x,y,z]_external_grid_function(x,y,z)`` (`str`) optional (default ``"0"``) - If ``external_vector_potential..read_from_file`` is set to ``false``, Sets the external vector potential to be populated by an implicit function (on the grid). + If ``external_vector_potential..read_from_file`` is set to ``false``, Sets the external vector potential to be populated by an implicit function (on the grid) in :math:`weber/m`. * ``external_vector_potential..A_time_external_grid_function(t)`` (`str`) optional (default ``"1"``) - This sets the relative strngth of the external vector potential by a an implicit time function, which can compute the external B fields and E fields based on the time derivative of the function. + This sets the relative strength of the external vector potential by a dimensionless implicit time function, which can compute the external B fields and E fields based on the value and first time derivative of the function. .. note:: diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py index 2102ba66300..b8273f0e8cf 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py @@ -48,13 +48,13 @@ class PlasmaCylinderCompression(object): LY = 2.0 * R_c * 1.05 LZ = 0.5 # m - LT = 20 # ion cyclotron periods + LT = 10 # ion cyclotron periods DT = 1e-3 # ion cyclotron periods # Resolution parameters - NX = 256 - NY = 256 - NZ = 128 + NX = 128 + NY = 128 + NZ = 64 # Starting number of particles per cell NPPC = 100 @@ -153,9 +153,9 @@ def __init__(self, test, verbose): if self.test: self.total_steps = 20 self.diag_steps = self.total_steps // 5 - self.NX = 128 - self.NY = 128 - self.NZ = 64 + self.NX = 64 + 
self.NY = 64 + self.NZ = 32 else: self.total_steps = int(self.LT / self.DT) self.diag_steps = 100 @@ -277,6 +277,7 @@ def setup_run(self): plasma_resistivity="if(rho<=rho_floor,eta_v,eta_p)", plasma_hyper_resistivity=1e-8, substeps=self.substeps, + holmstrom_vacuum_region=True, A_external=A_ext, tau_ramp=20e-6, t0_ramp=5e-6, diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py index 32299cef680..e4afa886faf 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py @@ -39,7 +39,7 @@ class PlasmaCylinderCompression(object): # Flux Conserver radius R_c = 0.5 - # Plasma Radius (These values match GS solution in gs_psi.csv) + # Plasma Radius (These values control the analytical GS solution) R_p = 0.25 delta_p = 0.025 @@ -47,7 +47,7 @@ class PlasmaCylinderCompression(object): LR = R_c # m LZ = 0.25 * R_c # m - LT = 20 # ion cyclotron periods + LT = 10 # ion cyclotron periods DT = 1e-3 # ion cyclotron periods # Resolution parameters @@ -271,6 +271,7 @@ def setup_run(self): plasma_resistivity="if(rho<=rho_floor,eta_v,eta_p)", plasma_hyper_resistivity=1e-8, substeps=self.substeps, + holmstrom_vacuum_region=True, A_external=A_ext, tau_ramp=20e-6, t0_ramp=5e-6, diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 93c13e5c44a..11bdd553a1c 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1853,6 +1853,10 @@ class HybridPICSolver(picmistandard.base._ClassWithInit): substeps: int, default=100 Number of substeps to take when updating the B-field. + holmstrom_vacuum_region: bool, default=False + Flag to determine handling of vacuum region. 
Setting to True will solve the simplified Generalized Ohm's Law dropping the Hall and pressure terms. + This flag is useful for suppressing vacuum region fluctuations. A large resistivity value must be used when rho <= rho_floor. + Jx/y/z_external_function: str Function of space and time specifying external (non-plasma) currents. @@ -1892,6 +1896,7 @@ def __init__( plasma_resistivity=None, plasma_hyper_resistivity=None, substeps=None, + holmstrom_vacuum_region=None, Jx_external_function=None, Jy_external_function=None, Jz_external_function=None, @@ -1910,6 +1915,8 @@ def __init__( self.substeps = substeps + self.holmstrom_vacuum_region = holmstrom_vacuum_region + self.Jx_external_function = Jx_external_function self.Jy_external_function = Jy_external_function self.Jz_external_function = Jz_external_function @@ -1950,6 +1957,7 @@ def solver_initialize_inputs(self): ) pywarpx.hybridpicmodel.plasma_hyper_resistivity = self.plasma_hyper_resistivity pywarpx.hybridpicmodel.substeps = self.substeps + pywarpx.hybridpicmodel.holmstrom_vacuum_region = self.holmstrom_vacuum_region pywarpx.hybridpicmodel.__setattr__( "Jx_external_grid_function(x,y,z,t)", pywarpx.my_constants.mangle_expression( diff --git a/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json b/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json index 6d7c340872e..6998ab0da0e 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json +++ b/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json @@ -1,28 +1,28 @@ { "ions": { - "particle_momentum_x": 3.128227749711046e-18, - "particle_momentum_y": 3.1249880463984e-18, - "particle_momentum_z": 3.016641338454627e-18, - "particle_position_x": 13625.821554080689, - "particle_position_y": 2285.883651970563, - "particle_theta": 114866.80388469782, - "particle_weight": 2.5250688831450644e+18 + "particle_momentum_x": 
2.161895374235295e-16, + "particle_momentum_y": 2.161411203560475e-16, + "particle_momentum_z": 2.0520677781299486e-16, + "particle_position_x": 770127.8429835918, + "particle_position_y": 770025.4680569558, + "particle_position_z": 620825.7144136425, + "particle_weight": 1.0082612097115662e+19 }, "lev=0": { - "Br": 0.008896568565884552, - "Bt": 0.011688602810050318, - "Bz": 11.684975894837464, - "Er": 124145.17803384457, - "Et": 3329.8840869367214, - "Ez": 129.6252379274751, - "T_ions": 7914.7337616377135, - "divB": 2.584189295657386e-12, - "jr": 7751878.488355549, - "jr_displacement": 7706240.764620148, - "jt": 21429184.190657146, - "jt_displacement": 51771964.95048943, - "jz": 5806260.486605658, - "jz_displacement": 5965142.135938229, - "rho": 7968.093059919787 + "Bx": 0.6173939208933583, + "By": 0.6156654475976036, + "Bz": 2252.109000563476, + "Ex": 7803095.042882249, + "Ey": 7792996.137344913, + "Ez": 10116.81554132315, + "T_ions": 516200.00033491524, + "divB": 1.5610639489247467e-10, + "jx": 1067436275.7870781, + "jx_displacement": 2716391036.0076694, + "jy": 1086816146.375398, + "jy_displacement": 2736702988.4469233, + "jz": 156137768.19274762, + "jz_displacement": 170222197.6393573, + "rho": 384100.415250341 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json b/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json index 3e3cf2a6087..42a177f537c 100644 --- a/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json +++ b/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json @@ -1,28 +1,28 @@ { "ions": { - "particle_momentum_x": 3.1282277497110458e-18, - "particle_momentum_y": 3.1249880463984007e-18, - "particle_momentum_z": 3.0166413384546275e-18, - "particle_position_x": 13625.821554080689, - "particle_position_y": 2285.8836519705624, - "particle_theta": 114866.80388469782, - 
"particle_weight": 2.5250688831450644e+18 + "particle_momentum_x": 3.1279139840698964e-18, + "particle_momentum_y": 3.1249019874762554e-18, + "particle_momentum_z": 3.0166365667623666e-18, + "particle_position_x": 13625.881373895038, + "particle_position_y": 2285.883598780935, + "particle_theta": 114866.80236740559, + "particle_weight": 2.525068883145065e+18 }, "lev=0": { - "Br": 0.008896568565884543, - "Bt": 0.011688602810050335, - "Bz": 11.684975894837464, - "Er": 124145.17803384454, - "Et": 3329.8840869367186, - "Ez": 129.62523792747487, - "T_ions": 7914.7337616377135, - "divB": 2.586842458896982e-12, - "jr": 7751878.488355547, - "jr_displacement": 7706240.764620148, - "jt": 21429184.19065714, - "jt_displacement": 51771964.95048943, - "jz": 5806260.486605657, - "jz_displacement": 5965142.135938228, - "rho": 7968.093059919787 + "Br": 0.01217904585912652, + "Bt": 0.025667145483503906, + "Bz": 11.684988558642056, + "Er": 117654.48249411368, + "Et": 3705.6428969840963, + "Ez": 182.5019568964504, + "T_ions": 7914.730922747134, + "divB": 2.742922090926581e-12, + "jr": 7767403.865920181, + "jr_displacement": 8710530.146246921, + "jt": 21427740.289813034, + "jt_displacement": 51770521.049816445, + "jz": 5806273.707548495, + "jz_displacement": 6914160.6345753735, + "rho": 7968.091650164713 } } \ No newline at end of file diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index 086cd641ac8..8cbb8f1d16e 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -175,6 +175,8 @@ public: /** Number of substeps to take when evolving B */ int m_substeps = 10; + bool m_holmstrom_vacuum_region = false; + /** Electron temperature in eV */ amrex::Real m_elec_temp; /** Reference electron density */ diff --git 
a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 2bc3000d5f9..50bb432f736 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -33,6 +33,8 @@ void HybridPICModel::ReadParameters () // of sub steps can be specified by the user (defaults to 50). utils::parser::queryWithParser(pp_hybrid, "substeps", m_substeps); + utils::parser::queryWithParser(pp_hybrid, "holmstrom_vacuum_region", m_holmstrom_vacuum_region); + // The hybrid model requires an electron temperature, reference density // and exponent to be given. These values will be used to calculate the // electron pressure according to p = n0 * Te * (n/n0)^gamma diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 4dd14b60539..50d21806508 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -435,6 +435,8 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( const bool include_external_fields = hybrid_model->m_add_external_fields; + const bool holmstrom_vacuum_region = hybrid_model->m_holmstrom_vacuum_region; + auto & warpx = WarpX::GetInstance(); ablastr::fields::VectorField Bfield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_B_fp_external, 0); // lev=0 ablastr::fields::VectorField Efield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_E_fp_external, 0); // lev=0 @@ -635,10 +637,10 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // interpolate the nodal neE values to the Yee grid auto enE_r = Interp(enE, nodal, Er_stag, coarsen, i, j, 0, 0); - if (rho_val >= rho_val_limited) { - Er(i, j, 0) = (enE_r - grad_Pe) / rho_val_limited; + if (rho_val < rho_floor && 
holmstrom_vacuum_region) { + Er(i, j, 0) = 0._rt; } else { - Ez(i, j, 0) = 0._rt; + Er(i, j, 0) = (enE_r - grad_Pe) / rho_val_limited; } // Add resistivity only if E field value is used to update B @@ -653,7 +655,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Er(i, j, 0) -= eta_h * nabla2Jr; } - if (include_external_fields && rho_val >= rho_floor) { + if (include_external_fields && (rho_val >= rho_floor || !holmstrom_vacuum_region)) { Er(i, j, 0) -= Er_ext(i, j, 0); } }, @@ -695,10 +697,10 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // interpolate the nodal neE values to the Yee grid auto enE_t = Interp(enE, nodal, Et_stag, coarsen, i, j, 0, 1); - if (rho_val >= rho_val_limited) { - Et(i, j, 0) = (enE_t - grad_Pe) / rho_val_limited; - } else { + if (rho_val < rho_floor && holmstrom_vacuum_region) { Et(i, j, 0) = 0._rt; + } else { + Et(i, j, 0) = (enE_t - grad_Pe) / rho_val_limited; } // Add resistivity only if E field value is used to update B @@ -711,7 +713,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Et(i, j, 0) -= eta_h * nabla2Jt; } - if (include_external_fields && rho_val >= rho_floor) { + if (include_external_fields && (rho_val >= rho_floor || !holmstrom_vacuum_region)) { Et(i, j, 0) -= Et_ext(i, j, 0); } }, @@ -746,10 +748,10 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // interpolate the nodal neE values to the Yee grid auto enE_z = Interp(enE, nodal, Ez_stag, coarsen, i, j, 0, 2); - if (rho_val >= rho_val_limited) { - Ez(i, j, 0) = (enE_z - grad_Pe) / rho_val_limited; - } else { + if (rho_val < rho_floor && holmstrom_vacuum_region) { Ez(i, j, 0) = 0._rt; + } else { + Ez(i, j, 0) = (enE_z - grad_Pe) / rho_val_limited; } // Add resistivity only if E field value is used to update B @@ -767,7 +769,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Ez(i, j, 0) -= eta_h * nabla2Jz; } - if (include_external_fields && rho_val >= rho_floor) { + if (include_external_fields && (rho_val >= 
rho_floor || !holmstrom_vacuum_region)) { Ez(i, j, 0) -= Ez_ext(i, j, 0); } } @@ -811,6 +813,8 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( const bool include_external_fields = hybrid_model->m_add_external_fields; + const bool holmstrom_vacuum_region = hybrid_model->m_holmstrom_vacuum_region; + auto & warpx = WarpX::GetInstance(); ablastr::fields::VectorField Bfield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_B_fp_external, 0); // lev=0 ablastr::fields::VectorField Efield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_E_fp_external, 0); // lev=0 @@ -1009,7 +1013,11 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // interpolate the nodal neE values to the Yee grid auto enE_x = Interp(enE, nodal, Ex_stag, coarsen, i, j, k, 0); - Ex(i, j, k) = (enE_x - grad_Pe) / rho_val_limited; + if (rho_val < rho_floor && holmstrom_vacuum_region) { + Ex(i, j, k) = 0._rt; + } else { + Ex(i, j, k) = (enE_x - grad_Pe) / rho_val_limited; + } // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Ex(i, j, k) += eta(rho_val, jtot_val) * Jx(i, j, k); } @@ -1021,7 +1029,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Ex(i, j, k) -= eta_h * nabla2Jx; } - if (include_external_fields && rho_val >= rho_floor) { + if (include_external_fields && (rho_val >= rho_floor || !holmstrom_vacuum_region)) { Ex(i, j, k) -= Ex_ext(i, j, k); } }, @@ -1056,7 +1064,11 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // interpolate the nodal neE values to the Yee grid auto enE_y = Interp(enE, nodal, Ey_stag, coarsen, i, j, k, 1); - Ey(i, j, k) = (enE_y - grad_Pe) / rho_val_limited; + if (rho_val < rho_floor && holmstrom_vacuum_region) { + Ey(i, j, k) = 0._rt; + } else { + Ey(i, j, k) = (enE_y - grad_Pe) / rho_val_limited; + } // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Ey(i, j, k) += eta(rho_val, jtot_val) * Jy(i, j, k); } @@ -1068,7 +1080,7 @@ void 
FiniteDifferenceSolver::HybridPICSolveECartesian ( Ey(i, j, k) -= eta_h * nabla2Jy; } - if (include_external_fields && rho_val >= rho_floor) { + if (include_external_fields && (rho_val >= rho_floor || !holmstrom_vacuum_region)) { Ey(i, j, k) -= Ey_ext(i, j, k); } }, @@ -1103,7 +1115,11 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // interpolate the nodal neE values to the Yee grid auto enE_z = Interp(enE, nodal, Ez_stag, coarsen, i, j, k, 2); - Ez(i, j, k) = (enE_z - grad_Pe) / rho_val_limited; + if (rho_val < rho_val_limited && holmstrom_vacuum_region) { + Ez(i, j, k) = 0._rt; + } else { + Ez(i, j, k) = (enE_z - grad_Pe) / rho_val_limited; + } // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Ez(i, j, k) += eta(rho_val, jtot_val) * Jz(i, j, k); } @@ -1115,7 +1131,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Ez(i, j, k) -= eta_h * nabla2Jz; } - if (include_external_fields && rho_val >= rho_floor) { + if (include_external_fields && (rho_val >= rho_floor || !holmstrom_vacuum_region)) { Ez(i, j, k) -= Ez_ext(i, j, k); } } From 8dd2cfb06100648d9478d1532d2649fe4f5c609d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 6 Feb 2025 22:50:43 +0000 Subject: [PATCH 58/86] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- Docs/source/refs.bib | 6 +++--- Python/pywarpx/picmi.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Docs/source/refs.bib b/Docs/source/refs.bib index 9d3bbd709d7..a9def19c995 100644 --- a/Docs/source/refs.bib +++ b/Docs/source/refs.bib @@ -509,11 +509,11 @@ @article{Rhee1987 } @misc{holmstrom2013handlingvacuumregionshybrid, - title={Handling vacuum regions in a hybrid plasma solver}, + title={Handling vacuum regions in a hybrid plasma solver}, author={M. 
Holmstrom}, year={2013}, eprint={1301.0272}, archivePrefix={arXiv}, primaryClass={physics.space-ph}, - url={https://arxiv.org/abs/1301.0272}, -} \ No newline at end of file + url={https://arxiv.org/abs/1301.0272}, +} diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 11bdd553a1c..4c645a4ba75 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1856,7 +1856,7 @@ class HybridPICSolver(picmistandard.base._ClassWithInit): holmstrom_vacuum_region: bool, default=False Flag to determine handling of vacuum region. Setting to True will solve the simplified Generalized Ohm's Law dropping the Hall and pressure terms. This flag is useful for suppressing vacuum region fluctuations. A large resistivity value must be used when rho <= rho_floor. - + Jx/y/z_external_function: str Function of space and time specifying external (non-plasma) currents. From 3ae78aab0c1ffd20ef924716900bc66df1f432e1 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Thu, 6 Feb 2025 17:00:43 -0800 Subject: [PATCH 59/86] Adjusting CI tests and application of external fields in vacuum region. Signed-off-by: S. 
Eric Clark <25495882+clarkse@users.noreply.github.com> --- ...d_ohm_solver_cylinder_compression_picmi.py | 8 ++-- ...z_ohm_solver_cylinder_compression_picmi.py | 4 +- ...ohm_solver_cylinder_compression_picmi.json | 44 +++++++++---------- ...ohm_solver_cylinder_compression_picmi.json | 42 +++++++++--------- .../HybridPICSolveE.cpp | 14 +++--- 5 files changed, 56 insertions(+), 56 deletions(-) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py index b8273f0e8cf..59a6f338221 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py @@ -52,9 +52,9 @@ class PlasmaCylinderCompression(object): DT = 1e-3 # ion cyclotron periods # Resolution parameters - NX = 128 - NY = 128 - NZ = 64 + NX = 256 + NY = 256 + NZ = 128 # Starting number of particles per cell NPPC = 100 @@ -281,7 +281,7 @@ def setup_run(self): A_external=A_ext, tau_ramp=20e-6, t0_ramp=5e-6, - rho_floor=0.01 * self.n0 * constants.q_e, + rho_floor=0.05 * self.n0 * constants.q_e, eta_p=1e-8, eta_v=1e-3, ) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py index e4afa886faf..fe3682d1bff 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py @@ -58,7 +58,7 @@ class PlasmaCylinderCompression(object): NPPC = 100 # Number of substeps used to update B - substeps = 25 + substeps = 20 def Bz(self, r): return np.sqrt( @@ -275,7 +275,7 @@ def setup_run(self): 
A_external=A_ext, tau_ramp=20e-6, t0_ramp=5e-6, - rho_floor=0.01 * self.n0 * constants.q_e, + rho_floor=0.05 * self.n0 * constants.q_e, eta_p=1e-8, eta_v=1e-3, ) diff --git a/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json b/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json index 6998ab0da0e..86a13ed5b1c 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json +++ b/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json @@ -1,28 +1,28 @@ { "ions": { - "particle_momentum_x": 2.161895374235295e-16, - "particle_momentum_y": 2.161411203560475e-16, - "particle_momentum_z": 2.0520677781299486e-16, - "particle_position_x": 770127.8429835918, - "particle_position_y": 770025.4680569558, - "particle_position_z": 620825.7144136425, - "particle_weight": 1.0082612097115662e+19 + "particle_momentum_x": 2.1618821814775787e-16, + "particle_momentum_y": 2.1613949855651297e-16, + "particle_momentum_z": 2.052067792064273e-16, + "particle_position_x": 770127.6962188012, + "particle_position_y": 770025.3184340652, + "particle_position_z": 620825.7144209276, + "particle_weight": 1.0082612097115668e+19 }, "lev=0": { - "Bx": 0.6173939208933583, - "By": 0.6156654475976036, - "Bz": 2252.109000563476, - "Ex": 7803095.042882249, - "Ey": 7792996.137344913, - "Ez": 10116.81554132315, - "T_ions": 516200.00033491524, - "divB": 1.5610639489247467e-10, - "jx": 1067436275.7870781, - "jx_displacement": 2716391036.0076694, - "jy": 1086816146.375398, - "jy_displacement": 2736702988.4469233, - "jz": 156137768.19274762, - "jz_displacement": 170222197.6393573, - "rho": 384100.415250341 + "Bx": 0.5919247781229885, + "By": 0.5904710728423274, + "Bz": 2252.108905639935, + "Ex": 7790985.762089458, + "Ey": 7781061.639658057, + "Ez": 9733.133365231726, + "T_ions": 516199.4024777795, + "divB": 1.5822053522980272e-10, + "jx": 1067418047.9995593, + 
"jx_displacement": 2703884354.4611087, + "jy": 1086794990.5108862, + "jy_displacement": 2724082729.727557, + "jz": 156137757.1849215, + "jz_displacement": 168627730.5656837, + "rho": 384100.41525034094 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json b/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json index 42a177f537c..f63ec396d31 100644 --- a/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json +++ b/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json @@ -1,28 +1,28 @@ { "ions": { - "particle_momentum_x": 3.1279139840698964e-18, - "particle_momentum_y": 3.1249019874762554e-18, - "particle_momentum_z": 3.0166365667623666e-18, - "particle_position_x": 13625.881373895038, - "particle_position_y": 2285.883598780935, - "particle_theta": 114866.80236740559, + "particle_momentum_x": 3.127993944979249e-18, + "particle_momentum_y": 3.1249255520900954e-18, + "particle_momentum_z": 3.016637661609894e-18, + "particle_position_x": 13625.867179315941, + "particle_position_y": 2285.883613515636, + "particle_theta": 114866.80273240982, "particle_weight": 2.525068883145065e+18 }, "lev=0": { - "Br": 0.01217904585912652, - "Bt": 0.025667145483503906, - "Bz": 11.684988558642056, - "Er": 117654.48249411368, - "Et": 3705.6428969840963, - "Ez": 182.5019568964504, - "T_ions": 7914.730922747134, - "divB": 2.742922090926581e-12, - "jr": 7767403.865920181, - "jr_displacement": 8710530.146246921, - "jt": 21427740.289813034, - "jt_displacement": 51770521.049816445, - "jz": 5806273.707548495, - "jz_displacement": 6914160.6345753735, - "rho": 7968.091650164713 + "Br": 0.011689949004758091, + "Bt": 0.0217620818266206, + "Bz": 11.68509095868504, + "Er": 119831.66984726039, + "Et": 3555.4449800058346, + "Ez": 171.494771607016, + "T_ions": 7914.784402123198, + "divB": 2.6252712725405736e-12, + "jr": 
7762577.163779583, + "jr_displacement": 8431352.185849473, + "jt": 21428194.864771597, + "jt_displacement": 51771237.4899279, + "jz": 5806270.6558109, + "jz_displacement": 6602851.244161872, + "rho": 7968.092058156224 } } \ No newline at end of file diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 50d21806508..a027ad14cf2 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -655,7 +655,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Er(i, j, 0) -= eta_h * nabla2Jr; } - if (include_external_fields && (rho_val >= rho_floor || !holmstrom_vacuum_region)) { + if (include_external_fields && (rho_val >= rho_floor)) { Er(i, j, 0) -= Er_ext(i, j, 0); } }, @@ -713,7 +713,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Et(i, j, 0) -= eta_h * nabla2Jt; } - if (include_external_fields && (rho_val >= rho_floor || !holmstrom_vacuum_region)) { + if (include_external_fields && (rho_val >= rho_floor)) { Et(i, j, 0) -= Et_ext(i, j, 0); } }, @@ -769,7 +769,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Ez(i, j, 0) -= eta_h * nabla2Jz; } - if (include_external_fields && (rho_val >= rho_floor || !holmstrom_vacuum_region)) { + if (include_external_fields && (rho_val >= rho_floor)) { Ez(i, j, 0) -= Ez_ext(i, j, 0); } } @@ -1029,7 +1029,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Ex(i, j, k) -= eta_h * nabla2Jx; } - if (include_external_fields && (rho_val >= rho_floor || !holmstrom_vacuum_region)) { + if (include_external_fields && (rho_val >= rho_floor)) { Ex(i, j, k) -= Ex_ext(i, j, k); } }, @@ -1080,7 +1080,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Ey(i, j, k) -= eta_h * nabla2Jy; } - if (include_external_fields && (rho_val >= rho_floor || !holmstrom_vacuum_region)) { + if (include_external_fields && (rho_val >= 
rho_floor)) { Ey(i, j, k) -= Ey_ext(i, j, k); } }, @@ -1115,7 +1115,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // interpolate the nodal neE values to the Yee grid auto enE_z = Interp(enE, nodal, Ez_stag, coarsen, i, j, k, 2); - if (rho_val < rho_val_limited && holmstrom_vacuum_region) { + if (rho_val < rho_floor && holmstrom_vacuum_region) { Ez(i, j, k) = 0._rt; } else { Ez(i, j, k) = (enE_z - grad_Pe) / rho_val_limited; @@ -1131,7 +1131,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Ez(i, j, k) -= eta_h * nabla2Jz; } - if (include_external_fields && (rho_val >= rho_floor || !holmstrom_vacuum_region)) { + if (include_external_fields && (rho_val >= rho_floor)) { Ez(i, j, k) -= Ez_ext(i, j, k); } } From e4b285d62db83bbf25dcf876dcff32679cf7ee54 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Fri, 7 Feb 2025 10:48:06 -0800 Subject: [PATCH 60/86] Adding check around lookup of external fields to not lookup from registry unless initialized. Signed-off-by: S. 
Eric Clark <25495882+clarkse@users.noreply.github.com> --- .../FiniteDifferenceSolver/HybridPICSolveE.cpp | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index a027ad14cf2..4600690179d 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -397,8 +397,6 @@ void FiniteDifferenceSolver::HybridPICSolveE ( amrex::Abort(Utils::TextMsg::Err( "HybridSolveE: The hybrid-PIC electromagnetic solver algorithm must be used")); } - // auto& warpx = WarpX::GetInstance(); - // warpx.ApplyEfieldBoundary(lev, PatchType::fine); } #ifdef WARPX_DIM_RZ @@ -438,8 +436,11 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( const bool holmstrom_vacuum_region = hybrid_model->m_holmstrom_vacuum_region; auto & warpx = WarpX::GetInstance(); - ablastr::fields::VectorField Bfield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_B_fp_external, 0); // lev=0 - ablastr::fields::VectorField Efield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_E_fp_external, 0); // lev=0 + ablastr::fields::VectorField Bfield_external, Efield_external; = + if (include_external_fields) { + Bfield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_B_fp_external, 0); // lev=0 + Efield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_E_fp_external, 0); // lev=0 + } // Index type required for interpolating fields from their respective // staggering to the Ex, Ey, Ez locations @@ -816,8 +817,11 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( const bool holmstrom_vacuum_region = hybrid_model->m_holmstrom_vacuum_region; auto & warpx = WarpX::GetInstance(); - ablastr::fields::VectorField Bfield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_B_fp_external, 0); // lev=0 - ablastr::fields::VectorField Efield_external = 
warpx.m_fields.get_alldirs(FieldType::hybrid_E_fp_external, 0); // lev=0 + ablastr::fields::VectorField Bfield_external, Efield_external; = + if (include_external_fields) { + Bfield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_B_fp_external, 0); // lev=0 + Efield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_E_fp_external, 0); // lev=0 + } // Index type required for interpolating fields from their respective // staggering to the Ex, Ey, Ez locations From 066f72afc8df49546cd3ab369160e4aa97e2cafd Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 7 Feb 2025 18:48:42 +0000 Subject: [PATCH 61/86] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 4600690179d..da7a7863478 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -436,7 +436,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( const bool holmstrom_vacuum_region = hybrid_model->m_holmstrom_vacuum_region; auto & warpx = WarpX::GetInstance(); - ablastr::fields::VectorField Bfield_external, Efield_external; = + ablastr::fields::VectorField Bfield_external, Efield_external; = if (include_external_fields) { Bfield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_B_fp_external, 0); // lev=0 Efield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_E_fp_external, 0); // lev=0 @@ -817,7 +817,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( const bool holmstrom_vacuum_region = hybrid_model->m_holmstrom_vacuum_region; auto & warpx = WarpX::GetInstance(); - ablastr::fields::VectorField 
Bfield_external, Efield_external; = + ablastr::fields::VectorField Bfield_external, Efield_external; = if (include_external_fields) { Bfield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_B_fp_external, 0); // lev=0 Efield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_E_fp_external, 0); // lev=0 From f49494cdd039d4934c9c0ceeb3c5748db75ed822 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Fri, 7 Feb 2025 11:43:45 -0800 Subject: [PATCH 62/86] Removing typo. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index da7a7863478..02bce1ccb1f 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -436,7 +436,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( const bool holmstrom_vacuum_region = hybrid_model->m_holmstrom_vacuum_region; auto & warpx = WarpX::GetInstance(); - ablastr::fields::VectorField Bfield_external, Efield_external; = + ablastr::fields::VectorField Bfield_external, Efield_external; if (include_external_fields) { Bfield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_B_fp_external, 0); // lev=0 Efield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_E_fp_external, 0); // lev=0 @@ -817,7 +817,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( const bool holmstrom_vacuum_region = hybrid_model->m_holmstrom_vacuum_region; auto & warpx = WarpX::GetInstance(); - ablastr::fields::VectorField Bfield_external, Efield_external; = + ablastr::fields::VectorField Bfield_external, Efield_external; if (include_external_fields) { Bfield_external = 
warpx.m_fields.get_alldirs(FieldType::hybrid_B_fp_external, 0); // lev=0 Efield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_E_fp_external, 0); // lev=0 From dd6f5d40133fe4d6657ef1d53ffa528328f80086 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Fri, 7 Feb 2025 12:01:56 -0800 Subject: [PATCH 63/86] Fixed bug in EB maksing for field loading for external fields. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- Source/Initialization/WarpXInitData.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 257f2b6d3cd..625f8740727 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -1092,7 +1092,7 @@ void ComputeExternalFieldOnGridUsingParser_template ( auto const& mfzfab = mfz->array(mfi); amrex::Array4 update_fx_arr, update_fy_arr, update_fz_arr; - if (EB::enabled()) { + if (use_eb_flags && EB::enabled()) { update_fx_arr = eb_update_field[lev][0]->array(mfi); update_fy_arr = eb_update_field[lev][1]->array(mfi); update_fz_arr = eb_update_field[lev][2]->array(mfi); From 49043298a47c7d2978c2b45785cdf1f1bf142b1d Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Fri, 7 Feb 2025 13:40:57 -0800 Subject: [PATCH 64/86] Adding directory prefix for file writing to CI tests. Signed-off-by: S. 
Eric Clark <25495882+clarkse@users.noreply.github.com> --- .../inputs_test_3d_ohm_solver_cylinder_compression_picmi.py | 4 +++- .../inputs_test_rz_ohm_solver_cylinder_compression_picmi.py | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py index 59a6f338221..43eeeafb423 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py @@ -342,6 +342,7 @@ def setup_run(self): period=self.diag_steps, species=[self.ions], data_list=["ux", "uy", "uz", "x", "z", "weighting"], + write_dir='diags' warpx_format="plotfile", ) simulation.add_diagnostic(particle_diag) @@ -350,6 +351,7 @@ def setup_run(self): grid=self.grid, period=self.diag_steps, data_list=["B", "E", "rho", "divB", "T_ions", "J", "J_displacement"], + write_dir='diags' warpx_format="plotfile", ) simulation.add_diagnostic(field_diag) @@ -361,7 +363,7 @@ def setup_run(self): if comm.rank == 0: if Path.exists(Path("diags")): shutil.rmtree("diags") - Path("diags/fields").mkdir(parents=True, exist_ok=True) + Path("diags").mkdir(parents=True, exist_ok=True) # Initialize inputs and WarpX instance simulation.initialize_inputs() diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py index fe3682d1bff..477ee8609dd 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py @@ -332,6 +332,7 @@ def setup_run(self): 
period=self.diag_steps, species=[self.ions], data_list=["ux", "uy", "uz", "x", "z", "weighting"], + write_dir='diags' warpx_format="plotfile", ) simulation.add_diagnostic(particle_diag) @@ -340,6 +341,7 @@ def setup_run(self): grid=self.grid, period=self.diag_steps, data_list=["B", "E", "rho", "divB", "T_ions", "J", "J_displacement"], + write_dir='diags', warpx_format="plotfile", ) simulation.add_diagnostic(field_diag) @@ -351,7 +353,7 @@ def setup_run(self): if comm.rank == 0: if Path.exists(Path("diags")): shutil.rmtree("diags") - Path("diags/fields").mkdir(parents=True, exist_ok=True) + Path("diags").mkdir(parents=True, exist_ok=True) # Initialize inputs and WarpX instance simulation.initialize_inputs() From c3f40cf132b3472478d2640b76e67367bd2365bb Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Fri, 7 Feb 2025 13:42:56 -0800 Subject: [PATCH 65/86] Fixing typo in CI tests. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- .../inputs_test_3d_ohm_solver_cylinder_compression_picmi.py | 4 ++-- .../inputs_test_rz_ohm_solver_cylinder_compression_picmi.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py index 43eeeafb423..81fece44155 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py @@ -342,7 +342,7 @@ def setup_run(self): period=self.diag_steps, species=[self.ions], data_list=["ux", "uy", "uz", "x", "z", "weighting"], - write_dir='diags' + write_dir='diags', warpx_format="plotfile", ) simulation.add_diagnostic(particle_diag) @@ -351,7 +351,7 @@ def setup_run(self): grid=self.grid, 
period=self.diag_steps, data_list=["B", "E", "rho", "divB", "T_ions", "J", "J_displacement"], - write_dir='diags' + write_dir='diags', warpx_format="plotfile", ) simulation.add_diagnostic(field_diag) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py index 477ee8609dd..7aa0dabbac8 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py @@ -332,7 +332,7 @@ def setup_run(self): period=self.diag_steps, species=[self.ions], data_list=["ux", "uy", "uz", "x", "z", "weighting"], - write_dir='diags' + write_dir='diags', warpx_format="plotfile", ) simulation.add_diagnostic(particle_diag) From 2081e33dd3127b62092bf89e4a28508c97f5f765 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 7 Feb 2025 21:43:34 +0000 Subject: [PATCH 66/86] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../inputs_test_3d_ohm_solver_cylinder_compression_picmi.py | 4 ++-- .../inputs_test_rz_ohm_solver_cylinder_compression_picmi.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py index 81fece44155..745c95d3216 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py @@ -342,7 +342,7 @@ def setup_run(self): period=self.diag_steps, species=[self.ions], 
data_list=["ux", "uy", "uz", "x", "z", "weighting"], - write_dir='diags', + write_dir="diags", warpx_format="plotfile", ) simulation.add_diagnostic(particle_diag) @@ -351,7 +351,7 @@ def setup_run(self): grid=self.grid, period=self.diag_steps, data_list=["B", "E", "rho", "divB", "T_ions", "J", "J_displacement"], - write_dir='diags', + write_dir="diags", warpx_format="plotfile", ) simulation.add_diagnostic(field_diag) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py index 7aa0dabbac8..17747c5b212 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py @@ -332,7 +332,7 @@ def setup_run(self): period=self.diag_steps, species=[self.ions], data_list=["ux", "uy", "uz", "x", "z", "weighting"], - write_dir='diags', + write_dir="diags", warpx_format="plotfile", ) simulation.add_diagnostic(particle_diag) @@ -341,7 +341,7 @@ def setup_run(self): grid=self.grid, period=self.diag_steps, data_list=["B", "E", "rho", "divB", "T_ions", "J", "J_displacement"], - write_dir='diags', + write_dir="diags", warpx_format="plotfile", ) simulation.add_diagnostic(field_diag) From 863b6451e1ec362cee653d9a1888873e3fc99d88 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Fri, 7 Feb 2025 14:56:44 -0800 Subject: [PATCH 67/86] Adjusting CI invocation script. Signed-off-by: S. 
+    "analysis_default_regression.py --path diags/diag1000020"  # checksum
Eric Clark <25495882+clarkse@users.noreply.github.com> --- ...d_ohm_solver_cylinder_compression_picmi.py | 3 +- ...z_ohm_solver_cylinder_compression_picmi.py | 3 +- ...ohm_solver_cylinder_compression_picmi.json | 34 +++++++----------- ...ohm_solver_cylinder_compression_picmi.json | 36 ++++++++----------- 4 files changed, 29 insertions(+), 47 deletions(-) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py index 745c95d3216..4d0ab4b2474 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py @@ -277,7 +277,6 @@ def setup_run(self): plasma_resistivity="if(rho<=rho_floor,eta_v,eta_p)", plasma_hyper_resistivity=1e-8, substeps=self.substeps, - holmstrom_vacuum_region=True, A_external=A_ext, tau_ramp=20e-6, t0_ramp=5e-6, @@ -350,7 +349,7 @@ def setup_run(self): name="diag1", grid=self.grid, period=self.diag_steps, - data_list=["B", "E", "rho", "divB", "T_ions", "J", "J_displacement"], + data_list=["B", "E", "rho"], write_dir="diags", warpx_format="plotfile", ) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py index 17747c5b212..8c65f88ae79 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py @@ -271,7 +271,6 @@ def setup_run(self): plasma_resistivity="if(rho<=rho_floor,eta_v,eta_p)", plasma_hyper_resistivity=1e-8, substeps=self.substeps, - holmstrom_vacuum_region=True, A_external=A_ext, tau_ramp=20e-6, 
t0_ramp=5e-6, @@ -340,7 +339,7 @@ def setup_run(self): name="diag1", grid=self.grid, period=self.diag_steps, - data_list=["B", "E", "rho", "divB", "T_ions", "J", "J_displacement"], + data_list=["B", "E", "rho"], write_dir="diags", warpx_format="plotfile", ) diff --git a/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json b/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json index 86a13ed5b1c..3a01e8f8c1c 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json +++ b/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json @@ -1,28 +1,20 @@ { "ions": { - "particle_momentum_x": 2.1618821814775787e-16, - "particle_momentum_y": 2.1613949855651297e-16, - "particle_momentum_z": 2.052067792064273e-16, - "particle_position_x": 770127.6962188012, - "particle_position_y": 770025.3184340652, - "particle_position_z": 620825.7144209276, - "particle_weight": 1.0082612097115668e+19 + "particle_momentum_x": 2.1618998071121926e-16, + "particle_momentum_y": 2.1613997914988703e-16, + "particle_momentum_z": 2.0520680743076253e-16, + "particle_position_x": 770081.8277125143, + "particle_position_y": 769979.5801777747, + "particle_position_z": 620825.7141402963, + "particle_weight": 1.0082612097115652e+19 }, "lev=0": { - "Bx": 0.5919247781229885, - "By": 0.5904710728423274, - "Bz": 2252.108905639935, - "Ex": 7790985.762089458, - "Ey": 7781061.639658057, - "Ez": 9733.133365231726, - "T_ions": 516199.4024777795, - "divB": 1.5822053522980272e-10, - "jx": 1067418047.9995593, - "jx_displacement": 2703884354.4611087, - "jy": 1086794990.5108862, - "jy_displacement": 2724082729.727557, - "jz": 156137757.1849215, - "jz_displacement": 168627730.5656837, + "Bx": 0.5340987325583662, + "By": 0.5360837796264808, + "Bz": 2252.108905639938, + "Ex": 10520426.833244782, + "Ey": 10496181.343018861, + "Ez": 9096.090953218165, "rho": 384100.41525034094 } } \ No 
newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json b/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json index f63ec396d31..f6573843bf4 100644 --- a/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json +++ b/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json @@ -1,28 +1,20 @@ { "ions": { - "particle_momentum_x": 3.127993944979249e-18, - "particle_momentum_y": 3.1249255520900954e-18, - "particle_momentum_z": 3.016637661609894e-18, - "particle_position_x": 13625.867179315941, - "particle_position_y": 2285.883613515636, - "particle_theta": 114866.80273240982, - "particle_weight": 2.525068883145065e+18 + "particle_momentum_x": 3.1280628665674638e-18, + "particle_momentum_y": 3.1252946705877305e-18, + "particle_momentum_z": 3.0166413039610812e-18, + "particle_position_x": 13625.292837926521, + "particle_position_y": 2285.8836297014673, + "particle_theta": 114866.8034719477, + "particle_weight": 2.5250688831450644e+18 }, "lev=0": { - "Br": 0.011689949004758091, - "Bt": 0.0217620818266206, - "Bz": 11.68509095868504, - "Er": 119831.66984726039, - "Et": 3555.4449800058346, - "Ez": 171.494771607016, - "T_ions": 7914.784402123198, - "divB": 2.6252712725405736e-12, - "jr": 7762577.163779583, - "jr_displacement": 8431352.185849473, - "jt": 21428194.864771597, - "jt_displacement": 51771237.4899279, - "jz": 5806270.6558109, - "jz_displacement": 6602851.244161872, - "rho": 7968.092058156224 + "Br": 0.009197642866247434, + "Bt": 0.011930809637653323, + "Bz": 11.68496740335286, + "Er": 153881.49352243243, + "Et": 4738.287247858425, + "Ez": 129.67990827147068, + "rho": 7968.099628420868 } } \ No newline at end of file From cd68984d125786fcea5b00436be42a243f190b01 Mon Sep 17 00:00:00 2001 From: "S. 
Subject: [PATCH 69/86] Updating benchmarks and fixing clang-tidy issue.
diff --git a/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json b/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json index f6573843bf4..6fd2ca04fce 100644 --- a/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json +++ b/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json @@ -1,20 +1,20 @@ { - "ions": { - "particle_momentum_x": 3.1280628665674638e-18, - "particle_momentum_y": 3.1252946705877305e-18, - "particle_momentum_z": 3.0166413039610812e-18, - "particle_position_x": 13625.292837926521, - "particle_position_y": 2285.8836297014673, - "particle_theta": 114866.8034719477, - "particle_weight": 2.5250688831450644e+18 - }, "lev=0": { - "Br": 0.009197642866247434, - "Bt": 0.011930809637653323, - "Bz": 11.68496740335286, - "Er": 153881.49352243243, - "Et": 4738.287247858425, - "Ez": 129.67990827147068, - "rho": 7968.099628420868 + "Br": 0.01190012639573578, + "Bt": 0.011313481779415917, + "Bz": 11.684908684984164, + "Er": 154581.58512851578, + "Et": 4798.276941148807, + "Ez": 193.22344271401872, + "rho": 7968.182346905438 + }, + "ions": { + "particle_momentum_x": 3.1125151786241107e-18, + "particle_momentum_y": 3.119385993047207e-18, + "particle_momentum_z": 3.0289560038617916e-18, + "particle_position_x": 13628.662686419664, + "particle_position_y": 2285.6952310457755, + "particle_theta": 115055.48935725243, + "particle_weight": 2.525423582445981e+18 } } \ No newline at end of file diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H index bc47530a34a..e25d14672d2 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H @@ -70,7 +70,7 @@ public: void AllocateLevelMFs ( 
ablastr::fields::MultiFabRegister & fields, int lev, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm, - const int ncomps, + int ncomps, const amrex::IntVect& ngEB, const amrex::IntVect& Ex_nodal_flag, const amrex::IntVect& Ey_nodal_flag, From aef570f64e23404ce2218a21b459d05af0fd0c43 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Mon, 10 Feb 2025 16:53:49 -0800 Subject: [PATCH 70/86] Cleaning up to pass clang-tidy. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- .../FiniteDifferenceSolver/ComputeCurlA.cpp | 1 - .../FiniteDifferenceSolver.H | 2 +- .../HybridPICModel/ExternalVectorPotential.H | 4 +-- .../ExternalVectorPotential.cpp | 32 +++++++++---------- .../HybridPICModel/HybridPICModel.H | 4 +-- .../HybridPICModel/HybridPICModel.cpp | 4 +-- .../HybridPICSolveE.cpp | 12 +++---- Source/Initialization/WarpXInitData.cpp | 2 +- 8 files changed, 30 insertions(+), 31 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp b/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp index b1f8408f43f..d71eead1f75 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp @@ -22,7 +22,6 @@ #include using namespace amrex; -using warpx::fields::FieldType; void FiniteDifferenceSolver::ComputeCurlA ( ablastr::fields::VectorField& Bfield, diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index ead754f4c1c..bcac1bcf0db 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -192,7 +192,7 @@ class FiniteDifferenceSolver void ComputeCurlA ( ablastr::fields::VectorField& Bfield, ablastr::fields::VectorField const& Afield, - std::array< std::unique_ptr,3> const& eb_update_E, + std::array< 
std::unique_ptr,3> const& eb_update_B, int lev ); private: diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H index e25d14672d2..71be73d5693 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H @@ -91,8 +91,8 @@ public: std::array< std::unique_ptr,3> const& eb_update); void UpdateHybridExternalFields ( - const amrex::Real t, - const amrex::Real dt + amrex::Real t, + amrex::Real dt ); }; diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp index 69d906668e1..f8b2e604cf1 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp @@ -43,22 +43,22 @@ ExternalVectorPotential::ReadParameters () m_Ax_ext_grid_function.resize(m_nFields); m_Ay_ext_grid_function.resize(m_nFields); m_Az_ext_grid_function.resize(m_nFields); - for (std::string & field : m_Ax_ext_grid_function) field = "0.0"; - for (std::string & field : m_Ay_ext_grid_function) field = "0.0"; - for (std::string & field : m_Az_ext_grid_function) field = "0.0"; + for (std::string & field : m_Ax_ext_grid_function) { field = "0.0"; } + for (std::string & field : m_Ay_ext_grid_function) { field = "0.0"; } + for (std::string & field : m_Az_ext_grid_function) { field = "0.0"; } m_A_external_parser.resize(m_nFields); m_A_external.resize(m_nFields); m_A_ext_time_function.resize(m_nFields); - for (std::string & field_time : m_A_ext_time_function) field_time = "1.0"; + for (std::string & field_time : m_A_ext_time_function) {field_time = "1.0"; } m_A_external_time_parser.resize(m_nFields); 
m_A_time_scale.resize(m_nFields); m_read_A_from_file.resize(m_nFields); m_external_file_path.resize(m_nFields); - for (std::string & file_name : m_external_file_path) file_name = ""; + for (std::string & file_name : m_external_file_path) { file_name = ""; } for (int i = 0; i < m_nFields; ++i) { bool read_from_file = false; @@ -97,7 +97,7 @@ ExternalVectorPotential::AllocateLevelMFs ( { using ablastr::fields::Direction; for (std::string const & field_name : m_field_names) { - std::string Aext_field = field_name + std::string{"_Aext"}; + const std::string Aext_field = field_name + std::string{"_Aext"}; fields.alloc_init(Aext_field, Direction{0}, lev, amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, 0.0_rt); @@ -108,7 +108,7 @@ ExternalVectorPotential::AllocateLevelMFs ( lev, amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, 0.0_rt); - std::string curlAext_field = field_name + std::string{"_curlAext"}; + const std::string curlAext_field = field_name + std::string{"_curlAext"}; fields.alloc_init(curlAext_field, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, 0.0_rt); @@ -298,15 +298,15 @@ ExternalVectorPotential::ZeroFieldinEB ( [=] AMREX_GPU_DEVICE (int i, int j, int k){ // Skip field update in the embedded boundaries - if (update_Fx_arr && update_Fx_arr(i, j, k) == 0) Fx(i, j, k) = 0_rt; + if (update_Fx_arr && update_Fx_arr(i, j, k) == 0) { Fx(i, j, k) = 0_rt; } }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ - if (update_Fy_arr && update_Fy_arr(i, j, k) == 0) Fy(i, j, k) = 0_rt; + if (update_Fy_arr && update_Fy_arr(i, j, k) == 0) { Fy(i, j, k) = 0_rt; } }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ - if (update_Fz_arr && update_Fz_arr(i, j, k) == 0) Fz(i, j, k) = 0_rt; + if (update_Fz_arr && update_Fz_arr(i, j, k) == 0) { Fz(i, j, k) = 0_rt; } } ); } @@ -333,16 +333,16 @@ ExternalVectorPotential::UpdateHybridExternalFields (const amrex::Real t, const } for (int i = 0; i < m_nFields; ++i) { - std::string const Aext_field = 
m_field_names[i] + std::string{"_Aext"}; - std::string const curlAext_field = m_field_names[i] + std::string{"_curlAext"}; + const std::string Aext_field = m_field_names[i] + std::string{"_Aext"}; + const std::string curlAext_field = m_field_names[i] + std::string{"_curlAext"}; // Get B-field Scaling Factor - amrex::Real scale_factor_B = m_A_time_scale[i](t); + const amrex::Real scale_factor_B = m_A_time_scale[i](t); // Get dA/dt scaling factor based on time centered FD around t - amrex::Real sf_l = m_A_time_scale[i](t-0.5_rt*dt); - amrex::Real sf_r = m_A_time_scale[i](t+0.5_rt*dt); - amrex::Real scale_factor_E = -(sf_r - sf_l)/dt; + const amrex::Real sf_l = m_A_time_scale[i](t-0.5_rt*dt); + const amrex::Real sf_r = m_A_time_scale[i](t+0.5_rt*dt); + const amrex::Real scale_factor_E = -(sf_r - sf_l)/dt; ablastr::fields::MultiLevelVectorField A_ext = warpx.m_fields.get_mr_levels_alldirs(Aext_field, warpx.finestLevel()); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index 8cbb8f1d16e..2a489e1c806 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -51,7 +51,7 @@ public: int lev, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm, - const int ncomps, + int ncomps, const amrex::IntVect& ngJ, const amrex::IntVect& ngRho, const amrex::IntVect& ngEB, @@ -65,7 +65,7 @@ public: const amrex::IntVect& Bx_nodal_flag, const amrex::IntVect& By_nodal_flag, const amrex::IntVect& Bz_nodal_flag - ); + ) const; void InitData (); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 50bb432f736..428e36bca08 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ 
b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -83,7 +83,7 @@ void HybridPICModel::AllocateLevelMFs ( const IntVect& Ez_nodal_flag, const IntVect& Bx_nodal_flag, const IntVect& By_nodal_flag, - const IntVect& Bz_nodal_flag) + const IntVect& Bz_nodal_flag) const { using ablastr::fields::Direction; @@ -610,7 +610,7 @@ WarpX::CalculateExternalCurlA() { auto & warpx = WarpX::GetInstance(); // Get reference to External Field Object - auto ext_vector = warpx.m_hybrid_pic_model->m_external_vector_potential.get(); + auto* ext_vector = warpx.m_hybrid_pic_model->m_external_vector_potential.get(); ext_vector->CalculateExternalCurlA(); } diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 02bce1ccb1f..b750a7e4f20 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -615,7 +615,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( if (update_Er_arr && update_Er_arr(i, j, 0) == 0) { return; } // Interpolate to get the appropriate charge density in space - Real rho_val = Interp(rho, nodal, Er_stag, coarsen, i, j, 0, 0); + const Real rho_val = Interp(rho, nodal, Er_stag, coarsen, i, j, 0, 0); Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field @@ -676,7 +676,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( } // Interpolate to get the appropriate charge density in space - Real rho_val = Interp(rho, nodal, Et_stag, coarsen, i, j, 0, 0); + const Real rho_val = Interp(rho, nodal, Et_stag, coarsen, i, j, 0, 0); Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field @@ -726,7 +726,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( if (update_Ez_arr && update_Ez_arr(i, j, 0) == 0) { return; } // Interpolate to get the appropriate charge density in 
space - Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, 0, 0); + const Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, 0, 0); Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field @@ -994,7 +994,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( if (update_Ex_arr && update_Ex_arr(i, j, k) == 0) { return; } // Interpolate to get the appropriate charge density in space - Real rho_val = Interp(rho, nodal, Ex_stag, coarsen, i, j, k, 0); + const Real rho_val = Interp(rho, nodal, Ex_stag, coarsen, i, j, k, 0); Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field @@ -1045,7 +1045,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( if (update_Ey_arr && update_Ey_arr(i, j, k) == 0) { return; } // Interpolate to get the appropriate charge density in space - Real rho_val = Interp(rho, nodal, Ey_stag, coarsen, i, j, k, 0); + const Real rho_val = Interp(rho, nodal, Ey_stag, coarsen, i, j, k, 0); Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field @@ -1096,7 +1096,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( if (update_Ez_arr && update_Ez_arr(i, j, k) == 0) { return; } // Interpolate to get the appropriate charge density in space - Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, k, 0); + const Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, k, 0); Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 625f8740727..0e605697df2 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -1066,7 +1066,7 @@ void ComputeExternalFieldOnGridUsingParser_template ( auto dx_lev = geom.CellSizeArray(); const RealBox& real_box = geom.ProbDomain(); - amrex::IntVect refratio = (lev > 0 ) ? 
warpx.RefRatio(lev-1) : amrex::IntVect(1); + amrex::IntVect refratio = (lev > 0 ) ? WarpX::RefRatio(lev-1) : amrex::IntVect(1); if (patch_type == PatchType::coarse) { for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { dx_lev[idim] = dx_lev[idim] * refratio[idim]; From f6174ea943884e99b6f82e5e397c9f2119180623 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Tue, 11 Feb 2025 09:42:52 -0800 Subject: [PATCH 71/86] A few more tidy ups. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- .../HybridPICModel/ExternalVectorPotential.cpp | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp index f8b2e604cf1..7b337e89454 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp @@ -32,12 +32,7 @@ ExternalVectorPotential::ReadParameters () WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!m_field_names.empty(), "No external field names defined in external_vector_potential.fields"); -// #if defined(WARPX_DIM_RZ) -// WARPX_ALWAYS_ASSERT_WITH_MESSAGE(false, -// "External Time Varying Fields in the Hybrid module is currently not supported. 
Coming Soon!"); -// #endif - - m_nFields = m_field_names.size(); + m_nFields = static_cast(m_field_names.size()); // Resize vectors and set defaults m_Ax_ext_grid_function.resize(m_nFields); @@ -147,7 +142,7 @@ ExternalVectorPotential::InitData () for (int i = 0; i < m_nFields; ++i) { - std::string Aext_field = m_field_names[i] + std::string{"_Aext"}; + const std::string Aext_field = m_field_names[i] + std::string{"_Aext"}; if (m_read_A_from_file[i]) { // Read A fields from file @@ -241,8 +236,8 @@ ExternalVectorPotential::CalculateExternalCurlA (std::string& coil_name) auto & warpx = WarpX::GetInstance(); // Compute the curl of at at max and store - std::string Aext_field = coil_name + std::string{"_Aext"}; - std::string curlAext_field = coil_name + std::string{"_curlAext"}; + const std::string Aext_field = coil_name + std::string{"_Aext"}; + const std::string curlAext_field = coil_name + std::string{"_curlAext"}; ablastr::fields::MultiLevelVectorField A_ext = warpx.m_fields.get_mr_levels_alldirs(Aext_field, warpx.finestLevel()); From 2e099752aa4cbd6c53ff09505c92ef16b893fddb Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Tue, 11 Feb 2025 16:01:47 -0800 Subject: [PATCH 72/86] Tidying up in RZ. Signed-off-by: S. 
Eric Clark <25495882+clarkse@users.noreply.github.com> --- .../FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index bcac1bcf0db..0d12d104436 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -277,7 +277,7 @@ class FiniteDifferenceSolver void ComputeCurlACylindrical ( ablastr::fields::VectorField& Bfield, ablastr::fields::VectorField const& Afield, - std::array< std::unique_ptr,3> const& eb_update_E, + std::array< std::unique_ptr,3> const& eb_update_B, int lev ); From 7fb2217a6609c21062b78ec6b35681e18df9da13 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 12 Feb 2025 09:42:32 -0800 Subject: [PATCH 73/86] Update Docs/source/usage/parameters.rst Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- Docs/source/usage/parameters.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 2bff856b4b7..8d6268c9282 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -2545,7 +2545,7 @@ Maxwell solver: kinetic-fluid hybrid If ``hybid_pic_model.add_external_fields`` is set to ``true``, this adds a list names for external time varying vector potentials to be added to hybrid solver. * ``external_vector_potential..read_from_file`` (`bool`) optional (default ``false``) - If ``hybid_pic_model.add_external_fields`` is set to ``true``, this flag determines whether to load an external field or use an implcit function to evaluate teh time varying field. 
+ If ``hybid_pic_model.add_external_fields`` is set to ``true``, this flag determines whether to load an external field or use an implcit function to evaluate the time varying field. * ``external_vector_potential..path`` (`str`) optional (default ``""``) If ``external_vector_potential..read_from_file`` is set to ``true``, sets the path to an OpenPMD file that can be loaded externally in :math:`weber/m`. From c0eaf3a4da67a1f0681db61780e06c4ba4f920f0 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 12 Feb 2025 09:42:49 -0800 Subject: [PATCH 74/86] Update Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- .../inputs_test_3d_ohm_solver_cylinder_compression_picmi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py index 4d0ab4b2474..a49a3ccd584 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py @@ -99,7 +99,7 @@ def __init__(self, test, verbose): Ay_data = 0.5 * XM * self.dB Az_data = np.zeros_like(RM) - # Write vector potential to file to exercise field loading via OPenPMD + # Write vector potential to file to exercise field loading via OpenPMD series = io.Series("Afield.h5", io.Access.create) it = series.iterations[0] From be868fbccae29b3d96ca28f2f52196b67eac2d67 Mon Sep 17 00:00:00 2001 From: "S. 
Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 12 Feb 2025 10:02:26 -0800 Subject: [PATCH 75/86] Update Python/pywarpx/picmi.py Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- Python/pywarpx/picmi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 4c645a4ba75..3b4bc6859a9 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1854,7 +1854,7 @@ class HybridPICSolver(picmistandard.base._ClassWithInit): Number of substeps to take when updating the B-field. holmstrom_vacuum_region: bool, default=False - Flag to determine handling of vacuum region. Setting to True will solve the simplified Generalized Ohm's Law dropping the Hall and pressure terms. + Flag to determine handling of vacuum region. Setting to True will solve the simplified Generalized Ohm's Law dropping the Hall and pressure terms in the vacuum region. This flag is useful for suppressing vacuum region fluctuations. A large resistivity value must be used when rho <= rho_floor. Jx/y/z_external_function: str From 3d7915e18727d50eb370cfa86ae47585b068eade Mon Sep 17 00:00:00 2001 From: "S. 
Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 12 Feb 2025 10:04:37 -0800 Subject: [PATCH 76/86] Update Python/pywarpx/picmi.py Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- Python/pywarpx/picmi.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 3b4bc6859a9..61d4f689342 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1868,8 +1868,8 @@ class HybridPICSolver(picmistandard.base._ClassWithInit): A_external = { '': { 'Ax_external_function': , - 'Ax_external_function': , - 'Ax_external_function': , + 'Ay_external_function': , + 'Az_external_function': , 'A_time_external_function': }, ': {...}' From 9d468d95f4131c846f9e83cbd35b4542e4917384 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 12 Feb 2025 10:05:51 -0800 Subject: [PATCH 77/86] Update Python/pywarpx/picmi.py Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- Python/pywarpx/picmi.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 61d4f689342..fdd429f044e 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1985,10 +1985,7 @@ def solver_initialize_inputs(self): ), ) for field_name, field_dict in self.A_external.items(): - if ( - "read_from_file" in field_dict.keys() - and field_dict["read_from_file"] - ): + if field_dict.get("read_from_file", False): pywarpx.external_vector_potential.__setattr__( f"{field_name}.read_from_file", field_dict["read_from_file"] ) From cbd12537adb32d59c72d4b1c66372bad9fe68c54 Mon Sep 17 00:00:00 2001 From: "S. 
Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 12 Feb 2025 10:06:46 -0800 Subject: [PATCH 78/86] Update Python/pywarpx/picmi.py Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- Python/pywarpx/picmi.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index fdd429f044e..92ee048782f 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1976,8 +1976,8 @@ def solver_initialize_inputs(self): self.Jz_external_function, self.mangle_dict ), ) - pywarpx.hybridpicmodel.add_external_fields = self.add_external_fields - if self.add_external_fields: + if self.A_external is not None: + pywarpx.hybridpicmodel.add_external_fields = True pywarpx.external_vector_potential.__setattr__( "fields", pywarpx.my_constants.mangle_expression( From c5e5a2f869377a70391c634d802d3d1c1dbdee00 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 12 Feb 2025 10:07:17 -0800 Subject: [PATCH 79/86] Update Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp b/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp index d71eead1f75..30cbdb60508 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp @@ -19,8 +19,6 @@ #include "Utils/TextMsg.H" #include "WarpX.H" -#include - using namespace amrex; void FiniteDifferenceSolver::ComputeCurlA ( From 27cbea916de21c001df96ab59e0c3a52e54c161a Mon Sep 17 00:00:00 2001 From: "S. 
Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Thu, 13 Feb 2025 14:51:48 -0800 Subject: [PATCH 80/86] Addressing code review comments. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- ...d_ohm_solver_cylinder_compression_picmi.py | 2 +- Python/pywarpx/picmi.py | 2 +- .../HybridPICModel/ExternalVectorPotential.H | 6 +- .../ExternalVectorPotential.cpp | 82 +++++++++---------- .../HybridPICModel/HybridPICModel.cpp | 14 ---- .../FieldSolver/WarpXPushFieldsHybridPIC.cpp | 12 +++ Source/Fields.H | 26 +++--- Source/Initialization/WarpXInitData.cpp | 7 -- Source/WarpX.cpp | 20 +++-- 9 files changed, 87 insertions(+), 84 deletions(-) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py index a49a3ccd584..4f05fd15d83 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py @@ -39,7 +39,7 @@ class PlasmaCylinderCompression(object): # Flux Conserver radius R_c = 0.5 - # Plasma Radius (These values match GS solution in gs_psi.csv) + # Plasma Radius (These values control the analytical GS solution) R_p = 0.25 delta_p = 0.025 diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 92ee048782f..3cad7a34fc4 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1977,7 +1977,7 @@ def solver_initialize_inputs(self): ), ) if self.A_external is not None: - pywarpx.hybridpicmodel.add_external_fields = True + pywarpx.hybridpicmodel.add_external_fields = True pywarpx.external_vector_potential.__setattr__( "fields", pywarpx.my_constants.mangle_expression( diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H 
b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H index 71be73d5693..632ff2bd785 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H @@ -86,8 +86,10 @@ public: void CalculateExternalCurlA (std::string& coil_name); AMREX_FORCE_INLINE - void ZeroFieldinEB ( - ablastr::fields::VectorField const& Field, + void PopulateExternalFieldFromVectorPotential ( + ablastr::fields::VectorField const& dstField, + amrex::Real scale_factor, + ablastr::fields::VectorField const& srcField, std::array< std::unique_ptr,3> const& eb_update); void UpdateHybridExternalFields ( diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp index 7b337e89454..dc4a1a0898d 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp @@ -140,6 +140,8 @@ ExternalVectorPotential::InitData () using ablastr::fields::Direction; auto& warpx = WarpX::GetInstance(); + int A_time_dep_count = 0; + for (int i = 0; i < m_nFields; ++i) { const std::string Aext_field = m_field_names[i] + std::string{"_Aext"}; @@ -215,6 +217,16 @@ ExternalVectorPotential::InitData () utils::parser::makeParser(m_A_ext_time_function[i],{"t",})); m_A_time_scale[i] = m_A_external_time_parser[i]->compile<1>(); + const std::set A_time_ext_symbols = m_A_external_time_parser[i]->symbols(); + A_time_dep_count += A_time_ext_symbols.count("t"); + } + + if (A_time_dep_count > 0) { + ablastr::warn_manager::WMRecordWarning( + "HybridPIC ExternalVectorPotential", + "Coulomb Gauge is Expected, please be sure to have a divergence free A. 
Divergence cleaning of A to be implemented soon.", + ablastr::warn_manager::WarnPriority::low + ); } UpdateHybridExternalFields(warpx.gett_new(0), warpx.getdt(0)); @@ -235,7 +247,7 @@ ExternalVectorPotential::CalculateExternalCurlA (std::string& coil_name) using ablastr::fields::Direction; auto & warpx = WarpX::GetInstance(); - // Compute the curl of at at max and store + // Compute the curl of the reference A field (unscaled by time function) const std::string Aext_field = coil_name + std::string{"_Aext"}; const std::string curlAext_field = coil_name + std::string{"_curlAext"}; @@ -260,19 +272,25 @@ ExternalVectorPotential::CalculateExternalCurlA (std::string& coil_name) AMREX_FORCE_INLINE void -ExternalVectorPotential::ZeroFieldinEB ( - ablastr::fields::VectorField const& Field, +ExternalVectorPotential::PopulateExternalFieldFromVectorPotential ( + ablastr::fields::VectorField const& dstField, + amrex::Real scale_factor, + ablastr::fields::VectorField const& srcField, std::array< std::unique_ptr,3> const& eb_update) { // Loop through the grids, and over the tiles within each grid #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif - for ( MFIter mfi(*Field[0], TilingIfNotGPU()); mfi.isValid(); ++mfi ) { + for ( MFIter mfi(*dstField[0], TilingIfNotGPU()); mfi.isValid(); ++mfi ) { // Extract field data for this grid/tile - Array4 const& Fx = Field[0]->array(mfi); - Array4 const& Fy = Field[1]->array(mfi); - Array4 const& Fz = Field[2]->array(mfi); + Array4 const& Fx = dstField[0]->array(mfi); + Array4 const& Fy = dstField[1]->array(mfi); + Array4 const& Fz = dstField[2]->array(mfi); + + Array4 const& Sx = srcField[0]->const_array(mfi); + Array4 const& Sy = srcField[1]->const_array(mfi); + Array4 const& Sz = srcField[2]->const_array(mfi); // Extract structures indicating where the fields // should be updated, given the position of the embedded boundaries. 
@@ -284,24 +302,32 @@ ExternalVectorPotential::ZeroFieldinEB ( } // Extract tileboxes for which to loop - Box const& tbx = mfi.tilebox(Field[0]->ixType().toIntVect()); - Box const& tby = mfi.tilebox(Field[1]->ixType().toIntVect()); - Box const& tbz = mfi.tilebox(Field[2]->ixType().toIntVect()); + Box const& tbx = mfi.tilebox(dstField[0]->ixType().toIntVect()); + Box const& tby = mfi.tilebox(dstField[1]->ixType().toIntVect()); + Box const& tbz = mfi.tilebox(dstField[2]->ixType().toIntVect()); // Loop over the cells and update the fields amrex::ParallelFor(tbx, tby, tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k){ // Skip field update in the embedded boundaries - if (update_Fx_arr && update_Fx_arr(i, j, k) == 0) { Fx(i, j, k) = 0_rt; } + if (update_Fx_arr && update_Fx_arr(i, j, k) == 0) { return; } + + Fx(i,j,k) = scale_factor * Sx(i,j,k); }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ - if (update_Fy_arr && update_Fy_arr(i, j, k) == 0) { Fy(i, j, k) = 0_rt; } + // Skip field update in the embedded boundaries + if (update_Fy_arr && update_Fy_arr(i, j, k) == 0) { return; } + + Fy(i,j,k) = scale_factor * Sy(i,j,k); }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ - if (update_Fz_arr && update_Fz_arr(i, j, k) == 0) { Fz(i, j, k) = 0_rt; } + // Skip field update in the embedded boundaries + if (update_Fz_arr && update_Fz_arr(i, j, k) == 0) { return; } + + Fz(i,j,k) = scale_factor * Sz(i,j,k); } ); } @@ -319,14 +345,6 @@ ExternalVectorPotential::UpdateHybridExternalFields (const amrex::Real t, const ablastr::fields::MultiLevelVectorField E_ext = warpx.m_fields.get_mr_levels_alldirs(FieldType::hybrid_E_fp_external, warpx.finestLevel()); - // Zero E and B external fields - for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { - for (int idir = 0; idir < 3; ++idir) { - B_ext[lev][Direction{idir}]->setVal(0.0_rt); - E_ext[lev][Direction{idir}]->setVal(0.0_rt); - } - } - for (int i = 0; i < m_nFields; ++i) { const std::string Aext_field = m_field_names[i] + 
std::string{"_Aext"}; const std::string curlAext_field = m_field_names[i] + std::string{"_curlAext"}; @@ -345,31 +363,13 @@ ExternalVectorPotential::UpdateHybridExternalFields (const amrex::Real t, const warpx.m_fields.get_mr_levels_alldirs(curlAext_field, warpx.finestLevel()); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { - for (int idir = 0; idir < 3; ++idir) { - // Scale A_ext by - \partial A / \partial t and add to E_ext - amrex::MultiFab::LinComb( - *E_ext[lev][Direction{idir}], - 1.0_rt, *E_ext[lev][Direction{idir}], 0, - scale_factor_E, *A_ext[lev][Direction{idir}], 0, - 0, 1, 0); - - // Scale curlA_ext by the t function and add to B_ext - amrex::MultiFab::LinComb( - *B_ext[lev][Direction{idir}], - 1.0_rt, *B_ext[lev][Direction{idir}], 0, - scale_factor_B, *curlA_ext[lev][Direction{idir}], 0, - 0, 1, 0); - } + PopulateExternalFieldFromVectorPotential(E_ext[lev], scale_factor_E, A_ext[lev], warpx.GetEBUpdateEFlag()[lev]); + PopulateExternalFieldFromVectorPotential(B_ext[lev], scale_factor_B, curlA_ext[lev], warpx.GetEBUpdateBFlag()[lev]); for (int idir = 0; idir < 3; ++idir) { E_ext[lev][Direction{idir}]->FillBoundary(warpx.Geom(lev).periodicity()); B_ext[lev][Direction{idir}]->FillBoundary(warpx.Geom(lev).periodicity()); } - - if (EB::enabled()) { - ZeroFieldinEB(B_ext[lev], warpx.GetEBUpdateBFlag()[lev]); - ZeroFieldinEB(E_ext[lev], warpx.GetEBUpdateEFlag()[lev]); - } } } amrex::Gpu::streamSynchronize(); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 428e36bca08..3e5c04e9794 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -595,22 +595,8 @@ void HybridPICModel::FieldPush ( // Calculate the E-field from Ohm's law HybridPICSolveE(Efield, Jfield, Bfield, rhofield, eb_update_E, true); 
warpx.FillBoundaryE(ng, nodal_sync); - warpx.ApplyEfieldBoundary(0, PatchType::fine, t_old); // Push forward the B-field using Faraday's law warpx.EvolveB(dt, dt_type, t_old); warpx.FillBoundaryB(ng, nodal_sync); - warpx.ApplyBfieldBoundary(0, PatchType::fine, dt_type, t_old); -} - -void -WarpX::CalculateExternalCurlA() { - WARPX_PROFILE("WarpX::CalculateExternalCurlA()"); - - auto & warpx = WarpX::GetInstance(); - - // Get reference to External Field Object - auto* ext_vector = warpx.m_hybrid_pic_model->m_external_vector_potential.get(); - ext_vector->CalculateExternalCurlA(); - } diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index 048c4b29cc0..6f32f642d74 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -288,3 +288,15 @@ void WarpX::HybridPICDepositInitialRhoAndJ () ); } } + +void +WarpX::CalculateExternalCurlA() { + WARPX_PROFILE("WarpX::CalculateExternalCurlA()"); + + auto & warpx = WarpX::GetInstance(); + + // Get reference to External Field Object + auto* ext_vector = warpx.m_hybrid_pic_model->m_external_vector_potential.get(); + ext_vector->CalculateExternalCurlA(); + +} diff --git a/Source/Fields.H b/Source/Fields.H index e47beec2880..271d5a835a3 100644 --- a/Source/Fields.H +++ b/Source/Fields.H @@ -52,19 +52,19 @@ namespace warpx::fields hybrid_current_fp_external, /**< Used with Ohm's law solver. Stores external current */ hybrid_B_fp_external, /**< Used with Ohm's law solver. Stores external B field */ hybrid_E_fp_external, /**< Used with Ohm's law solver. Stores external E field */ - Efield_cp, //!< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level - Bfield_cp, //!< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level - current_cp, //!< Only used with MR. 
The current that is used as a source for the field solver, on the coarse patch of each level - rho_cp, //!< Only used with MR. The charge density that is used as a source for the field solver, on the coarse patch of each level - F_cp, //!< Only used with MR. Used for divE cleaning, on the coarse patch of each level - G_cp, //!< Only used with MR. Used for divB cleaning, on the coarse patch of each level - Efield_cax, //!< Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field - Bfield_cax, //!< Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field - E_external_particle_field, //!< Stores external particle fields provided by the user as through an openPMD file - B_external_particle_field, //!< Stores external particle fields provided by the user as through an openPMD file - distance_to_eb, //!< Only used with embedded boundaries (EB). Stores the distance to the nearest EB - edge_lengths, //!< Only used with the ECT solver. Indicates the length of the cell edge that is covered by the EB, in SI units - face_areas, //!< Only used with the ECT solver. Indicates the area of the cell face that is covered by the EB, in SI units + Efield_cp, /**< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level */ + Bfield_cp, /**< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level */ + current_cp, /**< Only used with MR. The current that is used as a source for the field solver, on the coarse patch of each level */ + rho_cp, /**< Only used with MR. The charge density that is used as a source for the field solver, on the coarse patch of each level */ + F_cp, /**< Only used with MR. Used for divE cleaning, on the coarse patch of each level */ + G_cp, /**< Only used with MR. 
Used for divB cleaning, on the coarse patch of each level */ + Efield_cax, /**< Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field */ + Bfield_cax, /**< Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field */ + E_external_particle_field, /**< Stores external particle fields provided by the user as through an openPMD file */ + B_external_particle_field, /**< Stores external particle fields provided by the user as through an openPMD file */ + distance_to_eb, /**< Only used with embedded boundaries (EB). Stores the distance to the nearest EB */ + edge_lengths, /**< Only used with the ECT solver. Indicates the length of the cell edge that is covered by the EB, in SI units */ + face_areas, /**< Only used with the ECT solver. Indicates the area of the cell face that is covered by the EB, in SI units */ area_mod, pml_E_fp, pml_B_fp, diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 0e605697df2..f576cc8ed9c 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -1374,13 +1374,6 @@ void WarpX::CheckKnownIssues() const bool external_particle_field_used = ( mypc->m_B_ext_particle_s != "none" || mypc->m_E_ext_particle_s != "none" ); - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - (!external_particle_field_used - || mypc->m_B_ext_particle_s == "parse_b_ext_particle_function" - || mypc->m_E_ext_particle_s == "parse_e_ext_particle_function"), - "The hybrid-PIC algorithm only works with analytical external E/B fields " - "applied directly to particles." 
- ); } #if defined(__CUDACC__) && (__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ == 6) diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index fe89b04807e..c965ecf9373 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -768,12 +768,22 @@ WarpX::ReadParameters () use_kspace_filter = use_filter; use_filter = false; } - else if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::HybridPIC) + else { - // Filter currently not working with FDTD solver in RZ geometry along R - // (see https://github.com/ECP-WarpX/WarpX/issues/1943) - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!use_filter || filter_npass_each_dir[0] == 0, - "In RZ geometry with FDTD, filtering can only be apply along z. This can be controlled by setting warpx.filter_npass_each_dir"); + if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::HybridPIC) { + // Filter currently not working with FDTD solver in RZ geometry along R + // (see https://github.com/ECP-WarpX/WarpX/issues/1943) + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!use_filter || filter_npass_each_dir[0] == 0, + "In RZ geometry with FDTD, filtering can only be apply along z. This can be controlled by setting warpx.filter_npass_each_dir"); + } else { + if (use_filter && filter_npass_each_dir[0] > 0) { + ablastr::warn_manager::WMRecordWarning( + "HybridPIC ElectromagneticSolver", + "Radial Filtering in RZ is not currently using radial geometric weighting to conserve charge. Use at your own risk.", + ablastr::warn_manager::WarnPriority::low + ); + } + } } #endif From 7677aa2fb7e6577e19a0b372d473fd125ed1b6fd Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Thu, 13 Feb 2025 16:04:53 -0800 Subject: [PATCH 81/86] Removing dead code. Signed-off-by: S. 
Eric Clark <25495882+clarkse@users.noreply.github.com> --- Source/Initialization/WarpXInitData.cpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index f576cc8ed9c..9c312a3f42e 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -1370,10 +1370,6 @@ void WarpX::CheckKnownIssues() "in a single step, so be careful with your choice of time step.", ablastr::warn_manager::WarnPriority::low); } - - const bool external_particle_field_used = ( - mypc->m_B_ext_particle_s != "none" || mypc->m_E_ext_particle_s != "none" - ); } #if defined(__CUDACC__) && (__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ == 6) From cb4637302ff2952e669cd70d7166b9b8acad7408 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Fri, 14 Feb 2025 09:53:25 -0800 Subject: [PATCH 82/86] Update Python/pywarpx/picmi.py Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- Python/pywarpx/picmi.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 3cad7a34fc4..f660570ca7c 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1921,12 +1921,8 @@ def __init__( self.Jy_external_function = Jy_external_function self.Jz_external_function = Jz_external_function - self.add_external_fields = None self.A_external = A_external - if A_external is not None: - self.add_external_fields = True - # Handle keyword arguments used in expressions self.user_defined_kw = {} for k in list(kw.keys()): From 6dd2a2b6ffd961335c9bc775f8911fde48f09c1b Mon Sep 17 00:00:00 2001 From: "S. 
Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Fri, 14 Feb 2025 09:53:40 -0800 Subject: [PATCH 83/86] Update Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index 6f32f642d74..b57def5c4fe 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -210,7 +210,6 @@ void WarpX::HybridPICEvolveFields () m_fields.get_mr_levels(FieldType::rho_fp, finest_level), m_eb_update_E, false); FillBoundaryE(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); - // ApplyEfieldBoundary(0, PatchType::fine); // Handle field splitting for Hybrid field push if (add_external_fields) { From ff5fef87d6be7a264cadb1c066e23879d0b53993 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Fri, 14 Feb 2025 09:55:27 -0800 Subject: [PATCH 84/86] Update Source/WarpX.cpp Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- Source/WarpX.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index c965ecf9373..bbac858b592 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -774,7 +774,7 @@ WarpX::ReadParameters () // Filter currently not working with FDTD solver in RZ geometry along R // (see https://github.com/ECP-WarpX/WarpX/issues/1943) WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!use_filter || filter_npass_each_dir[0] == 0, - "In RZ geometry with FDTD, filtering can only be apply along z. This can be controlled by setting warpx.filter_npass_each_dir"); + "In RZ geometry with FDTD, filtering can only be applied along z. 
This can be controlled by setting warpx.filter_npass_each_dir"); } else { if (use_filter && filter_npass_each_dir[0] > 0) { ablastr::warn_manager::WMRecordWarning( From 37edfc23e306a1cc555c624370aabde7f7e04439 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Fri, 14 Feb 2025 11:47:12 -0800 Subject: [PATCH 85/86] Updating to add a cast for clang-tidy. Adding particle based external fields warning back to Initialization routine. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- .../HybridPICModel/ExternalVectorPotential.cpp | 2 +- Source/Initialization/WarpXInitData.cpp | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp index dc4a1a0898d..50a62335b57 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp @@ -218,7 +218,7 @@ ExternalVectorPotential::InitData () m_A_time_scale[i] = m_A_external_time_parser[i]->compile<1>(); const std::set A_time_ext_symbols = m_A_external_time_parser[i]->symbols(); - A_time_dep_count += A_time_ext_symbols.count("t"); + A_time_dep_count += static_cast(A_time_ext_symbols.count("t")); } if (A_time_dep_count > 0) { diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 9c312a3f42e..0e0f4f75099 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -1370,6 +1370,15 @@ void WarpX::CheckKnownIssues() "in a single step, so be careful with your choice of time step.", ablastr::warn_manager::WarnPriority::low); } + + const bool external_particle_field_used = ( + mypc->m_B_ext_particle_s != "none" || mypc->m_E_ext_particle_s != "none" + ); + 
WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !external_particle_field_used, + "The hybrid-PIC algorithm does not work with external fields " + "applied directly to particles." + ); } #if defined(__CUDACC__) && (__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ == 6) From 33243b96aa58b6dc3c0667412b1293e3a0e5161a Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Fri, 14 Feb 2025 13:20:56 -0800 Subject: [PATCH 86/86] Loosening tolerance on CI test for relative tolerance. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt b/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt index 86596a92a87..c813d669fa6 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt +++ b/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt @@ -7,7 +7,7 @@ add_warpx_test( 2 # nprocs "inputs_test_3d_ohm_solver_cylinder_compression_picmi.py --test" # inputs OFF # analysis - "analysis_default_regression.py --path diags/diag1000020" # checksum + "analysis_default_regression.py --path diags/diag1000020 --rtol 1e-6" # checksum OFF # dependency ) label_warpx_test(test_3d_ohm_solver_cylinder_compression_picmi slow) @@ -18,7 +18,7 @@ add_warpx_test( 2 # nprocs "inputs_test_rz_ohm_solver_cylinder_compression_picmi.py --test" # inputs OFF # analysis - "analysis_default_regression.py --path diags/diag1000020" # output + "analysis_default_regression.py --path diags/diag1000020 --rtol 1e-6" # output OFF # dependency ) label_warpx_test(test_rz_ohm_solver_cylinder_compression_picmi slow)