diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c581183703a..1b668d5931e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.4 + rev: v0.6.5 hooks: # Run the linter - id: ruff diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index f46dc3d1640..72c599ae2bd 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -28,7 +28,7 @@ Optional dependencies include: - `FFTW3 `__: for spectral solver (PSATD or IGF) support when running on CPU or SYCL - also needs the ``pkg-config`` tool on Unix -- `heFFTe 2.4.0+ `__: for multi-node spectral solver (IGF) support - `BLAS++ `__ and `LAPACK++ `__: for spectral solver (PSATD) support in RZ geometry - `Boost 1.66.0+ `__: for QED lookup tables generation support - `openPMD-api 0.15.1+ `__: we automatically download and compile a copy of openPMD-api for openPMD I/O support diff --git a/Docs/source/latex_theory/allbibs.bib b/Docs/source/latex_theory/allbibs.bib index 62810ca5d6a..e44ab5cf112 100644 --- a/Docs/source/latex_theory/allbibs.bib +++ b/Docs/source/latex_theory/allbibs.bib @@ -2188,6 +2188,21 @@ @book{godfrey1985iprop year = {1985} } +@article{shapovalPRE2024, +author = {Shapoval, Olga and Zoni, Edoardo and Lehe, Remi and Thevenet, Maxence and Vay, Jean-Luc}, +doi = {10.1103/PhysRevE.110.025206}, +issue = {2}, +journal = {Phys. Rev. E}, +month = {Aug}, +numpages = {19}, +pages = {025206}, +publisher = {American Physical Society}, +title = {{Pseudospectral particle-in-cell formulation with arbitrary charge and current-density time dependencies for the modeling of relativistic plasmas}}, +url = {https://link.aps.org/doi/10.1103/PhysRevE.110.025206}, +volume = {110}, +year = {2024} +} + @article{Ammosov1986, title = {Tunnel ionization of complex atoms and of atomic ions in an alternating electromagnetic field}, volume = {64}, diff --git a/Docs/source/theory/kinetic_fluid_hybrid_model.rst b/Docs/source/theory/kinetic_fluid_hybrid_model.rst index b4f494d8382..f764ce4e02b 100644 --- a/Docs/source/theory/kinetic_fluid_hybrid_model.rst +++ b/Docs/source/theory/kinetic_fluid_hybrid_model.rst @@ -46,7 +46,7 @@ integrating over velocity), also called the generalized Ohm's law, is given by: .. math:: - en_e\vec{E} = \frac{m}{e}\frac{\partial \vec{J}_e}{\partial t} + \frac{m}{e^2}\left( \vec{U}_e\cdot\nabla \right) \vec{J}_e - \nabla\cdot {\overleftrightarrow P}_e - \vec{J}_e\times\vec{B}+\vec{R}_e + en_e\vec{E} = \frac{m}{e}\frac{\partial \vec{J}_e}{\partial t} + \frac{m}{e}\left( \vec{U}_e\cdot\nabla \right) \vec{J}_e - \nabla\cdot {\overleftrightarrow P}_e - \vec{J}_e\times\vec{B}+\vec{R}_e where :math:`\vec{U}_e = \vec{J}_e/(en_e)` is the electron fluid velocity, :math:`{\overleftrightarrow P}_e` is the electron pressure tensor and @@ -64,7 +64,7 @@ Plugging this back into the generalized Ohm's law gives: \left(en_e +\frac{m}{e\mu_0}\nabla\times\nabla\times\right)\vec{E} =& - \frac{m}{e}\left( \frac{\partial\vec{J}_{ext}}{\partial t} + \sum_{s\neq e}\frac{\partial\vec{J}_s}{\partial t} \right) \\ - &+ \frac{m}{e^2}\left( \vec{U}_e\cdot\nabla \right) \vec{J}_e - \nabla\cdot {\overleftrightarrow P}_e - \vec{J}_e\times\vec{B}+\vec{R}_e. + &+ \frac{m}{e}\left( \vec{U}_e\cdot\nabla \right) \vec{J}_e - \nabla\cdot {\overleftrightarrow P}_e - \vec{J}_e\times\vec{B}+\vec{R}_e.
If we now further assume electrons are inertialess (i.e. :math:`m=0`), the above equation simplifies to, diff --git a/Docs/source/theory/multiphysics/ionization.rst b/Docs/source/theory/multiphysics/ionization.rst index 11abea386c8..5003872b1a1 100644 --- a/Docs/source/theory/multiphysics/ionization.rst +++ b/Docs/source/theory/multiphysics/ionization.rst @@ -56,18 +56,18 @@ where :math:`\mathrm{d}\tau` is the simulation timestep, which is divided by the Empirical Extension to Over-the-Barrier Regime for Hydrogen ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -For hydrogen, WarpX offers the modified empirical ADK extension to the Over-the-Barrier (OTB) published in :cite:t:`mpion-zhang_empirical_2014` Eq. (8). +For hydrogen, WarpX offers the modified empirical ADK extension to the Over-the-Barrier (OTB) regime published in :cite:t:`mpion-zhang_empirical_2014` Eq. (8) (note there is a typo in the paper and there should not be a minus sign in Eq. 8). .. math:: - W_\mathrm{M} = \exp\left[ -\left( a_1 \frac{E^2}{E_\mathrm{b}} + a_2 \frac{E}{E_\mathrm{b}} + a_3 \right) \right] W_\mathrm{ADK} + W_\mathrm{M} = \exp\left[ a_1 \frac{E^2}{E_\mathrm{b}} + a_2 \frac{E}{E_\mathrm{b}} + a_3 \right] W_\mathrm{ADK} The parameters :math:`a_1` through :math:`a_3` are independent of :math:`E` and can be found in the same reference. :math:`E_\mathrm{b}` is the classical Barrier Suppression Ionization (BSI) field strength :math:`E_\mathrm{b} = U_\mathrm{ion}^2 / (4 Z)` given here in atomic units (AU). For a detailed description of conversion between unit systems, consider the book by :cite:t:`mpion-Mulser2010`. Testing ^^^^^^^ -* `Testing the field ionization module <../../../../Examples/Tests/field_ionization/README.rst>`_. +* `Testing the field ionization module <../../../../en/latest/usage/examples/field_ionization/README.html>`_. .. bibliography:: :keyprefix: mpion- diff --git a/Docs/source/theory/pic.rst b/Docs/source/theory/pic.rst index 820cdba50e6..8356ba9f0f8 100644 --- a/Docs/source/theory/pic.rst +++ b/Docs/source/theory/pic.rst @@ -434,6 +434,83 @@ of this model can be found in the section .. _current_deposition: +Pseudo Spectral Analytical Time Domain with arbitrary charge and current-density time dependencies (PSATD-JRhom) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +In :cite:`pt-shapovalPRE2024` we introduce a formulation of the particle-in-cell (PIC) method for the modeling of relativistic plasmas, which leverages the ability of the pseudo-spectral analytical time-domain solver (PSATD) to handle arbitrary time dependencies of the charge and current densities during one PIC cycle (up to second-order polynomial dependencies here). +The formulation is applied to a modified set of Maxwell's equations, which in Fourier space reads + +.. math:: + + \begin{align} + \frac{\partial\boldsymbol{\widetilde{E}}}{\partial t} & = i\boldsymbol{k}\times\boldsymbol{\widetilde{B}}-\boldsymbol{\widetilde{J}} + i\boldsymbol{k}{\widetilde{F}} \,, \\ + \frac{\partial\boldsymbol{\widetilde{B}}}{\partial t} & = -i\boldsymbol{k}\times\boldsymbol{\widetilde{E}} \,, \\ + \frac{\partial{\widetilde{F}}}{\partial t} & = i\boldsymbol{k}\cdot\boldsymbol{\widetilde{E}} - \widetilde{\rho} \,.
+ \end{align} +Here, in addition to the usual Maxwell-Faraday and Ampere-Maxwell equations, the system contains an extra equation for the scalar field :math:`\widetilde{F}`, which propagates deviations to Gauss' law (if Gauss' law is verified in the PIC simulation, :math:`\widetilde{F}=0` and the modified Maxwell's equations reduce to the standard Maxwell's equations). +These additional terms were introduced in :cite:p:`pt-Vayfed1996,pt-Munzjcp2000` from the potential formulation in the Lorenz gauge and used as a propagative divergence cleaning procedure, as an alternative to the Langdon-Marder :cite:p:`pt-Langdoncpc92,pt-Marderjcp87` diffusive procedures. +The above-mentioned earlier works :cite:p:`pt-Vayfed1996,pt-Munzjcp2000` considered this formulation in the context of the standard PIC method using FDTD discretization, while the PSATD-JRhom method introduced in :cite:`pt-shapovalPRE2024` exploits the PSATD discretization of the modified Maxwell's equations. +In contrast to the standard PSATD algorithm :cite:p:`pt-VayJCP2013`, where :math:`\boldsymbol{\widetilde{J}}` is assumed to be constant in time and :math:`\widetilde{\rho}` is assumed to be linear in time within a given time step :math:`\Delta t`, PSATD-JRhom allows more general time dependencies of :math:`\boldsymbol{\widetilde{J}}` and :math:`\widetilde{\rho}` within one time step, which can be divided into :math:`m` subintervals of equal size :math:`\delta t = \Delta t/m`. +During these subintervals, :math:`\boldsymbol{\widetilde{J}}` and :math:`\widetilde{\rho}` are considered to be either **piecewise constant** (macro-particles deposit their density in the middle of each time subinterval), **piecewise linear** (macro-particles deposit their density at the edge of each time subinterval), or **piecewise quadratic** (macro-particles deposit their density at the edge of each time subinterval) in time. + +.. _fig-psatd_jrhom: + +.. figure:: https://gist.githubusercontent.com/oshapoval/88a73cada764364ad4ffce13563cedf1/raw/697ce1897cde0416bebdde8f1c1e8fcf859cb419/psatd_jrhom.png + :alt: figure not found, caption only + + Diagrams illustrating various time dependencies of the current density :math:`\boldsymbol{\widetilde{J}}` and charge density :math:`\widetilde{\rho}` for constant/linear (CL), both constant (CC), linear (LL) and quadratic (QQ) dependencies with :math:`m` subintervals: (first column) :math:`m=1`, (second) :math:`m=2` and (third) :math:`m=4`. CL1 corresponds to the standard PSATD PIC method. The triangle and circle glyphs represent the times at which the macroparticles deposit :math:`\boldsymbol{\widetilde{J}}` and :math:`\widetilde{\rho}` on the grid, respectively. The dashed and solid lines represent the assumed time dependency of :math:`\boldsymbol{\widetilde{J}}` and :math:`\widetilde{\rho}` within one time step, when integrating the Maxwell equations analytically. + +Using the piecewise definition of :math:`\widetilde{\rho}` and :math:`\boldsymbol{\widetilde{J}}`, the modified Maxwell's equations can be integrated analytically over one time step :math:`\Delta t`, i.e., from :math:`t=n\Delta t` to :math:`t=(n+1)\Delta t`. +In practice, this is done by sequentially integrating these equations over each subinterval :math:`\ell \in [0,m-1]`. +The final discretized equations read: + ..
math:: + + \begin{align} + \begin{split} + \boldsymbol{\widetilde{E}}^{n+(\ell+1)/m} & = C{\boldsymbol{\widetilde{E}}}^{n+\ell/m}+ic^2\frac{S}{ck}\boldsymbol{k}\times{\boldsymbol{\widetilde{B}}}^{n+\ell/m}+ic^2\frac{S}{ck}\widetilde{F}^{n+\ell/m}\boldsymbol{k} \\ + &\quad + \frac{1}{\varepsilon_0 ck}\left(Y_3\boldsymbol{a_J} + Y_2\boldsymbol{b_J} - S\boldsymbol{c_J}\right) + \frac{ic^2}{\varepsilon_0 c^2k^2}\left({Y_1}a_{\rho}-Y_{5}b_{\rho}-Y_{4}c_{\rho}\right)\boldsymbol{k}, + \end{split} + \\[4pt] + \begin{split} + \boldsymbol{\widetilde{B}}^{n+(\ell+1)/m} & = C {\boldsymbol{\widetilde{B}}}^{n+\ell/m}-i\frac{S}{ck}\boldsymbol{k}\times{\boldsymbol{\widetilde{E}}}^{n+\ell/m} - \frac{i}{\varepsilon_0 c^2k^2}\boldsymbol{k}\times\left(Y_1\boldsymbol{a_J} -Y_5\boldsymbol{b_J} -Y_4\boldsymbol{c_J} \right), + \end{split} + \\[4pt] + \begin{split} + \widetilde{F}^{n+(\ell+1)/m} & = C \widetilde{F}^{n+\ell/m}+i\frac{S}{ck}\boldsymbol{k} \cdot {\boldsymbol{\widetilde{E}}}^{n+\ell/m}+\frac{i}{\varepsilon_0 c^2k^2}\boldsymbol{k}\cdot\left(Y_1\boldsymbol{a_J}-Y_5\boldsymbol{b_J}-Y_4\boldsymbol{c_J}\right) \\ + &\quad + \frac{1}{\varepsilon_0 ck}\left({Y_3}a_{\rho}+{Y_2}b_{\rho}-Sc_{\rho}\right), + \end{split} + \end{align} + +where + +.. math:: + + \begin{aligned} + C &= \cos(ck\delta t), \ S = \sin(ck\delta t), + \\ + Y_1 & = \frac{(1-C)(8-c^2k^2\delta t^2)-4Sck\delta t}{2 c^2 k^2 \delta t^2}, + \\ + Y_2 & = \frac{2(C-1)+ S ck\delta t }{2 ck\delta t}, + \\ + Y_3 & = \frac{S(8- c^2k^2\delta t^2 ) - 4ck\delta t(1+C)}{2c^2 k^2 \delta t^2}, + \\ + Y_4 &= (1-C), \ Y_5 = \frac{(1+C) ck\delta t - 2S}{2ck \delta t}. + \end{aligned} + +Here, :math:`\boldsymbol{a_J}, \boldsymbol{b_J}, \boldsymbol{c_J}, a_{\rho}, b_{\rho}, c_{\rho}` are polynomial coefficients based on the time dependencies of the current and charge densities, as shown in the following table: + +.. _fig-j_rho_table: + +.. figure:: + https://gist.githubusercontent.com/oshapoval/88a73cada764364ad4ffce13563cedf1/raw/ebc249f8e875a952c65a5319fd523821baccfd5a/j_rho_table.png + :alt: figure not found, caption only + + Polynomial coefficients based on the time dependency of the current and charge densities :math:`{\boldsymbol{\widetilde{J}}}(t)` and :math:`\widetilde{\rho}(t)` over one time subinterval, :math:`\delta t = \Delta t/m`. + +Detailed analysis and tests revealed that, under certain conditions, the formulation can expand the range of numerical parameters under which PIC simulations are stable and accurate when modeling relativistic plasmas, such as plasma-based particle accelerators. +(A short numerical sketch of the coefficients :math:`C`, :math:`S` and :math:`Y_1` through :math:`Y_5` is given below.) + Current deposition ------------------ diff --git a/Examples/Physics_applications/beam_beam_collision/README.rst b/Examples/Physics_applications/beam_beam_collision/README.rst index a7a06521218..28fdc1ee70e 100644 --- a/Examples/Physics_applications/beam_beam_collision/README.rst +++ b/Examples/Physics_applications/beam_beam_collision/README.rst @@ -11,7 +11,8 @@ We turn on the Quantum Synchrotron QED module for photon emission (also known as the Breit-Wheeler QED module for the generation of electron-positron pairs (also known as coherent pair generation in the collider community). To solve for the electromagnetic field we use the nodal version of the electrostatic relativistic solver. -This solver computes the average velocity of each species, and solves the corresponding relativistic Poisson equation (see the WarpX documentation for `warpx.do_electrostatic = relativistic` for more detail).
This solver accurately reproduced the subtle cancellation that occur for some component of the ``E + v x B`` terms which are crucial in simulations of relativistic particles. +This solver computes the average velocity of each species, and solves the corresponding relativistic Poisson equation (see the WarpX documentation for `warpx.do_electrostatic = relativistic` for more detail). +This solver accurately reproduces the subtle cancellations that occur for some components of ``E + v x B``, which are crucial in simulations of relativistic particles. This example is based on the following paper :cite:t:`ex-Yakimenko2019`. @@ -26,7 +27,7 @@ For `MPI-parallel `__ runs, prefix these lines with ` .. literalinclude:: inputs_test_3d_beam_beam_collision :language: ini - :caption: You can copy this file from ``Examples/Physics_applications/beam-beam_collision/inputs_test_3d_beam_beam_collision``. + :caption: You can copy this file from ``Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision``. Visualize @@ -34,12 +35,15 @@ Visualize The figure below shows the number of photons emitted per beam particle (left) and the number of secondary pairs generated per beam particle (right). -We compare different results: +We compare different results for the reduced diagnostics with the literature: * (red) simplified WarpX simulation as the example stored in the directory ``/Examples/Physics_applications/beam_beam_collision``; * (blue) large-scale WarpX simulation (high resolution and ad hoc generated tables); * (black) literature results from :cite:t:`ex-Yakimenko2019`. -The small-scale simulation has been performed with a resolution of ``nx = 64, ny = 64, nz = 64`` grid cells, while the large-scale one has a much higher resolution of ``nx = 512, ny = 512, nz = 1024``. Moreover, the large-scale simulation uses dedicated QED lookup tables instead of the builtin tables. To generate the tables within WarpX, the code must be compiled with the flag ``-DWarpX_QED_TABLE_GEN=ON``. For the large-scale simulation we have used the following options: +The small-scale simulation has been performed with a resolution of ``nx = 64, ny = 64, nz = 64`` grid cells, while the large-scale one has a much higher resolution of ``nx = 512, ny = 512, nz = 1024``. +Moreover, the large-scale simulation uses dedicated QED lookup tables instead of the built-in tables. +To generate the tables within WarpX, the code must be compiled with the flag ``-DWarpX_QED_TABLE_GEN=ON``. +For the large-scale simulation we have used the following options: .. code-block:: ini @@ -63,8 +67,45 @@ The small-scale simulation has been performed with a resolution of ``nx = 64, ny qed_bw.tab_pair_frac_how_many=512 qed_bw.save_table_in=my_bw_table.txt + .. figure:: https://gist.github.com/user-attachments/assets/2dd43782-d039-4faa-9d27-e3cf8fb17352 :alt: Beam-beam collision benchmark against :cite:t:`ex-Yakimenko2019`. :width: 100% Beam-beam collision benchmark against :cite:t:`ex-Yakimenko2019`. + + +Below are two visualization scripts that show how to plot the field and reduced diagnostics. +They are available in the ``Examples/Physics_applications/beam_beam_collision/`` folder and can be run as simply as ``python3 plot_fields.py`` and ``python3 plot_reduced.py``. + +.. tab-set:: + + .. tab-item:: Field Diagnostics + + This script visualizes the evolution of the fields (:math:`|E|, |B|, \rho`) during the collision between the two ultra-relativistic lepton beams.
+ The magnitude of E and B and the charge densities of the primary beams and of the secondary pairs are sliced along either one of the two transverse coordinates (:math:`x` and :math:`y`). + + .. literalinclude:: plot_fields.py + :language: python3 + :caption: You can copy this file from ``Examples/Physics_applications/beam_beam_collision/plot_fields.py``. + + .. figure:: https://gist.github.com/user-attachments/assets/04c9c0ec-b580-446f-a11a-437c1b244a41 + :alt: Slice across :math:`x` of different fields (:math:`|E|, |B|, \rho`) at timestep 45, in the middle of the collision. + :width: 100% + + Slice across :math:`x` of different fields (:math:`|E|, |B|, \rho`) at timestep 45, in the middle of the collision. + + + .. tab-item:: Reduced Diagnostics + + A similar script to the one below was used to produce the image showing the benchmark against :cite:t:`ex-Yakimenko2019`. + + .. literalinclude:: plot_reduced.py + :language: python3 + :caption: You can copy this file from ``Examples/Physics_applications/beam_beam_collision/plot_reduced.py``. + + .. figure:: https://gist.github.com/user-attachments/assets/c280490a-f1f2-4329-ad3c-46817d245dc1 + :alt: Photon and pair production rates in time throughout the collision. + :width: 100% + + Photon and pair production rates in time throughout the collision. diff --git a/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision b/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision index e856a078003..d0cf3cd7ebf 100644 --- a/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision +++ b/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision @@ -211,7 +211,7 @@ warpx.do_qed_schwinger = 0. # FULL diagnostics.diags_names = diag1 -diag1.intervals = 0 +diag1.intervals = 15 diag1.diag_type = Full diag1.write_species = 1 diag1.fields_to_plot = Ex Ey Ez Bx By Bz rho_beam1 rho_beam2 rho_ele1 rho_pos1 rho_ele2 rho_pos2 diff --git a/Examples/Physics_applications/beam_beam_collision/plot_fields.py b/Examples/Physics_applications/beam_beam_collision/plot_fields.py new file mode 100644 index 00000000000..a7ddb2d13e9 --- /dev/null +++ b/Examples/Physics_applications/beam_beam_collision/plot_fields.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python3 + +import matplotlib.pyplot as plt +import numpy as np +from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable +from openpmd_viewer import OpenPMDTimeSeries + +plt.rcParams.update({"font.size": 16}) + +series = OpenPMDTimeSeries("./diags/diag1") +steps = series.iterations + + +for slice_axis in ["x", "y"]: # slice the fields along x and y + for n in steps: # loop through the available timesteps + fig, ax = plt.subplots( + ncols=2, nrows=2, figsize=(10, 6), dpi=300, sharex=True, sharey=True + ) + + # get E field + Ex, info = series.get_field( + field="E", coord="x", iteration=n, plot=False, slice_across=slice_axis + ) + Ey, info = series.get_field( + field="E", coord="y", iteration=n, plot=False, slice_across=slice_axis + ) + Ez, info = series.get_field( + field="E", coord="z", iteration=n, plot=False, slice_across=slice_axis + ) + # get B field + Bx, info = series.get_field( + field="B", coord="x", iteration=n, plot=False, slice_across=slice_axis + ) + By, info = series.get_field( + field="B", coord="y", iteration=n, plot=False, slice_across=slice_axis + ) + Bz, info = series.get_field( + field="B", coord="z", iteration=n, plot=False, slice_across=slice_axis + ) + # get charge densities + rho_beam1,
info = series.get_field( + field="rho_beam1", iteration=n, plot=False, slice_across=slice_axis + ) + rho_beam2, info = series.get_field( + field="rho_beam2", iteration=n, plot=False, slice_across=slice_axis + ) + rho_ele1, info = series.get_field( + field="rho_ele1", iteration=n, plot=False, slice_across=slice_axis + ) + rho_pos1, info = series.get_field( + field="rho_pos1", iteration=n, plot=False, slice_across=slice_axis + ) + rho_ele2, info = series.get_field( + field="rho_ele2", iteration=n, plot=False, slice_across=slice_axis + ) + rho_pos2, info = series.get_field( + field="rho_pos2", iteration=n, plot=False, slice_across=slice_axis + ) + + xmin = info.z.min() + xmax = info.z.max() + xlabel = "z [m]" + + if slice_axis == "x": + ymin = info.y.min() + ymax = info.y.max() + ylabel = "y [m]" + elif slice_axis == "y": + ymin = info.x.min() + ymax = info.x.max() + ylabel = "x [m]" + + # plot E magnitude + Emag = np.sqrt(Ex**2 + Ey**2 + Ez**2) + im = ax[0, 0].imshow( + np.transpose(Emag), + cmap="seismic", + extent=[xmin, xmax, ymin, ymax], + vmin=0, + vmax=np.max(np.abs(Emag)), + ) + ax[0, 0].set_title("E [V/m]") + divider = make_axes_locatable(ax[0, 0]) + cax = divider.append_axes("right", size="5%", pad=0.05) + fig.colorbar(im, cax=cax, orientation="vertical") + + # plot B magnitude + Bmag = np.sqrt(Bx**2 + By**2 + Bz**2) + im = ax[1, 0].imshow( + np.transpose(Bmag), + cmap="seismic", + extent=[xmin, xmax, ymin, ymax], + vmin=0, + vmax=np.max(np.abs(Bmag)), + ) + ax[1, 0].set_title("B [T]") + divider = make_axes_locatable(ax[1, 0]) + cax = divider.append_axes("right", size="5%", pad=0.05) + fig.colorbar(im, cax=cax, orientation="vertical") + + # plot beam densities + rho_beams = rho_beam1 + rho_beam2 + im = ax[0, 1].imshow( + np.transpose(rho_beams), + cmap="seismic", + extent=[xmin, xmax, ymin, ymax], + vmin=-np.max(np.abs(rho_beams)), + vmax=np.max(np.abs(rho_beams)), + ) + ax[0, 1].set_title(r"$\rho$ beams [C/m$^3$]") + divider = make_axes_locatable(ax[0, 1]) + cax = divider.append_axes("right", size="5%", pad=0.05) + fig.colorbar(im, cax=cax, orientation="vertical") + + # plot secondary densities + rho2 = rho_ele1 + rho_pos1 + rho_ele2 + rho_pos2 + im = ax[1, 1].imshow( + np.transpose(rho2), + cmap="seismic", + extent=[xmin, xmax, ymin, ymax], + vmin=-np.max(np.abs(rho2)), + vmax=np.max(np.abs(rho2)), + ) + ax[1, 1].set_title(r"$\rho$ secondaries [C/m$^3$]") + divider = make_axes_locatable(ax[1, 1]) + cax = divider.append_axes("right", size="5%", pad=0.05) + fig.colorbar(im, cax=cax, orientation="vertical") + + for a in ax[-1, :].reshape(-1): + a.set_xlabel(xlabel) + for a in ax[:, 0].reshape(-1): + a.set_ylabel(ylabel) + + fig.suptitle(f"Iteration = {n}, time [s] = {series.current_t}", fontsize=20) + plt.tight_layout() + + image_file_name = "FIELDS_" + slice_axis + f"_{n:03d}.png" + plt.savefig(image_file_name, dpi=100, bbox_inches="tight") + plt.close() diff --git a/Examples/Physics_applications/beam_beam_collision/plot_reduced.py b/Examples/Physics_applications/beam_beam_collision/plot_reduced.py new file mode 100644 index 00000000000..3f59f975519 --- /dev/null +++ b/Examples/Physics_applications/beam_beam_collision/plot_reduced.py @@ -0,0 +1,48 @@ +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +from scipy.constants import c, nano, physical_constants + +r_e = physical_constants["classical electron radius"][0] +my_dpi = 300 +sigmaz = 10 * nano + +fig, ax = plt.subplots( + ncols=2, nrows=1, figsize=(2000.0 / my_dpi, 1000.0 / my_dpi), dpi=my_dpi +) + +rdir 
= "./diags/reducedfiles/" + +df_cr = pd.read_csv(f"{rdir}" + "ColliderRelevant_beam1_beam2.txt", sep=" ", header=0) +df_pn = pd.read_csv(f"{rdir}" + "ParticleNumber.txt", sep=" ", header=0) + + +times = df_cr[[col for col in df_cr.columns if "]time" in col]].to_numpy() +steps = df_cr[[col for col in df_cr.columns if "]step" in col]].to_numpy() + +x = df_cr[[col for col in df_cr.columns if "]dL_dt" in col]].to_numpy() +coll_index = np.argmax(x) +coll_time = times[coll_index] + +# number of photons per beam particle +np1 = df_pn[[col for col in df_pn.columns if "]pho1_weight" in col]].to_numpy() +np2 = df_pn[[col for col in df_pn.columns if "]pho2_weight" in col]].to_numpy() +Ne = df_pn[[col for col in df_pn.columns if "]beam1_weight" in col]].to_numpy()[0] +Np = df_pn[[col for col in df_pn.columns if "]beam2_weight" in col]].to_numpy()[0] + +ax[0].plot((times - coll_time) / (sigmaz / c), (np1 + np2) / (Ne + Np), lw=2) +ax[0].set_title(r"photon number/beam particle") + +# number of NLBW particles per beam particle +e1 = df_pn[[col for col in df_pn.columns if "]ele1_weight" in col]].to_numpy() +e2 = df_pn[[col for col in df_pn.columns if "]ele2_weight" in col]].to_numpy() + +ax[1].plot((times - coll_time) / (sigmaz / c), (e1 + e2) / (Ne + Np), lw=2) +ax[1].set_title(r"NLBW particles/beam particle") + +for a in ax.reshape(-1): + a.set_xlabel(r"time [$\sigma_z/c$]") +image_file_name = "reduced.png" +plt.tight_layout() +plt.savefig(image_file_name, dpi=300, bbox_inches="tight") +plt.close("all") diff --git a/Examples/Tests/langmuir/CMakeLists.txt b/Examples/Tests/langmuir/CMakeLists.txt index bd0cea79c7a..b259083c695 100644 --- a/Examples/Tests/langmuir/CMakeLists.txt +++ b/Examples/Tests/langmuir/CMakeLists.txt @@ -11,6 +11,16 @@ add_warpx_test( OFF # dependency ) +add_warpx_test( + test_2d_langmuir_multi # name + 2 # dims + 2 # nprocs + inputs_test_2d_langmuir_multi # inputs + analysis_2d.py # analysis + diags/diag1000080 # output + OFF # dependency +) + add_warpx_test( test_2d_langmuir_multi_mr # name 2 # dims diff --git a/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi new file mode 100644 index 00000000000..df0189acd91 --- /dev/null +++ b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi @@ -0,0 +1,7 @@ +# base input parameters +FILE = inputs_base_2d + +# test input parameters +algo.current_deposition = direct +diag1.electrons.variables = x z w ux uy uz +diag1.positrons.variables = x z w ux uy uz diff --git a/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi.json b/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi.json new file mode 100644 index 00000000000..899352c45ba --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi.json @@ -0,0 +1,29 @@ +{ + "lev=0": { + "Bx": 0.0, + "By": 5.726296856755232, + "Bz": 0.0, + "Ex": 3751589134191.326, + "Ey": 0.0, + "Ez": 3751589134191.332, + "jx": 1.0100623329922576e+16, + "jy": 0.0, + "jz": 1.0100623329922578e+16 + }, + "electrons": { + "particle_momentum_x": 5.668407513430198e-20, + "particle_momentum_y": 0.0, + "particle_momentum_z": 5.668407513430198e-20, + "particle_position_x": 0.6553599999999999, + "particle_position_y": 0.65536, + "particle_weight": 3200000000000000.5 + }, + "positrons": { + "particle_momentum_x": 5.668407513430198e-20, + "particle_momentum_y": 0.0, + "particle_momentum_z": 5.668407513430198e-20, + "particle_position_x": 0.6553599999999999, + "particle_position_y": 0.65536, + "particle_weight": 3200000000000000.5 + } +} 
diff --git a/Regression/Checksum/benchmarks_json/test_rz_flux_injection.json b/Regression/Checksum/benchmarks_json/test_rz_flux_injection.json index 5a80590891c..2ba80d4fb0e 100644 --- a/Regression/Checksum/benchmarks_json/test_rz_flux_injection.json +++ b/Regression/Checksum/benchmarks_json/test_rz_flux_injection.json @@ -1,14 +1,15 @@ { - "electron": { - "particle_momentum_x": 7.168456345337534e-18, - "particle_momentum_y": 7.02290351254873e-18, - "particle_momentum_z": 9.565641373942318e-42, - "particle_position_x": 6962.988311042427, - "particle_position_y": 2034.5301680154264, - "particle_theta": 6397.068924320389, - "particle_weight": 3.215011942598676e-08 - }, "lev=0": { - "Bz": 9.526664429810971e-24 + "Bz": 9.524453851623612e-24 + }, + "electron": { + "particle_momentum_x": 7.146168286112378e-18, + "particle_momentum_y": 7.073108431229069e-18, + "particle_momentum_z": 9.282175511339672e-42, + "particle_position_x": 6978.157994231982, + "particle_position_y": 2044.6981840260364, + "particle_theta": 6298.956888689097, + "particle_weight": 3.2236798669537214e-08 } -} \ No newline at end of file +} + diff --git a/Source/FieldSolver/CMakeLists.txt b/Source/FieldSolver/CMakeLists.txt index 859117eb214..896b3f04fc2 100644 --- a/Source/FieldSolver/CMakeLists.txt +++ b/Source/FieldSolver/CMakeLists.txt @@ -2,13 +2,14 @@ foreach(D IN LISTS WarpX_DIMS) warpx_set_suffix_dims(SD ${D}) target_sources(lib_${SD} PRIVATE - ElectrostaticSolver.cpp WarpXPushFieldsEM.cpp WarpXPushFieldsHybridPIC.cpp WarpX_QED_Field_Pushers.cpp + WarpXSolveFieldsES.cpp ) endforeach() +add_subdirectory(ElectrostaticSolvers) add_subdirectory(FiniteDifferenceSolver) add_subdirectory(MagnetostaticSolver) add_subdirectory(ImplicitSolvers) diff --git a/Source/FieldSolver/ElectrostaticSolver.H b/Source/FieldSolver/ElectrostaticSolver.H deleted file mode 100644 index c5c2ce8b6eb..00000000000 --- a/Source/FieldSolver/ElectrostaticSolver.H +++ /dev/null @@ -1,125 +0,0 @@ -/* Copyright 2021 Modern Electron - * - * This file is part of WarpX. 
- * - * License: BSD-3-Clause-LBNL - */ -#ifndef WARPX_ELECTROSTATICSOLVER_H_ -#define WARPX_ELECTROSTATICSOLVER_H_ - -#include -#include -#include -#include -#include -#include - -#include -#include -#include - - -namespace ElectrostaticSolver { - - struct PhiCalculatorEB { - - amrex::Real t; - amrex::ParserExecutor<4> potential_eb; - - AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE - amrex::Real operator()(const amrex::Real x, const amrex::Real z) const noexcept { - using namespace amrex::literals; - return potential_eb(x, 0.0_rt, z, t); - } - - AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE - amrex::Real operator()(const amrex::Real x, const amrex::Real y, const amrex::Real z) const noexcept { - return potential_eb(x, y, z, t); - } - }; - - class PoissonBoundaryHandler { - public: - - amrex::Array lobc, hibc; - bool bcs_set = false; - std::array dirichlet_flag; - bool has_non_periodic = false; - bool phi_EB_only_t = true; - - void definePhiBCs (const amrex::Geometry& geom); - - void buildParsers (); - void buildParsersEB (); - - /* \brief Sets the EB potential string and updates the function parser - * - * \param [in] potential The string value of the potential - */ - void setPotentialEB(const std::string& potential) { - potential_eb_str = potential; - buildParsersEB(); - } - - [[nodiscard]] PhiCalculatorEB - getPhiEB(amrex::Real t) const noexcept - { - return PhiCalculatorEB{t, potential_eb}; - } - - // set default potentials to zero in order for current tests to pass - // but forcing the user to specify a potential might be better - std::string potential_xlo_str = "0"; - std::string potential_xhi_str = "0"; - std::string potential_ylo_str = "0"; - std::string potential_yhi_str = "0"; - std::string potential_zlo_str = "0"; - std::string potential_zhi_str = "0"; - std::string potential_eb_str = "0"; - - amrex::ParserExecutor<1> potential_xlo; - amrex::ParserExecutor<1> potential_xhi; - amrex::ParserExecutor<1> potential_ylo; - amrex::ParserExecutor<1> potential_yhi; - amrex::ParserExecutor<1> potential_zlo; - amrex::ParserExecutor<1> potential_zhi; - amrex::ParserExecutor<1> potential_eb_t; - amrex::ParserExecutor<4> potential_eb; - - private: - - amrex::Parser potential_xlo_parser; - amrex::Parser potential_xhi_parser; - amrex::Parser potential_ylo_parser; - amrex::Parser potential_yhi_parser; - amrex::Parser potential_zlo_parser; - amrex::Parser potential_zhi_parser; - amrex::Parser potential_eb_parser; - }; - - /** use amrex to directly calculate the electric field since with EB's the - * - * simple finite difference scheme in WarpX::computeE sometimes fails - */ - class EBCalcEfromPhiPerLevel { - private: - amrex::Vector< - amrex::Array - > m_e_field; - - public: - EBCalcEfromPhiPerLevel(amrex::Vector > e_field) - : m_e_field(std::move(e_field)) {} - - void operator()(amrex::MLMG & mlmg, int const lev) { - using namespace amrex::literals; - - mlmg.getGradSolution({m_e_field[lev]}); - for (auto &field: m_e_field[lev]) { - field->mult(-1._rt); - } - } - }; -} // namespace ElectrostaticSolver - -#endif // WARPX_ELECTROSTATICSOLVER_H_ diff --git a/Source/FieldSolver/ElectrostaticSolver.cpp b/Source/FieldSolver/ElectrostaticSolver.cpp deleted file mode 100644 index 22d1c663a53..00000000000 --- a/Source/FieldSolver/ElectrostaticSolver.cpp +++ /dev/null @@ -1,1147 +0,0 @@ -/* Copyright 2019 Remi Lehe - * - * This file is part of WarpX. 
- * - * License: BSD-3-Clause-LBNL - */ -#include "WarpX.H" - -#include "FieldSolver/ElectrostaticSolver.H" -#include "EmbeddedBoundary/Enabled.H" -#include "Fluids/MultiFluidContainer.H" -#include "Fluids/WarpXFluidContainer.H" -#include "Parallelization/GuardCellManager.H" -#include "Particles/MultiParticleContainer.H" -#include "Particles/WarpXParticleContainer.H" -#include "Python/callbacks.H" -#include "Utils/Parser/ParserUtils.H" -#include "Utils/WarpXAlgorithmSelection.H" -#include "Utils/WarpXConst.H" -#include "Utils/TextMsg.H" -#include "Utils/WarpXProfilerWrapper.H" - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef AMREX_USE_EB -# include -#endif - -#include -#include -#include - -using namespace amrex; -using namespace warpx::fields; - -void -WarpX::ComputeSpaceChargeField (bool const reset_fields) -{ - WARPX_PROFILE("WarpX::ComputeSpaceChargeField"); - if (reset_fields) { - // Reset all E and B fields to 0, before calculating space-charge fields - WARPX_PROFILE("WarpX::ComputeSpaceChargeField::reset_fields"); - for (int lev = 0; lev <= max_level; lev++) { - for (int comp=0; comp<3; comp++) { - Efield_fp[lev][comp]->setVal(0); - Bfield_fp[lev][comp]->setVal(0); - } - } - } - - if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame || - electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) { - AddSpaceChargeFieldLabFrame(); - } - else { - // Loop over the species and add their space-charge contribution to E and B. - // Note that the fields calculated here does not include the E field - // due to simulation boundary potentials - for (int ispecies=0; ispeciesnSpecies(); ispecies++){ - WarpXParticleContainer& species = mypc->GetParticleContainer(ispecies); - if (species.initialize_self_fields || - (electrostatic_solver_id == ElectrostaticSolverAlgo::Relativistic)) { - AddSpaceChargeField(species); - } - } - - // Add the field due to the boundary potentials - if (m_boundary_potential_specified || - (electrostatic_solver_id == ElectrostaticSolverAlgo::Relativistic)){ - AddBoundaryField(); - } - } -} - -/* Compute the potential `phi` by solving the Poisson equation with the - simulation specific boundary conditions and boundary values, then add the - E field due to that `phi` to `Efield_fp`. 
-*/ -void -WarpX::AddBoundaryField () -{ - WARPX_PROFILE("WarpX::AddBoundaryField"); - - // Store the boundary conditions for the field solver if they haven't been - // stored yet - if (!m_poisson_boundary_handler.bcs_set) { - m_poisson_boundary_handler.definePhiBCs(Geom(0)); - } - - // Allocate fields for charge and potential - const int num_levels = max_level + 1; - Vector > rho(num_levels); - Vector > phi(num_levels); - // Use number of guard cells used for local deposition of rho - const amrex::IntVect ng = guard_cells.ng_depos_rho; - for (int lev = 0; lev <= max_level; lev++) { - BoxArray nba = boxArray(lev); - nba.surroundingNodes(); - rho[lev] = std::make_unique(nba, DistributionMap(lev), 1, ng); - rho[lev]->setVal(0.); - phi[lev] = std::make_unique(nba, DistributionMap(lev), 1, 1); - phi[lev]->setVal(0.); - } - - // Set the boundary potentials appropriately - setPhiBC(phi); - - // beta is zero for boundaries - const std::array beta = {0._rt}; - - // Compute the potential phi, by solving the Poisson equation - computePhi( rho, phi, beta, self_fields_required_precision, - self_fields_absolute_tolerance, self_fields_max_iters, - self_fields_verbosity ); - - // Compute the corresponding electric and magnetic field, from the potential phi. - computeE( Efield_fp, phi, beta ); - computeB( Bfield_fp, phi, beta ); -} - -void -WarpX::AddSpaceChargeField (WarpXParticleContainer& pc) -{ - WARPX_PROFILE("WarpX::AddSpaceChargeField"); - - if (pc.getCharge() == 0) { - return; - } - - // Store the boundary conditions for the field solver if they haven't been - // stored yet - if (!m_poisson_boundary_handler.bcs_set) { - m_poisson_boundary_handler.definePhiBCs(Geom(0)); - } - -#ifdef WARPX_DIM_RZ - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(n_rz_azimuthal_modes == 1, - "Error: RZ electrostatic only implemented for a single mode"); -#endif - - // Allocate fields for charge and potential - const int num_levels = max_level + 1; - Vector > rho(num_levels); - Vector > rho_coarse(num_levels); // Used in order to interpolate between levels - Vector > phi(num_levels); - // Use number of guard cells used for local deposition of rho - const amrex::IntVect ng = guard_cells.ng_depos_rho; - for (int lev = 0; lev <= max_level; lev++) { - BoxArray nba = boxArray(lev); - nba.surroundingNodes(); - rho[lev] = std::make_unique(nba, DistributionMap(lev), 1, ng); - rho[lev]->setVal(0.); - phi[lev] = std::make_unique(nba, DistributionMap(lev), 1, 1); - phi[lev]->setVal(0.); - if (lev > 0) { - // For MR levels: allocated the coarsened version of rho - BoxArray cba = nba; - cba.coarsen(refRatio(lev-1)); - rho_coarse[lev] = std::make_unique(cba, DistributionMap(lev), 1, ng); - rho_coarse[lev]->setVal(0.); - } - } - - // Deposit particle charge density (source of Poisson solver) - // The options below are identical to those in MultiParticleContainer::DepositCharge - bool const local = true; - bool const reset = false; - bool const apply_boundary_and_scale_volume = true; - bool const interpolate_across_levels = false; - if ( !pc.do_not_deposit) { - pc.DepositCharge(rho, local, reset, apply_boundary_and_scale_volume, - interpolate_across_levels); - } - for (int lev = 0; lev <= max_level; lev++) { - if (lev > 0) { - if (charge_buf[lev]) { - charge_buf[lev]->setVal(0.); - } - } - } - SyncRho(rho, rho_coarse, charge_buf); // Apply filter, perform MPI exchange, interpolate across levels - - // Get the particle beta vector - bool const local_average = false; // Average across all MPI ranks - std::array beta_pr = 
pc.meanParticleVelocity(local_average); - std::array beta; - for (int i=0 ; i < static_cast(beta.size()) ; i++) { - beta[i] = beta_pr[i]/PhysConst::c; // Normalize - } - - // Compute the potential phi, by solving the Poisson equation - computePhi( rho, phi, beta, pc.self_fields_required_precision, - pc.self_fields_absolute_tolerance, pc.self_fields_max_iters, - pc.self_fields_verbosity ); - - // Compute the corresponding electric and magnetic field, from the potential phi - computeE( Efield_fp, phi, beta ); - computeB( Bfield_fp, phi, beta ); - -} - -void -WarpX::AddSpaceChargeFieldLabFrame () -{ - WARPX_PROFILE("WarpX::AddSpaceChargeFieldLabFrame"); - - // Store the boundary conditions for the field solver if they haven't been - // stored yet - if (!m_poisson_boundary_handler.bcs_set) { - m_poisson_boundary_handler.definePhiBCs(Geom(0)); - } - -#ifdef WARPX_DIM_RZ - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(n_rz_azimuthal_modes == 1, - "Error: RZ electrostatic only implemented for a single mode"); -#endif - - // Deposit particle charge density (source of Poisson solver) - mypc->DepositCharge(rho_fp, 0.0_rt); - if (do_fluid_species) { - int const lev = 0; - myfl->DepositCharge( lev, *rho_fp[lev] ); - } - for (int lev = 0; lev <= max_level; lev++) { - if (lev > 0) { - if (charge_buf[lev]) { - charge_buf[lev]->setVal(0.); - } - } - } - SyncRho(rho_fp, rho_cp, charge_buf); // Apply filter, perform MPI exchange, interpolate across levels -#ifndef WARPX_DIM_RZ - for (int lev = 0; lev <= finestLevel(); lev++) { - // Reflect density over PEC boundaries, if needed. - ApplyRhofieldBoundary(lev, rho_fp[lev].get(), PatchType::fine); - } -#endif - - // beta is zero in lab frame - // Todo: use simpler finite difference form with beta=0 - const std::array beta = {0._rt}; - - // set the boundary potentials appropriately - setPhiBC(phi_fp); - - // Compute the potential phi, by solving the Poisson equation - if (IsPythonCallbackInstalled("poissonsolver")) { - - // Use the Python level solver (user specified) - ExecutePythonCallback("poissonsolver"); - - } else { - -#if defined(WARPX_DIM_1D_Z) - // Use the tridiag solver with 1D - computePhiTriDiagonal(rho_fp, phi_fp); -#else - // Use the AMREX MLMG or the FFT (IGF) solver otherwise - computePhi(rho_fp, phi_fp, beta, self_fields_required_precision, - self_fields_absolute_tolerance, self_fields_max_iters, - self_fields_verbosity); -#endif - - } - - // Compute the electric field. Note that if an EB is used the electric - // field will be calculated in the computePhi call. - if (!EB::enabled()) { computeE( Efield_fp, phi_fp, beta ); } - else { - if (IsPythonCallbackInstalled("poissonsolver")) { computeE(Efield_fp, phi_fp, beta); } - } - - // Compute the magnetic field - computeB( Bfield_fp, phi_fp, beta ); -} - -/* Compute the potential `phi` by solving the Poisson equation with `rho` as - a source, assuming that the source moves at a constant speed \f$\vec{\beta}\f$. - This uses the amrex solver. 
- - More specifically, this solves the equation - \f[ - \vec{\nabla}^2 r \phi - (\vec{\beta}\cdot\vec{\nabla})^2 r \phi = -\frac{r \rho}{\epsilon_0} - \f] - - \param[in] rho The charge density a given species - \param[out] phi The potential to be computed by this function - \param[in] beta Represents the velocity of the source of `phi` - \param[in] required_precision The relative convergence threshold for the MLMG solver - \param[in] absolute_tolerance The absolute convergence threshold for the MLMG solver - \param[in] max_iters The maximum number of iterations allowed for the MLMG solver - \param[in] verbosity The verbosity setting for the MLMG solver -*/ -void -WarpX::computePhi (const amrex::Vector >& rho, - amrex::Vector >& phi, - std::array const beta, - Real const required_precision, - Real absolute_tolerance, - int const max_iters, - int const verbosity) const { - // create a vector to our fields, sorted by level - amrex::Vector sorted_rho; - amrex::Vector sorted_phi; - for (int lev = 0; lev <= finest_level; ++lev) { - sorted_rho.emplace_back(rho[lev].get()); - sorted_phi.emplace_back(phi[lev].get()); - } - - std::optional post_phi_calculation; -#ifdef AMREX_USE_EB - std::optional > eb_farray_box_factory; -#else - std::optional > const eb_farray_box_factory; -#endif - if (EB::enabled()) - { - // EB: use AMReX to directly calculate the electric field since with EB's the - // simple finite difference scheme in WarpX::computeE sometimes fails - if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame || - electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) - { - // TODO: maybe make this a helper function or pass Efield_fp directly - amrex::Vector< - amrex::Array - > e_field; - for (int lev = 0; lev <= finest_level; ++lev) { - e_field.push_back( -# if defined(WARPX_DIM_1D_Z) - amrex::Array{ - getFieldPointer(FieldType::Efield_fp, lev, 2) - } -# elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::Array{ - getFieldPointer(FieldType::Efield_fp, lev, 0), - getFieldPointer(FieldType::Efield_fp, lev, 2) - } -# elif defined(WARPX_DIM_3D) - amrex::Array{ - getFieldPointer(FieldType::Efield_fp, lev, 0), - getFieldPointer(FieldType::Efield_fp, lev, 1), - getFieldPointer(FieldType::Efield_fp, lev, 2) - } -# endif - ); - } - post_phi_calculation = ElectrostaticSolver::EBCalcEfromPhiPerLevel(e_field); - } - -#ifdef AMREX_USE_EB - amrex::Vector< - amrex::EBFArrayBoxFactory const * - > factories; - for (int lev = 0; lev <= finest_level; ++lev) { - factories.push_back(&WarpX::fieldEBFactory(lev)); - } - eb_farray_box_factory = factories; -#endif - } - - bool const is_solver_igf_on_lev0 = - WarpX::poisson_solver_id == PoissonSolverAlgo::IntegratedGreenFunction; - - ablastr::fields::computePhi( - sorted_rho, - sorted_phi, - beta, - required_precision, - absolute_tolerance, - max_iters, - verbosity, - this->geom, - this->dmap, - this->grids, - WarpX::grid_type, - this->m_poisson_boundary_handler, - is_solver_igf_on_lev0, - EB::enabled(), - WarpX::do_single_precision_comms, - this->ref_ratio, - post_phi_calculation, - gett_new(0), - eb_farray_box_factory - ); - -} - - -/* \brief Set Dirichlet boundary conditions for the electrostatic solver. - - The given potential's values are fixed on the boundaries of the given - dimension according to the desired values from the simulation input file, - boundary.potential_lo and boundary.potential_hi. 
- - \param[inout] phi The electrostatic potential - \param[in] idim The dimension for which the Dirichlet boundary condition is set -*/ -void -WarpX::setPhiBC ( amrex::Vector>& phi ) const -{ - // check if any dimension has non-periodic boundary conditions - if (!m_poisson_boundary_handler.has_non_periodic) { return; } - - // get the boundary potentials at the current time - amrex::Array phi_bc_values_lo; - amrex::Array phi_bc_values_hi; - phi_bc_values_lo[WARPX_ZINDEX] = m_poisson_boundary_handler.potential_zlo(gett_new(0)); - phi_bc_values_hi[WARPX_ZINDEX] = m_poisson_boundary_handler.potential_zhi(gett_new(0)); -#ifndef WARPX_DIM_1D_Z - phi_bc_values_lo[0] = m_poisson_boundary_handler.potential_xlo(gett_new(0)); - phi_bc_values_hi[0] = m_poisson_boundary_handler.potential_xhi(gett_new(0)); -#endif -#if defined(WARPX_DIM_3D) - phi_bc_values_lo[1] = m_poisson_boundary_handler.potential_ylo(gett_new(0)); - phi_bc_values_hi[1] = m_poisson_boundary_handler.potential_yhi(gett_new(0)); -#endif - - auto dirichlet_flag = m_poisson_boundary_handler.dirichlet_flag; - - // loop over all mesh refinement levels and set the boundary values - for (int lev=0; lev <= max_level; lev++) { - - amrex::Box domain = Geom(lev).Domain(); - domain.surroundingNodes(); - -#ifdef AMREX_USE_OMP -#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) -#endif - for ( MFIter mfi(*phi[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi ) { - // Extract the potential - auto phi_arr = phi[lev]->array(mfi); - // Extract tileboxes for which to loop - const Box& tb = mfi.tilebox( phi[lev]->ixType().toIntVect() ); - - // loop over dimensions - for (int idim=0; idim, 3> >& E, - const amrex::Vector >& phi, - std::array const beta ) const -{ - for (int lev = 0; lev <= max_level; lev++) { - - const Real* dx = Geom(lev).CellSize(); - -#ifdef AMREX_USE_OMP -# pragma omp parallel if (Gpu::notInLaunchRegion()) -#endif - for ( MFIter mfi(*phi[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi ) - { -#if defined(WARPX_DIM_3D) - const Real inv_dx = 1._rt/dx[0]; - const Real inv_dy = 1._rt/dx[1]; - const Real inv_dz = 1._rt/dx[2]; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const Real inv_dx = 1._rt/dx[0]; - const Real inv_dz = 1._rt/dx[1]; -#else - const Real inv_dz = 1._rt/dx[0]; -#endif - const amrex::IntVect ex_type = E[lev][0]->ixType().toIntVect(); - const amrex::IntVect ey_type = E[lev][1]->ixType().toIntVect(); - const amrex::IntVect ez_type = E[lev][2]->ixType().toIntVect(); - - const amrex::Box& tbx = mfi.tilebox(ex_type); - const amrex::Box& tby = mfi.tilebox(ey_type); - const amrex::Box& tbz = mfi.tilebox(ez_type); - - const auto& phi_arr = phi[lev]->array(mfi); - const auto& Ex_arr = (*E[lev][0])[mfi].array(); - const auto& Ey_arr = (*E[lev][1])[mfi].array(); - const auto& Ez_arr = (*E[lev][2])[mfi].array(); - - const Real beta_x = beta[0]; - const Real beta_y = beta[1]; - const Real beta_z = beta[2]; - - // Calculate the electric field - // Use discretized derivative that matches the staggering of the grid. 
- // Nodal solver - if (ex_type == amrex::IntVect::TheNodeVector() && - ey_type == amrex::IntVect::TheNodeVector() && - ez_type == amrex::IntVect::TheNodeVector()) - { -#if defined(WARPX_DIM_3D) - amrex::ParallelFor( tbx, tby, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ex_arr(i,j,k) += - +(beta_x*beta_x-1._rt)*0.5_rt*inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i-1,j ,k )) - + beta_x*beta_y *0.5_rt*inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j-1,k )) - + beta_x*beta_z *0.5_rt*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k-1)); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ey_arr(i,j,k) += - + beta_y*beta_x *0.5_rt*inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i-1,j ,k )) - +(beta_y*beta_y-1._rt)*0.5_rt*inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j-1,k )) - + beta_y*beta_z *0.5_rt*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k-1)); }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ez_arr(i,j,k) += - + beta_z*beta_x *0.5_rt*inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i-1,j ,k )) - + beta_z*beta_y *0.5_rt*inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j-1,k )) - +(beta_z*beta_z-1._rt)*0.5_rt*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k-1)); - } - ); -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::ParallelFor( tbx, tby, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ex_arr(i,j,k) += - +(beta_x*beta_x-1._rt)*0.5_rt*inv_dx*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k)) - + beta_x*beta_z *0.5_rt*inv_dz*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k)); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ey_arr(i,j,k) += - +beta_x*beta_y*0.5_rt*inv_dx*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k)) - +beta_y*beta_z*0.5_rt*inv_dz*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k)); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ez_arr(i,j,k) += - + beta_z*beta_x *0.5_rt*inv_dx*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k)) - +(beta_z*beta_z-1._rt)*0.5_rt*inv_dz*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k)); - } - ); -#else - amrex::ParallelFor( tbx, tby, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ex_arr(i,j,k) += - +(beta_x*beta_z-1._rt)*0.5_rt*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i-1,j,k)); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ey_arr(i,j,k) += - +beta_y*beta_z*0.5_rt*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i-1,j,k)); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ez_arr(i,j,k) += - +(beta_z*beta_z-1._rt)*0.5_rt*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i-1,j,k)); - } - ); -#endif - } - else // Staggered solver - { -#if defined(WARPX_DIM_3D) - amrex::ParallelFor( tbx, tby, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ex_arr(i,j,k) += - +(beta_x*beta_x-1._rt) *inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i ,j ,k )) - + beta_x*beta_y*0.25_rt*inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j-1,k ) - + phi_arr(i+1,j+1,k )-phi_arr(i+1,j-1,k )) - + beta_x*beta_z*0.25_rt*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k-1) - + phi_arr(i+1,j ,k+1)-phi_arr(i+1,j ,k-1)); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ey_arr(i,j,k) += - + beta_y*beta_x*0.25_rt*inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i-1,j ,k ) - + phi_arr(i+1,j+1,k )-phi_arr(i-1,j+1,k )) - +(beta_y*beta_y-1._rt) *inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j ,k )) - + beta_y*beta_z*0.25_rt*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k-1) - + phi_arr(i ,j+1,k+1)-phi_arr(i ,j+1,k-1)); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ez_arr(i,j,k) += - + beta_z*beta_x*0.25_rt*inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i-1,j ,k ) - + phi_arr(i+1,j ,k+1)-phi_arr(i-1,j ,k+1)) - + beta_z*beta_y*0.25_rt*inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j-1,k ) - + phi_arr(i ,j+1,k+1)-phi_arr(i ,j-1,k+1)) - +(beta_z*beta_z-1._rt) 
*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k )); - } - ); -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::ParallelFor( tbx, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ex_arr(i,j,k) += - +(beta_x*beta_x-1._rt)*inv_dx*(phi_arr(i+1,j ,k)-phi_arr(i ,j ,k)) - +beta_x*beta_z*0.25_rt*inv_dz*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k) - + phi_arr(i+1,j+1,k)-phi_arr(i+1,j-1,k)); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ez_arr(i,j,k) += - +beta_z*beta_x*0.25_rt*inv_dx*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k) - + phi_arr(i+1,j+1,k)-phi_arr(i-1,j+1,k)) - +(beta_z*beta_z-1._rt)*inv_dz*(phi_arr(i ,j+1,k)-phi_arr(i ,j ,k)); - } - ); - ignore_unused(beta_y); -#else - amrex::ParallelFor( tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ez_arr(i,j,k) += - +(beta_z*beta_z-1._rt)*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i,j,k)); - } - ); - ignore_unused(beta_x,beta_y); -#endif - } - } - } -} - - -/* \brief Compute the magnetic field that corresponds to `phi`, and - add it to the set of MultiFab `B`. - - The magnetic field is calculated by assuming that the source that - produces the `phi` potential is moving with a constant speed \f$\vec{\beta}\f$: - \f[ - \vec{B} = -\frac{1}{c}\vec{\beta}\times\vec{\nabla}\phi - \f] - (this represents the term \f$\vec{\nabla} \times \vec{A}\f$, in the case of a moving source) - - \param[inout] E Electric field on the grid - \param[in] phi The potential from which to compute the electric field - \param[in] beta Represents the velocity of the source of `phi` -*/ -void -WarpX::computeB (amrex::Vector, 3> >& B, - const amrex::Vector >& phi, - std::array const beta ) const -{ - // return early if beta is 0 since there will be no B-field - if ((beta[0] == 0._rt) && (beta[1] == 0._rt) && (beta[2] == 0._rt)) { return; } - - for (int lev = 0; lev <= max_level; lev++) { - - const Real* dx = Geom(lev).CellSize(); - -#ifdef AMREX_USE_OMP -# pragma omp parallel if (Gpu::notInLaunchRegion()) -#endif - for ( MFIter mfi(*phi[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi ) - { -#if defined(WARPX_DIM_3D) - const Real inv_dx = 1._rt/dx[0]; - const Real inv_dy = 1._rt/dx[1]; - const Real inv_dz = 1._rt/dx[2]; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const Real inv_dx = 1._rt/dx[0]; - const Real inv_dz = 1._rt/dx[1]; -#else - const Real inv_dz = 1._rt/dx[0]; -#endif - const amrex::IntVect bx_type = B[lev][0]->ixType().toIntVect(); - const amrex::IntVect by_type = B[lev][1]->ixType().toIntVect(); - const amrex::IntVect bz_type = B[lev][2]->ixType().toIntVect(); - - const amrex::Box& tbx = mfi.tilebox(bx_type); - const amrex::Box& tby = mfi.tilebox(by_type); - const amrex::Box& tbz = mfi.tilebox(bz_type); - - const auto& phi_arr = phi[lev]->array(mfi); - const auto& Bx_arr = (*B[lev][0])[mfi].array(); - const auto& By_arr = (*B[lev][1])[mfi].array(); - const auto& Bz_arr = (*B[lev][2])[mfi].array(); - - const Real beta_x = beta[0]; - const Real beta_y = beta[1]; - const Real beta_z = beta[2]; - - constexpr Real inv_c = 1._rt/PhysConst::c; - - // Calculate the magnetic field - // Use discretized derivative that matches the staggering of the grid. 
- // Nodal solver - if (bx_type == amrex::IntVect::TheNodeVector() && - by_type == amrex::IntVect::TheNodeVector() && - bz_type == amrex::IntVect::TheNodeVector()) - { -#if defined(WARPX_DIM_3D) - amrex::ParallelFor( tbx, tby, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Bx_arr(i,j,k) += inv_c * ( - -beta_y*inv_dz*0.5_rt*(phi_arr(i,j ,k+1)-phi_arr(i,j ,k-1)) - +beta_z*inv_dy*0.5_rt*(phi_arr(i,j+1,k )-phi_arr(i,j-1,k ))); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - By_arr(i,j,k) += inv_c * ( - -beta_z*inv_dx*0.5_rt*(phi_arr(i+1,j,k )-phi_arr(i-1,j,k )) - +beta_x*inv_dz*0.5_rt*(phi_arr(i ,j,k+1)-phi_arr(i ,j,k-1))); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Bz_arr(i,j,k) += inv_c * ( - -beta_x*inv_dy*0.5_rt*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k)) - +beta_y*inv_dx*0.5_rt*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k))); - } - ); -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::ParallelFor( tbx, tby, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Bx_arr(i,j,k) += inv_c * ( - -beta_y*inv_dz*0.5_rt*(phi_arr(i,j+1,k)-phi_arr(i,j-1,k))); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - By_arr(i,j,k) += inv_c * ( - -beta_z*inv_dx*0.5_rt*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k)) - +beta_x*inv_dz*0.5_rt*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k))); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Bz_arr(i,j,k) += inv_c * ( - +beta_y*inv_dx*0.5_rt*(phi_arr(i+1,j,k)-phi_arr(i-1,j,k))); - } - ); -#else - amrex::ParallelFor( tbx, tby, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Bx_arr(i,j,k) += inv_c * ( - -beta_y*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i,j,k))); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - By_arr(i,j,k) += inv_c * ( - +beta_x*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i,j,k))); - } - ); - ignore_unused(beta_z,tbz,Bz_arr); -#endif - } - else // Staggered solver - { -#if defined(WARPX_DIM_3D) - amrex::ParallelFor( tbx, tby, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Bx_arr(i,j,k) += inv_c * ( - -beta_y*inv_dz*0.5_rt*(phi_arr(i,j ,k+1)-phi_arr(i,j ,k ) - + phi_arr(i,j+1,k+1)-phi_arr(i,j+1,k )) - +beta_z*inv_dy*0.5_rt*(phi_arr(i,j+1,k )-phi_arr(i,j ,k ) - + phi_arr(i,j+1,k+1)-phi_arr(i,j ,k+1))); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - By_arr(i,j,k) += inv_c * ( - -beta_z*inv_dx*0.5_rt*(phi_arr(i+1,j,k )-phi_arr(i ,j,k ) - + phi_arr(i+1,j,k+1)-phi_arr(i ,j,k+1)) - +beta_x*inv_dz*0.5_rt*(phi_arr(i ,j,k+1)-phi_arr(i ,j,k ) - + phi_arr(i+1,j,k+1)-phi_arr(i+1,j,k ))); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Bz_arr(i,j,k) += inv_c * ( - -beta_x*inv_dy*0.5_rt*(phi_arr(i ,j+1,k)-phi_arr(i ,j ,k) - + phi_arr(i+1,j+1,k)-phi_arr(i+1,j ,k)) - +beta_y*inv_dx*0.5_rt*(phi_arr(i+1,j ,k)-phi_arr(i ,j ,k) - + phi_arr(i+1,j+1,k)-phi_arr(i ,j+1,k))); - } - ); -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::ParallelFor( tbx, tby, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Bx_arr(i,j,k) += inv_c * ( - -beta_y*inv_dz*(phi_arr(i,j+1,k)-phi_arr(i,j,k))); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - By_arr(i,j,k) += inv_c * ( - -beta_z*inv_dx*0.5_rt*(phi_arr(i+1,j ,k)-phi_arr(i ,j ,k) - + phi_arr(i+1,j+1,k)-phi_arr(i ,j+1,k)) - +beta_x*inv_dz*0.5_rt*(phi_arr(i ,j+1,k)-phi_arr(i ,j ,k) - + phi_arr(i+1,j+1,k)-phi_arr(i+1,j ,k))); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Bz_arr(i,j,k) += inv_c * ( - +beta_y*inv_dx*(phi_arr(i+1,j,k)-phi_arr(i,j,k))); - } - ); -#else - amrex::ParallelFor( tbx, tby, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Bx_arr(i,j,k) += inv_c * ( - 
-beta_y*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i,j,k))); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - By_arr(i,j,k) += inv_c * ( - +beta_x*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i,j,k))); - } - ); - ignore_unused(beta_z,tbz,Bz_arr); -#endif - } - } - } -} - -/* \brief Compute the potential by solving Poisson's equation with - a 1D tridiagonal solve. - - \param[in] rho The charge density a given species - \param[out] phi The potential to be computed by this function -*/ -void -WarpX::computePhiTriDiagonal (const amrex::Vector >& rho, - amrex::Vector >& phi) const -{ - - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(max_level == 0, - "The tridiagonal solver cannot be used with mesh refinement"); - - const int lev = 0; - - const amrex::Real* dx = Geom(lev).CellSize(); - const amrex::Real xmin = Geom(lev).ProbLo(0); - const amrex::Real xmax = Geom(lev).ProbHi(0); - const int nx_full_domain = static_cast( (xmax - xmin)/dx[0] + 0.5_rt ); - - int nx_solve_min = 1; - int nx_solve_max = nx_full_domain - 1; - - auto field_boundary_lo0 = WarpX::field_boundary_lo[0]; - auto field_boundary_hi0 = WarpX::field_boundary_hi[0]; - if (field_boundary_lo0 == FieldBoundaryType::Neumann || field_boundary_lo0 == FieldBoundaryType::Periodic) { - // Neumann or periodic boundary condition - // Solve for the point on the lower boundary - nx_solve_min = 0; - } - if (field_boundary_hi0 == FieldBoundaryType::Neumann || field_boundary_hi0 == FieldBoundaryType::Periodic) { - // Neumann or periodic boundary condition - // Solve for the point on the upper boundary - nx_solve_max = nx_full_domain; - } - - // Create a 1-D MultiFab that covers all of x. - // The tridiag solve will be done in this MultiFab and then copied out afterwards. - const amrex::IntVect lo_full_domain(AMREX_D_DECL(0,0,0)); - const amrex::IntVect hi_full_domain(AMREX_D_DECL(nx_full_domain,0,0)); - const amrex::Box box_full_domain_node(lo_full_domain, hi_full_domain, amrex::IntVect::TheNodeVector()); - const BoxArray ba_full_domain_node(box_full_domain_node); - const amrex::Vector pmap = {0}; // The data will only be on processor 0 - const amrex::DistributionMapping dm_full_domain(pmap); - - // Put the data in the pinned arena since the tridiag solver will be done on the CPU, but have - // the data readily accessible from the GPU. - auto phi1d_mf = MultiFab(ba_full_domain_node, dm_full_domain, 1, 0, MFInfo().SetArena(The_Pinned_Arena())); - auto zwork1d_mf = MultiFab(ba_full_domain_node, dm_full_domain, 1, 0, MFInfo().SetArena(The_Pinned_Arena())); - auto rho1d_mf = MultiFab(ba_full_domain_node, dm_full_domain, 1, 0, MFInfo().SetArena(The_Pinned_Arena())); - - if (field_boundary_lo0 == FieldBoundaryType::PEC || field_boundary_hi0 == FieldBoundaryType::PEC) { - // Copy from phi to get the boundary values - phi1d_mf.ParallelCopy(*phi[lev], 0, 0, 1); - } - rho1d_mf.ParallelCopy(*rho[lev], 0, 0, 1); - - // Multiplier on the charge density - const amrex::Real norm = dx[0]*dx[0]/PhysConst::ep0; - rho1d_mf.mult(norm); - - // Use the MFIter loop since when parallel, only process zero has a FAB. - // This skips the loop on all other processors. 
- for (MFIter mfi(phi1d_mf); mfi.isValid(); ++mfi) { - - const auto& phi1d_arr = phi1d_mf[mfi].array(); - const auto& zwork1d_arr = zwork1d_mf[mfi].array(); - const auto& rho1d_arr = rho1d_mf[mfi].array(); - - // The loops are always performed on the CPU - - amrex::Real diag = 2._rt; - - // The initial values depend on the boundary condition - if (field_boundary_lo0 == FieldBoundaryType::PEC) { - - phi1d_arr(1,0,0) = (phi1d_arr(0,0,0) + rho1d_arr(1,0,0))/diag; - - } else if (field_boundary_lo0 == FieldBoundaryType::Neumann) { - - // Neumann boundary condition - phi1d_arr(0,0,0) = rho1d_arr(0,0,0)/diag; - - zwork1d_arr(1,0,0) = 2._rt/diag; - diag = 2._rt - zwork1d_arr(1,0,0); - phi1d_arr(1,0,0) = (rho1d_arr(1,0,0) - (-1._rt)*phi1d_arr(1-1,0,0))/diag; - - } else if (field_boundary_lo0 == FieldBoundaryType::Periodic) { - - phi1d_arr(0,0,0) = rho1d_arr(0,0,0)/diag; - - zwork1d_arr(1,0,0) = 1._rt/diag; - diag = 2._rt - zwork1d_arr(1,0,0); - phi1d_arr(1,0,0) = (rho1d_arr(1,0,0) - (-1._rt)*phi1d_arr(1-1,0,0))/diag; - - } - - // Loop upward, calculating the Gaussian elimination multipliers and right hand sides - for (int i_up = 2 ; i_up < nx_solve_max ; i_up++) { - - zwork1d_arr(i_up,0,0) = 1._rt/diag; - diag = 2._rt - zwork1d_arr(i_up,0,0); - phi1d_arr(i_up,0,0) = (rho1d_arr(i_up,0,0) - (-1._rt)*phi1d_arr(i_up-1,0,0))/diag; - - } - - // The last value depend on the boundary condition - amrex::Real zwork_product = 1.; // Needed for parallel boundaries - if (field_boundary_hi0 == FieldBoundaryType::PEC) { - - int const nxm1 = nx_full_domain - 1; - zwork1d_arr(nxm1,0,0) = 1._rt/diag; - diag = 2._rt - zwork1d_arr(nxm1,0,0); - phi1d_arr(nxm1,0,0) = (phi1d_arr(nxm1+1,0,0) + rho1d_arr(nxm1,0,0) - (-1._rt)*phi1d_arr(nxm1-1,0,0))/diag; - - } else if (field_boundary_hi0 == FieldBoundaryType::Neumann) { - - // Neumann boundary condition - zwork1d_arr(nx_full_domain,0,0) = 1._rt/diag; - diag = 2._rt - 2._rt*zwork1d_arr(nx_full_domain,0,0); - if (diag == 0._rt) { - // This happens if the lower boundary is also Neumann. - // It this case, the potential is relative to an arbitrary constant, - // so set the upper boundary to zero to force a value. - phi1d_arr(nx_full_domain,0,0) = 0.; - } else { - phi1d_arr(nx_full_domain,0,0) = (rho1d_arr(nx_full_domain,0,0) - (-1._rt)*phi1d_arr(nx_full_domain-1,0,0))/diag; - } - - } else if (field_boundary_hi0 == FieldBoundaryType::Periodic) { - - zwork1d_arr(nx_full_domain,0,0) = 1._rt/diag; - - for (int i = 1 ; i <= nx_full_domain ; i++) { - zwork_product *= zwork1d_arr(i,0,0); - } - - diag = 2._rt - zwork1d_arr(nx_full_domain,0,0) - zwork_product; - // Note that rho1d_arr(0,0,0) is used to ensure that the same value is used - // on both boundaries. 
- phi1d_arr(nx_full_domain,0,0) = (rho1d_arr(0,0,0) - (-1._rt)*phi1d_arr(nx_full_domain-1,0,0))/diag; - - } - - // Loop downward to calculate the phi - if (field_boundary_lo0 == FieldBoundaryType::Periodic) { - - // With periodic, the right hand column adds an extra term for all rows - for (int i_down = nx_full_domain-1 ; i_down >= 0 ; i_down--) { - zwork_product /= zwork1d_arr(i_down+1,0,0); - phi1d_arr(i_down,0,0) = phi1d_arr(i_down,0,0) + zwork1d_arr(i_down+1,0,0)*phi1d_arr(i_down+1,0,0) + zwork_product*phi1d_arr(nx_full_domain,0,0); - } - - } else { - - for (int i_down = nx_solve_max-1 ; i_down >= nx_solve_min ; i_down--) { - phi1d_arr(i_down,0,0) = phi1d_arr(i_down,0,0) + zwork1d_arr(i_down+1,0,0)*phi1d_arr(i_down+1,0,0); - } - - } - - } - - // Copy phi1d to phi - phi[lev]->ParallelCopy(phi1d_mf, 0, 0, 1); -} - -void ElectrostaticSolver::PoissonBoundaryHandler::definePhiBCs (const amrex::Geometry& geom) -{ -#ifdef WARPX_DIM_RZ - if (geom.ProbLo(0) == 0){ - lobc[0] = LinOpBCType::Neumann; - dirichlet_flag[0] = false; - - // handle the r_max boundary explicitly - if (WarpX::field_boundary_hi[0] == FieldBoundaryType::PEC) { - hibc[0] = LinOpBCType::Dirichlet; - dirichlet_flag[1] = true; - } - else if (WarpX::field_boundary_hi[0] == FieldBoundaryType::Neumann) { - hibc[0] = LinOpBCType::Neumann; - dirichlet_flag[1] = false; - } - else { - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(false, - "Field boundary condition at the outer radius must be either PEC or neumann " - "when using the electrostatic solver" - ); - } - } - const int dim_start = 1; -#else - const int dim_start = 0; - amrex::ignore_unused(geom); -#endif - for (int idim=dim_start; idim(); - potential_xhi = potential_xhi_parser.compile<1>(); - potential_ylo = potential_ylo_parser.compile<1>(); - potential_yhi = potential_yhi_parser.compile<1>(); - potential_zlo = potential_zlo_parser.compile<1>(); - potential_zhi = potential_zhi_parser.compile<1>(); - - buildParsersEB(); -} - -void ElectrostaticSolver::PoissonBoundaryHandler::buildParsersEB () -{ - potential_eb_parser = utils::parser::makeParser(potential_eb_str, {"x", "y", "z", "t"}); - - // check if the EB potential is a function of space or only of time - const std::set eb_symbols = potential_eb_parser.symbols(); - if ((eb_symbols.count("x") != 0) || (eb_symbols.count("y") != 0) - || (eb_symbols.count("z") != 0)) { - potential_eb = potential_eb_parser.compile<4>(); - phi_EB_only_t = false; - } - else { - potential_eb_parser = utils::parser::makeParser(potential_eb_str, {"t"}); - potential_eb_t = potential_eb_parser.compile<1>(); - } -} diff --git a/Source/FieldSolver/ElectrostaticSolvers/CMakeLists.txt b/Source/FieldSolver/ElectrostaticSolvers/CMakeLists.txt new file mode 100644 index 00000000000..39c4478c110 --- /dev/null +++ b/Source/FieldSolver/ElectrostaticSolvers/CMakeLists.txt @@ -0,0 +1,10 @@ +foreach(D IN LISTS WarpX_DIMS) + warpx_set_suffix_dims(SD ${D}) + target_sources(lib_${SD} + PRIVATE + ElectrostaticSolver.cpp + LabFrameExplicitES.cpp + PoissonBoundaryHandler.cpp + RelativisticExplicitES.cpp + ) +endforeach() diff --git a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H new file mode 100644 index 00000000000..8d23088799f --- /dev/null +++ b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H @@ -0,0 +1,163 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. 
+ *
+ * Authors: Roelof Groenewald, Arianna Formenti, Revathi Jambunathan
+ *
+ * License: BSD-3-Clause-LBNL
+ */
+#ifndef WARPX_ELECTROSTATICSOLVER_H_
+#define WARPX_ELECTROSTATICSOLVER_H_
+
+#include "PoissonBoundaryHandler.H"
+#include "Fluids/MultiFluidContainer.H"
+#include "Particles/MultiParticleContainer.H"
+#include "Utils/WarpXProfilerWrapper.H"
+#include "WarpX.H"
+
+#include
+
+
+/**
+ * \brief Base class for Electrostatic Solver
+ *
+ */
+class ElectrostaticSolver
+{
+public:
+    ElectrostaticSolver() = default;
+    ElectrostaticSolver( int nlevs_max );
+
+    virtual ~ElectrostaticSolver();
+
+    // Prohibit Move and Copy operations
+    ElectrostaticSolver(const ElectrostaticSolver&) = delete;
+    ElectrostaticSolver& operator=(const ElectrostaticSolver&) = delete;
+    ElectrostaticSolver(ElectrostaticSolver&&) = delete;
+    ElectrostaticSolver& operator=(ElectrostaticSolver&&) = delete;
+
+    void ReadParameters ();
+
+    virtual void InitData () {}
+
+    /**
+     * \brief Computes the charge density, rho, and solves Poisson's equation
+     * to obtain the associated electrostatic potential, phi.
+     * Using the electrostatic potential, the electric field is computed
+     * in the lab frame; in the relativistic case, the electric and magnetic
+     * fields are computed from the potential, phi, and the velocity, beta,
+     * of the source of the potential.
+     * This function must be defined in the derived classes.
+     */
+    virtual void ComputeSpaceChargeField (
+        [[maybe_unused]] amrex::Vector< std::unique_ptr<amrex::MultiFab> >& rho_fp,
+        [[maybe_unused]] amrex::Vector< std::unique_ptr<amrex::MultiFab> >& rho_cp,
+        [[maybe_unused]] amrex::Vector< std::unique_ptr<amrex::MultiFab> >& charge_buf,
+        [[maybe_unused]] amrex::Vector< std::unique_ptr<amrex::MultiFab> >& phi_fp,
+        [[maybe_unused]] MultiParticleContainer& mpc,
+        [[maybe_unused]] MultiFluidContainer* mfl,
+        [[maybe_unused]] amrex::Vector< std::array< std::unique_ptr<amrex::MultiFab>, 3> >& Efield_fp,
+        [[maybe_unused]] amrex::Vector< std::array< std::unique_ptr<amrex::MultiFab>, 3> >& Bfield_fp
+    ) = 0;
+
+    /**
+     * \brief Set Dirichlet boundary conditions for the electrostatic solver.
+     * The given potential's values are fixed on the boundaries of the given
+     * dimension according to the desired values from the simulation input file,
+     * boundary.potential_lo and boundary.potential_hi.
+     * \param[inout] phi The electrostatic potential
+     * \param[in] t The current simulation time, at which the (possibly
+     *              time-dependent) boundary potential expressions are evaluated
+     */
+    void setPhiBC (
+        amrex::Vector<std::unique_ptr<amrex::MultiFab>>& phi,
+        amrex::Real t
+    ) const;
+
+    /**
+     * Compute the potential `phi` by solving the Poisson equation with `rho` as
+     * a source, assuming that the source moves at a constant speed \f$\vec{\beta}\f$.
+     * This uses the AMReX solver.
+     * More specifically, this solves the equation
+     * \f[
+     *   \vec{\nabla}^2 r \phi - (\vec{\beta}\cdot\vec{\nabla})^2 r \phi = -\frac{r \rho}{\epsilon_0}
+     * \f]
+     * \param[out] phi The potential to be computed by this function
+     * \param[in] rho The charge density for a given species (relativistic solver)
+     *                or total charge density (labframe solver)
+     * \param[in] beta Represents the velocity of the source of `phi`
+     * \param[in] required_precision The relative convergence threshold for the MLMG solver
+     * \param[in] absolute_tolerance The absolute convergence threshold for the MLMG solver
+     * \param[in] max_iters The maximum number of iterations allowed for the MLMG solver
+     * \param[in] verbosity The verbosity setting for the MLMG solver
+     */
+    void computePhi (
+        const amrex::Vector< std::unique_ptr<amrex::MultiFab> >& rho,
+        amrex::Vector< std::unique_ptr<amrex::MultiFab> >& phi,
+        std::array<amrex::Real, 3> beta,
+        amrex::Real required_precision,
+        amrex::Real absolute_tolerance,
+        int max_iters,
+        int verbosity
+    ) const;
+
+    /**
+     * \brief Compute the electric field that corresponds to `phi`, and
+     * add it to the set of MultiFab `E`.
+     * The electric field is calculated by assuming that the source that
+     * produces the `phi` potential is moving with a constant speed \f$\vec{\beta}\f$:
+     * \f[
+     *   \vec{E} = -\vec{\nabla}\phi + \vec{\beta}(\vec{\beta} \cdot \vec{\nabla}\phi)
+     * \f]
+     * (where the second term represents the term \f$\partial_t \vec{A}\f$, in
+     * the case of a moving source)
+     *
+     * \param[inout] E Electric field on the grid
+     * \param[in] phi The potential from which to compute the electric field
+     * \param[in] beta Represents the velocity of the source of `phi`
+     */
+    void computeE (
+        amrex::Vector< std::array< std::unique_ptr<amrex::MultiFab>, 3> >& E,
+        const amrex::Vector< std::unique_ptr<amrex::MultiFab> >& phi,
+        std::array<amrex::Real, 3> beta
+    ) const;
+
+    /**
+     * \brief Compute the magnetic field that corresponds to `phi`, and
+     * add it to the set of MultiFab `B`.
+     * The magnetic field is calculated by assuming that the source that
+     * produces the `phi` potential is moving with a constant speed \f$\vec{\beta}\f$:
+     * \f[
+     *   \vec{B} = -\frac{1}{c}\vec{\beta}\times\vec{\nabla}\phi
+     * \f]
+     * (this represents the term \f$\vec{\nabla} \times \vec{A}\f$, in the case of a moving source)
+     *
+     * \param[inout] B Magnetic field on the grid
+     * \param[in] phi The potential from which to compute the magnetic field
+     * \param[in] beta Represents the velocity of the source of `phi`
+     */
+    void computeB (
+        amrex::Vector< std::array< std::unique_ptr<amrex::MultiFab>, 3> >& B,
+        const amrex::Vector< std::unique_ptr<amrex::MultiFab> >& phi,
+        std::array<amrex::Real, 3> beta
+    ) const;
+
+    /** Maximum number of levels for the electrostatic solver grid */
+    int num_levels;
+
+    /** Boundary handler object to set potential for EB and on the domain boundary */
+    std::unique_ptr<PoissonBoundaryHandler> m_poisson_boundary_handler;
+
+    /** Parameters for MLMG Poisson solve */
+    amrex::Real self_fields_required_precision = 1e-11;
+    amrex::Real self_fields_absolute_tolerance = 0.0;
+    /** Limit on number of MLMG iterations */
+    int self_fields_max_iters = 200;
+    /** Verbosity for the MLMG solver.
+     *  0 : no verbosity
+     *  1 : timing and convergence at the end of MLMG
+     *  2 : convergence progress at every MLMG iteration
+     */
+    int self_fields_verbosity = 2;
+};
+
+#endif // WARPX_ELECTROSTATICSOLVER_H_
diff --git a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp
new file mode 100644
index 00000000000..895615a5b21
--- /dev/null
+++ b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp
@@ -0,0 +1,533 @@
+/* Copyright 2024 The WarpX Community
+ *
+ * This file is part of WarpX.
+ * + * Authors: Roelof Groenewald, Arianna Formenti, Revathi Jambunathan + * + * License: BSD-3-Clause-LBNL + */ + +#include "ElectrostaticSolver.H" +#include +#include "EmbeddedBoundary/Enabled.H" + +using namespace amrex; + +ElectrostaticSolver::ElectrostaticSolver (int nlevs_max) : num_levels{nlevs_max} +{ + // Create an instance of the boundary handler to properly set boundary + // conditions + m_poisson_boundary_handler = std::make_unique(); +} + +ElectrostaticSolver::~ElectrostaticSolver () = default; + +void ElectrostaticSolver::ReadParameters () { + + ParmParse const pp_warpx("warpx"); + + // Note that with the relativistic version, these parameters would be + // input for each species. + utils::parser::queryWithParser( + pp_warpx, "self_fields_required_precision", self_fields_required_precision); + utils::parser::queryWithParser( + pp_warpx, "self_fields_absolute_tolerance", self_fields_absolute_tolerance); + utils::parser::queryWithParser( + pp_warpx, "self_fields_max_iters", self_fields_max_iters); + pp_warpx.query("self_fields_verbosity", self_fields_verbosity); +} + +void +ElectrostaticSolver::setPhiBC ( + amrex::Vector>& phi, + amrex::Real t +) const +{ + // check if any dimension has non-periodic boundary conditions + if (!m_poisson_boundary_handler->has_non_periodic) { return; } + + // get the boundary potentials at the current time + amrex::Array phi_bc_values_lo; + amrex::Array phi_bc_values_hi; + phi_bc_values_lo[WARPX_ZINDEX] = m_poisson_boundary_handler->potential_zlo(t); + phi_bc_values_hi[WARPX_ZINDEX] = m_poisson_boundary_handler->potential_zhi(t); +#ifndef WARPX_DIM_1D_Z + phi_bc_values_lo[0] = m_poisson_boundary_handler->potential_xlo(t); + phi_bc_values_hi[0] = m_poisson_boundary_handler->potential_xhi(t); +#endif +#if defined(WARPX_DIM_3D) + phi_bc_values_lo[1] = m_poisson_boundary_handler->potential_ylo(t); + phi_bc_values_hi[1] = m_poisson_boundary_handler->potential_yhi(t); +#endif + + auto dirichlet_flag = m_poisson_boundary_handler->dirichlet_flag; + + auto & warpx = WarpX::GetInstance(); + + // loop over all mesh refinement levels and set the boundary values + for (int lev=0; lev < num_levels; lev++) { + + amrex::Box domain = warpx.Geom(lev).Domain(); + domain.surroundingNodes(); + +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + for ( MFIter mfi(*phi[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi ) { + // Extract the potential + auto phi_arr = phi[lev]->array(mfi); + // Extract tileboxes for which to loop + const Box& tb = mfi.tilebox( phi[lev]->ixType().toIntVect() ); + + // loop over dimensions + for (int idim=0; idim >& rho, + amrex::Vector >& phi, + std::array const beta, + Real const required_precision, + Real absolute_tolerance, + int const max_iters, + int const verbosity) const { + // create a vector to our fields, sorted by level + amrex::Vector sorted_rho; + amrex::Vector sorted_phi; + for (int lev = 0; lev < num_levels; ++lev) { + sorted_rho.emplace_back(rho[lev].get()); + sorted_phi.emplace_back(phi[lev].get()); + } + + std::optional post_phi_calculation; +#ifdef AMREX_USE_EB + // TODO: double check no overhead occurs on "m_eb_enabled == false" + std::optional > eb_farray_box_factory; +#else + std::optional > const eb_farray_box_factory; +#endif + auto & warpx = WarpX::GetInstance(); + if (EB::enabled()) + { + if (WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame || + WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) + { + // EB: use AMReX to 
directly calculate the electric field since with EB's the + // simple finite difference scheme in WarpX::computeE sometimes fails + + // TODO: maybe make this a helper function or pass Efield_fp directly + amrex::Vector< + amrex::Array + > e_field; + for (int lev = 0; lev < num_levels; ++lev) { + e_field.push_back( +#if defined(WARPX_DIM_1D_Z) + amrex::Array{ + warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 2) + } +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + amrex::Array{ + warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 0), + warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 2) + } +#elif defined(WARPX_DIM_3D) + amrex::Array{ + warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 0), + warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 1), + warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 2) + } +#endif + ); + } + post_phi_calculation = EBCalcEfromPhiPerLevel(e_field); + } +#ifdef AMREX_USE_EB + amrex::Vector< + amrex::EBFArrayBoxFactory const * + > factories; + for (int lev = 0; lev < num_levels; ++lev) { + factories.push_back(&warpx.fieldEBFactory(lev)); + } + eb_farray_box_factory = factories; +#endif + } + + bool const is_solver_igf_on_lev0 = + WarpX::poisson_solver_id == PoissonSolverAlgo::IntegratedGreenFunction; + + ablastr::fields::computePhi( + sorted_rho, + sorted_phi, + beta, + required_precision, + absolute_tolerance, + max_iters, + verbosity, + warpx.Geom(), + warpx.DistributionMap(), + warpx.boxArray(), + WarpX::grid_type, + *m_poisson_boundary_handler, + is_solver_igf_on_lev0, + EB::enabled(), + WarpX::do_single_precision_comms, + warpx.refRatio(), + post_phi_calculation, + warpx.gett_new(0), + eb_farray_box_factory + ); + +} + +void +ElectrostaticSolver::computeE (amrex::Vector, 3> >& E, + const amrex::Vector >& phi, + std::array const beta ) const +{ + auto & warpx = WarpX::GetInstance(); + for (int lev = 0; lev < num_levels; lev++) { + + const Real* dx = warpx.Geom(lev).CellSize(); + +#ifdef AMREX_USE_OMP +# pragma omp parallel if (Gpu::notInLaunchRegion()) +#endif + for ( MFIter mfi(*phi[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi ) + { +#if defined(WARPX_DIM_3D) + const Real inv_dx = 1._rt/dx[0]; + const Real inv_dy = 1._rt/dx[1]; + const Real inv_dz = 1._rt/dx[2]; +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + const Real inv_dx = 1._rt/dx[0]; + const Real inv_dz = 1._rt/dx[1]; +#else + const Real inv_dz = 1._rt/dx[0]; +#endif + const amrex::IntVect ex_type = E[lev][0]->ixType().toIntVect(); + const amrex::IntVect ey_type = E[lev][1]->ixType().toIntVect(); + const amrex::IntVect ez_type = E[lev][2]->ixType().toIntVect(); + + const amrex::Box& tbx = mfi.tilebox(ex_type); + const amrex::Box& tby = mfi.tilebox(ey_type); + const amrex::Box& tbz = mfi.tilebox(ez_type); + + const auto& phi_arr = phi[lev]->array(mfi); + const auto& Ex_arr = (*E[lev][0])[mfi].array(); + const auto& Ey_arr = (*E[lev][1])[mfi].array(); + const auto& Ez_arr = (*E[lev][2])[mfi].array(); + + const Real beta_x = beta[0]; + const Real beta_y = beta[1]; + const Real beta_z = beta[2]; + + // Calculate the electric field + // Use discretized derivative that matches the staggering of the grid. 
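+            // A sketch of where these stencils come from: for a source moving
+            // rigidly at velocity c*beta, the vector potential is A = beta*phi/c
+            // and d/dt = -c*(beta . grad), so
+            //     E = -grad(phi) - dA/dt = -grad(phi) + beta*(beta . grad(phi)),
+            // i.e. E_i += (beta_i*beta_j - delta_ij)*d_j(phi), evaluated below
+            // with centered (nodal) or half-cell-averaged (staggered) differences.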
+            // Nodal solver
+            if (ex_type == amrex::IntVect::TheNodeVector() &&
+                ey_type == amrex::IntVect::TheNodeVector() &&
+                ez_type == amrex::IntVect::TheNodeVector())
+            {
+#if defined(WARPX_DIM_3D)
+                amrex::ParallelFor( tbx, tby, tbz,
+                    [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+                        Ex_arr(i,j,k) +=
+                            +(beta_x*beta_x-1._rt)*0.5_rt*inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i-1,j ,k ))
+                            + beta_x*beta_y       *0.5_rt*inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j-1,k ))
+                            + beta_x*beta_z       *0.5_rt*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k-1));
+                    },
+                    [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+                        Ey_arr(i,j,k) +=
+                            + beta_y*beta_x       *0.5_rt*inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i-1,j ,k ))
+                            +(beta_y*beta_y-1._rt)*0.5_rt*inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j-1,k ))
+                            + beta_y*beta_z       *0.5_rt*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k-1));
+                    },
+                    [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+                        Ez_arr(i,j,k) +=
+                            + beta_z*beta_x       *0.5_rt*inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i-1,j ,k ))
+                            + beta_z*beta_y       *0.5_rt*inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j-1,k ))
+                            +(beta_z*beta_z-1._rt)*0.5_rt*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k-1));
+                    }
+                );
+#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
+                amrex::ParallelFor( tbx, tby, tbz,
+                    [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+                        Ex_arr(i,j,k) +=
+                            +(beta_x*beta_x-1._rt)*0.5_rt*inv_dx*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k))
+                            + beta_x*beta_z       *0.5_rt*inv_dz*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k));
+                    },
+                    [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+                        Ey_arr(i,j,k) +=
+                            +beta_x*beta_y*0.5_rt*inv_dx*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k))
+                            +beta_y*beta_z*0.5_rt*inv_dz*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k));
+                    },
+                    [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+                        Ez_arr(i,j,k) +=
+                            + beta_z*beta_x       *0.5_rt*inv_dx*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k))
+                            +(beta_z*beta_z-1._rt)*0.5_rt*inv_dz*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k));
+                    }
+                );
+#else
+                amrex::ParallelFor( tbx, tby, tbz,
+                    [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+                        Ex_arr(i,j,k) +=
+                            +beta_x*beta_z*0.5_rt*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i-1,j,k));
+                    },
+                    [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+                        Ey_arr(i,j,k) +=
+                            +beta_y*beta_z*0.5_rt*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i-1,j,k));
+                    },
+                    [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+                        Ez_arr(i,j,k) +=
+                            +(beta_z*beta_z-1._rt)*0.5_rt*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i-1,j,k));
+                    }
+                );
+#endif
+            }
+            else // Staggered solver
+            {
+#if defined(WARPX_DIM_3D)
+                amrex::ParallelFor( tbx, tby, tbz,
+                    [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+                        Ex_arr(i,j,k) +=
+                            +(beta_x*beta_x-1._rt) *inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i ,j ,k ))
+                            + beta_x*beta_y*0.25_rt*inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j-1,k )
+                                                          + phi_arr(i+1,j+1,k )-phi_arr(i+1,j-1,k ))
+                            + beta_x*beta_z*0.25_rt*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k-1)
+                                                          + phi_arr(i+1,j ,k+1)-phi_arr(i+1,j ,k-1));
+                    },
+                    [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+                        Ey_arr(i,j,k) +=
+                            + beta_y*beta_x*0.25_rt*inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i-1,j ,k )
+                                                          + phi_arr(i+1,j+1,k )-phi_arr(i-1,j+1,k ))
+                            +(beta_y*beta_y-1._rt) *inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j ,k ))
+                            + beta_y*beta_z*0.25_rt*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k-1)
+                                                          + phi_arr(i ,j+1,k+1)-phi_arr(i ,j+1,k-1));
+                    },
+                    [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+                        Ez_arr(i,j,k) +=
+                            + beta_z*beta_x*0.25_rt*inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i-1,j ,k )
+                                                          + phi_arr(i+1,j ,k+1)-phi_arr(i-1,j ,k+1))
+                            + beta_z*beta_y*0.25_rt*inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j-1,k )
+                                                          + phi_arr(i ,j+1,k+1)-phi_arr(i ,j-1,k+1))
+                            +(beta_z*beta_z-1._rt)
*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k )); + } + ); +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + amrex::ParallelFor( tbx, tbz, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Ex_arr(i,j,k) += + +(beta_x*beta_x-1._rt)*inv_dx*(phi_arr(i+1,j ,k)-phi_arr(i ,j ,k)) + +beta_x*beta_z*0.25_rt*inv_dz*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k) + + phi_arr(i+1,j+1,k)-phi_arr(i+1,j-1,k)); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Ez_arr(i,j,k) += + +beta_z*beta_x*0.25_rt*inv_dx*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k) + + phi_arr(i+1,j+1,k)-phi_arr(i-1,j+1,k)) + +(beta_z*beta_z-1._rt)*inv_dz*(phi_arr(i ,j+1,k)-phi_arr(i ,j ,k)); + } + ); + ignore_unused(beta_y); +#else + amrex::ParallelFor( tbz, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Ez_arr(i,j,k) += + +(beta_z*beta_z-1._rt)*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i,j,k)); + } + ); + ignore_unused(beta_x,beta_y); +#endif + } + } + } +} + + +void ElectrostaticSolver::computeB (amrex::Vector, 3> >& B, + const amrex::Vector >& phi, + std::array const beta ) const +{ + // return early if beta is 0 since there will be no B-field + if ((beta[0] == 0._rt) && (beta[1] == 0._rt) && (beta[2] == 0._rt)) { return; } + + auto & warpx = WarpX::GetInstance(); + for (int lev = 0; lev < num_levels; lev++) { + + const Real* dx = warpx.Geom(lev).CellSize(); + +#ifdef AMREX_USE_OMP +# pragma omp parallel if (Gpu::notInLaunchRegion()) +#endif + for ( MFIter mfi(*phi[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi ) + { +#if defined(WARPX_DIM_3D) + const Real inv_dx = 1._rt/dx[0]; + const Real inv_dy = 1._rt/dx[1]; + const Real inv_dz = 1._rt/dx[2]; +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + const Real inv_dx = 1._rt/dx[0]; + const Real inv_dz = 1._rt/dx[1]; +#else + const Real inv_dz = 1._rt/dx[0]; +#endif + const amrex::IntVect bx_type = B[lev][0]->ixType().toIntVect(); + const amrex::IntVect by_type = B[lev][1]->ixType().toIntVect(); + const amrex::IntVect bz_type = B[lev][2]->ixType().toIntVect(); + + const amrex::Box& tbx = mfi.tilebox(bx_type); + const amrex::Box& tby = mfi.tilebox(by_type); + const amrex::Box& tbz = mfi.tilebox(bz_type); + + const auto& phi_arr = phi[lev]->array(mfi); + const auto& Bx_arr = (*B[lev][0])[mfi].array(); + const auto& By_arr = (*B[lev][1])[mfi].array(); + const auto& Bz_arr = (*B[lev][2])[mfi].array(); + + const Real beta_x = beta[0]; + const Real beta_y = beta[1]; + const Real beta_z = beta[2]; + + constexpr Real inv_c = 1._rt/PhysConst::c; + + // Calculate the magnetic field + // Use discretized derivative that matches the staggering of the grid. 
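+            // As in computeE above: with A = beta*phi/c for a rigidly moving
+            // source, B = curl(A) = grad(phi) x beta / c = -(1/c)*beta x grad(phi),
+            // which is the expression implemented component-by-component below.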
+ // Nodal solver + if (bx_type == amrex::IntVect::TheNodeVector() && + by_type == amrex::IntVect::TheNodeVector() && + bz_type == amrex::IntVect::TheNodeVector()) + { +#if defined(WARPX_DIM_3D) + amrex::ParallelFor( tbx, tby, tbz, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Bx_arr(i,j,k) += inv_c * ( + -beta_y*inv_dz*0.5_rt*(phi_arr(i,j ,k+1)-phi_arr(i,j ,k-1)) + +beta_z*inv_dy*0.5_rt*(phi_arr(i,j+1,k )-phi_arr(i,j-1,k ))); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + By_arr(i,j,k) += inv_c * ( + -beta_z*inv_dx*0.5_rt*(phi_arr(i+1,j,k )-phi_arr(i-1,j,k )) + +beta_x*inv_dz*0.5_rt*(phi_arr(i ,j,k+1)-phi_arr(i ,j,k-1))); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Bz_arr(i,j,k) += inv_c * ( + -beta_x*inv_dy*0.5_rt*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k)) + +beta_y*inv_dx*0.5_rt*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k))); + } + ); +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + amrex::ParallelFor( tbx, tby, tbz, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Bx_arr(i,j,k) += inv_c * ( + -beta_y*inv_dz*0.5_rt*(phi_arr(i,j+1,k)-phi_arr(i,j-1,k))); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + By_arr(i,j,k) += inv_c * ( + -beta_z*inv_dx*0.5_rt*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k)) + +beta_x*inv_dz*0.5_rt*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k))); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Bz_arr(i,j,k) += inv_c * ( + +beta_y*inv_dx*0.5_rt*(phi_arr(i+1,j,k)-phi_arr(i-1,j,k))); + } + ); +#else + amrex::ParallelFor( tbx, tby, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Bx_arr(i,j,k) += inv_c * ( + -beta_y*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i,j,k))); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + By_arr(i,j,k) += inv_c * ( + +beta_x*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i,j,k))); + } + ); + ignore_unused(beta_z,tbz,Bz_arr); +#endif + } + else // Staggered solver + { +#if defined(WARPX_DIM_3D) + amrex::ParallelFor( tbx, tby, tbz, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Bx_arr(i,j,k) += inv_c * ( + -beta_y*inv_dz*0.5_rt*(phi_arr(i,j ,k+1)-phi_arr(i,j ,k ) + + phi_arr(i,j+1,k+1)-phi_arr(i,j+1,k )) + +beta_z*inv_dy*0.5_rt*(phi_arr(i,j+1,k )-phi_arr(i,j ,k ) + + phi_arr(i,j+1,k+1)-phi_arr(i,j ,k+1))); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + By_arr(i,j,k) += inv_c * ( + -beta_z*inv_dx*0.5_rt*(phi_arr(i+1,j,k )-phi_arr(i ,j,k ) + + phi_arr(i+1,j,k+1)-phi_arr(i ,j,k+1)) + +beta_x*inv_dz*0.5_rt*(phi_arr(i ,j,k+1)-phi_arr(i ,j,k ) + + phi_arr(i+1,j,k+1)-phi_arr(i+1,j,k ))); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Bz_arr(i,j,k) += inv_c * ( + -beta_x*inv_dy*0.5_rt*(phi_arr(i ,j+1,k)-phi_arr(i ,j ,k) + + phi_arr(i+1,j+1,k)-phi_arr(i+1,j ,k)) + +beta_y*inv_dx*0.5_rt*(phi_arr(i+1,j ,k)-phi_arr(i ,j ,k) + + phi_arr(i+1,j+1,k)-phi_arr(i ,j+1,k))); + } + ); +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + amrex::ParallelFor( tbx, tby, tbz, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Bx_arr(i,j,k) += inv_c * ( + -beta_y*inv_dz*(phi_arr(i,j+1,k)-phi_arr(i,j,k))); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + By_arr(i,j,k) += inv_c * ( + -beta_z*inv_dx*0.5_rt*(phi_arr(i+1,j ,k)-phi_arr(i ,j ,k) + + phi_arr(i+1,j+1,k)-phi_arr(i ,j+1,k)) + +beta_x*inv_dz*0.5_rt*(phi_arr(i ,j+1,k)-phi_arr(i ,j ,k) + + phi_arr(i+1,j+1,k)-phi_arr(i+1,j ,k))); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Bz_arr(i,j,k) += inv_c * ( + +beta_y*inv_dx*(phi_arr(i+1,j,k)-phi_arr(i,j,k))); + } + ); +#else + amrex::ParallelFor( tbx, tby, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Bx_arr(i,j,k) += inv_c * ( + 
-beta_y*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i,j,k))); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + By_arr(i,j,k) += inv_c * ( + +beta_x*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i,j,k))); + } + ); + ignore_unused(beta_z,tbz,Bz_arr); +#endif + } + } + } +} diff --git a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver_fwd.H b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver_fwd.H new file mode 100644 index 00000000000..00cd061e975 --- /dev/null +++ b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver_fwd.H @@ -0,0 +1,15 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: Roelof Groenewald, Arianna Formenti, Revathi Jambunathan + * + * License: BSD-3-Clause-LBNL + */ + +#ifndef WARPX_ELECTROSTATICSOLVER_FWD_H +#define WARPX_ELECTROSTATICSOLVER_FWD_H + +class ElectrostaticSolver; + +#endif /* WARPX_ELECTROSTATICSOLVER_FWD_H */ diff --git a/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H new file mode 100644 index 00000000000..7dc41f0a056 --- /dev/null +++ b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H @@ -0,0 +1,42 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: Roelof Groenewald, Arianna Formenti, Revathi Jambunathan + * + * License: BSD-3-Clause-LBNL + */ +#ifndef WARPX_LABFRAMEEXPLICITES_H_ +#define WARPX_LABFRAMEEXPLICITES_H_ + +#include "ElectrostaticSolver.H" + +class LabFrameExplicitES final : public ElectrostaticSolver +{ +public: + + LabFrameExplicitES (int nlevs_max) : ElectrostaticSolver (nlevs_max) { + ReadParameters(); + } + + void InitData () override; + + void ComputeSpaceChargeField ( + amrex::Vector< std::unique_ptr >& rho_fp, + amrex::Vector< std::unique_ptr >& rho_cp, + amrex::Vector< std::unique_ptr >& charge_buf, + amrex::Vector< std::unique_ptr >& phi_fp, + MultiParticleContainer& mpc, + MultiFluidContainer* mfl, + amrex::Vector< std::array< std::unique_ptr, 3> >& Efield_fp, + amrex::Vector< std::array< std::unique_ptr, 3> >& Bfield_fp + ) override; + + void computePhiTriDiagonal ( + const amrex::Vector >& rho, + amrex::Vector >& phi + ); + +}; + +#endif // WARPX_LABFRAMEEXPLICITES_H_ diff --git a/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp new file mode 100644 index 00000000000..d14abd1848a --- /dev/null +++ b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp @@ -0,0 +1,256 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. 
+ * + * Authors: Roelof Groenewald, Arianna Formenti, Revathi Jambunathan + * + * License: BSD-3-Clause-LBNL + */ +#include "LabFrameExplicitES.H" +#include "Fluids/MultiFluidContainer_fwd.H" +#include "EmbeddedBoundary/Enabled.H" +#include "Particles/MultiParticleContainer_fwd.H" +#include "Python/callbacks.H" +#include "WarpX.H" + +using namespace amrex; + +void LabFrameExplicitES::InitData() { + auto & warpx = WarpX::GetInstance(); + m_poisson_boundary_handler->DefinePhiBCs(warpx.Geom(0)); +} + +void LabFrameExplicitES::ComputeSpaceChargeField ( + amrex::Vector< std::unique_ptr >& rho_fp, + amrex::Vector< std::unique_ptr >& rho_cp, + amrex::Vector< std::unique_ptr >& charge_buf, + amrex::Vector< std::unique_ptr >& phi_fp, + MultiParticleContainer& mpc, + MultiFluidContainer* mfl, + amrex::Vector< std::array< std::unique_ptr, 3> >& Efield_fp, + amrex::Vector< std::array< std::unique_ptr, 3> >& /*Bfield_fp*/ +) { + mpc.DepositCharge(rho_fp, 0.0_rt); + if (mfl) { + const int lev = 0; + mfl->DepositCharge(lev, *rho_fp[lev]); + } + + auto & warpx = WarpX::GetInstance(); + for (int lev = 0; lev < num_levels; lev++) { + if (lev > 0) { + if (charge_buf[lev]) { + charge_buf[lev]->setVal(0.); + } + } + } + warpx.SyncRho(rho_fp, rho_cp, charge_buf); // Apply filter, perform MPI exchange, interpolate across levels + +#ifndef WARPX_DIM_RZ + for (int lev = 0; lev < num_levels; lev++) { + // Reflect density over PEC boundaries, if needed. + warpx.ApplyRhofieldBoundary(lev, rho_fp[lev].get(), PatchType::fine); + } +#endif + // beta is zero in lab frame + // Todo: use simpler finite difference form with beta=0 + const std::array beta = {0._rt}; + + // set the boundary potentials appropriately + setPhiBC(phi_fp, warpx.gett_new(0)); + + // Compute the potential phi, by solving the Poisson equation + if (IsPythonCallbackInstalled("poissonsolver")) { + + // Use the Python level solver (user specified) + ExecutePythonCallback("poissonsolver"); + + } else { + +#if defined(WARPX_DIM_1D_Z) + // Use the tridiag solver with 1D + computePhiTriDiagonal(rho_fp, phi_fp); +#else + // Use the AMREX MLMG or the FFT (IGF) solver otherwise + computePhi(rho_fp, phi_fp, beta, self_fields_required_precision, + self_fields_absolute_tolerance, self_fields_max_iters, + self_fields_verbosity); +#endif + + } + + // Compute the electric field. Note that if an EB is used the electric + // field will be calculated in the computePhi call. + if (!EB::enabled()) { computeE( Efield_fp, phi_fp, beta ); } + else { + if (IsPythonCallbackInstalled("poissonsolver")) { computeE(Efield_fp, phi_fp, beta); } + } +} + +/* \brief Compute the potential by solving Poisson's equation with + a 1D tridiagonal solve. 
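+
+   The discretized system is the standard second-order form,
+     -phi(i-1) + 2*phi(i) - phi(i+1) = rho(i)*dx^2/eps0
+   (rho is pre-multiplied by dx^2/eps0 below), and it is solved directly
+   with the Thomas algorithm.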
+
+   \param[in] rho The charge density of a given species
+   \param[out] phi The potential to be computed by this function
+*/
+void LabFrameExplicitES::computePhiTriDiagonal (
+    const amrex::Vector< std::unique_ptr<amrex::MultiFab> >& rho,
+    amrex::Vector< std::unique_ptr<amrex::MultiFab> >& phi)
+{
+    WARPX_ALWAYS_ASSERT_WITH_MESSAGE(num_levels == 1,
+        "The tridiagonal solver cannot be used with mesh refinement");
+
+    const int lev = 0;
+    auto & warpx = WarpX::GetInstance();
+
+    const amrex::Real* dx = warpx.Geom(lev).CellSize();
+    const amrex::Real xmin = warpx.Geom(lev).ProbLo(0);
+    const amrex::Real xmax = warpx.Geom(lev).ProbHi(0);
+    const int nx_full_domain = static_cast<int>( (xmax - xmin)/dx[0] + 0.5_rt );
+
+    int nx_solve_min = 1;
+    int nx_solve_max = nx_full_domain - 1;
+
+    auto field_boundary_lo0 = WarpX::field_boundary_lo[0];
+    auto field_boundary_hi0 = WarpX::field_boundary_hi[0];
+    if (field_boundary_lo0 == FieldBoundaryType::Neumann || field_boundary_lo0 == FieldBoundaryType::Periodic) {
+        // Neumann or periodic boundary condition
+        // Solve for the point on the lower boundary
+        nx_solve_min = 0;
+    }
+    if (field_boundary_hi0 == FieldBoundaryType::Neumann || field_boundary_hi0 == FieldBoundaryType::Periodic) {
+        // Neumann or periodic boundary condition
+        // Solve for the point on the upper boundary
+        nx_solve_max = nx_full_domain;
+    }
+
+    // Create a 1-D MultiFab that covers all of x.
+    // The tridiag solve will be done in this MultiFab and then copied out afterwards.
+    const amrex::IntVect lo_full_domain(AMREX_D_DECL(0,0,0));
+    const amrex::IntVect hi_full_domain(AMREX_D_DECL(nx_full_domain,0,0));
+    const amrex::Box box_full_domain_node(lo_full_domain, hi_full_domain, amrex::IntVect::TheNodeVector());
+    const BoxArray ba_full_domain_node(box_full_domain_node);
+    const amrex::Vector<int> pmap = {0}; // The data will only be on processor 0
+    const amrex::DistributionMapping dm_full_domain(pmap);
+
+    // Put the data in the pinned arena since the tridiag solver will be done on the CPU, but have
+    // the data readily accessible from the GPU.
+    auto phi1d_mf = MultiFab(ba_full_domain_node, dm_full_domain, 1, 0, MFInfo().SetArena(The_Pinned_Arena()));
+    auto zwork1d_mf = MultiFab(ba_full_domain_node, dm_full_domain, 1, 0, MFInfo().SetArena(The_Pinned_Arena()));
+    auto rho1d_mf = MultiFab(ba_full_domain_node, dm_full_domain, 1, 0, MFInfo().SetArena(The_Pinned_Arena()));
+
+    if (field_boundary_lo0 == FieldBoundaryType::PEC || field_boundary_hi0 == FieldBoundaryType::PEC) {
+        // Copy from phi to get the boundary values
+        phi1d_mf.ParallelCopy(*phi[lev], 0, 0, 1);
+    }
+    rho1d_mf.ParallelCopy(*rho[lev], 0, 0, 1);
+
+    // Multiplier on the charge density
+    const amrex::Real norm = dx[0]*dx[0]/PhysConst::ep0;
+    rho1d_mf.mult(norm);
+
+    // Use the MFIter loop since when parallel, only process zero has a FAB.
+    // This skips the loop on all other processors.
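+    // Thomas algorithm: the upward loop below performs the forward elimination,
+    // storing the scaled elimination factors in zwork1d_arr and the partially
+    // reduced right-hand side in phi1d_arr; the downward loop afterwards
+    // back-substitutes to obtain phi.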
+    for (MFIter mfi(phi1d_mf); mfi.isValid(); ++mfi) {
+
+        const auto& phi1d_arr = phi1d_mf[mfi].array();
+        const auto& zwork1d_arr = zwork1d_mf[mfi].array();
+        const auto& rho1d_arr = rho1d_mf[mfi].array();
+
+        // The loops are always performed on the CPU
+
+        amrex::Real diag = 2._rt;
+
+        // The initial values depend on the boundary condition
+        if (field_boundary_lo0 == FieldBoundaryType::PEC) {
+
+            phi1d_arr(1,0,0) = (phi1d_arr(0,0,0) + rho1d_arr(1,0,0))/diag;
+
+        } else if (field_boundary_lo0 == FieldBoundaryType::Neumann) {
+
+            // Neumann boundary condition
+            phi1d_arr(0,0,0) = rho1d_arr(0,0,0)/diag;
+
+            zwork1d_arr(1,0,0) = 2._rt/diag;
+            diag = 2._rt - zwork1d_arr(1,0,0);
+            phi1d_arr(1,0,0) = (rho1d_arr(1,0,0) - (-1._rt)*phi1d_arr(1-1,0,0))/diag;
+
+        } else if (field_boundary_lo0 == FieldBoundaryType::Periodic) {
+
+            phi1d_arr(0,0,0) = rho1d_arr(0,0,0)/diag;
+
+            zwork1d_arr(1,0,0) = 1._rt/diag;
+            diag = 2._rt - zwork1d_arr(1,0,0);
+            phi1d_arr(1,0,0) = (rho1d_arr(1,0,0) - (-1._rt)*phi1d_arr(1-1,0,0))/diag;
+
+        }
+
+        // Loop upward, calculating the Gaussian elimination multipliers and right hand sides
+        for (int i_up = 2 ; i_up < nx_solve_max ; i_up++) {
+
+            zwork1d_arr(i_up,0,0) = 1._rt/diag;
+            diag = 2._rt - zwork1d_arr(i_up,0,0);
+            phi1d_arr(i_up,0,0) = (rho1d_arr(i_up,0,0) - (-1._rt)*phi1d_arr(i_up-1,0,0))/diag;
+
+        }
+
+        // The last value depends on the boundary condition
+        amrex::Real zwork_product = 1.; // Needed for periodic boundaries
+        if (field_boundary_hi0 == FieldBoundaryType::PEC) {
+
+            int const nxm1 = nx_full_domain - 1;
+            zwork1d_arr(nxm1,0,0) = 1._rt/diag;
+            diag = 2._rt - zwork1d_arr(nxm1,0,0);
+            phi1d_arr(nxm1,0,0) = (phi1d_arr(nxm1+1,0,0) + rho1d_arr(nxm1,0,0) - (-1._rt)*phi1d_arr(nxm1-1,0,0))/diag;
+
+        } else if (field_boundary_hi0 == FieldBoundaryType::Neumann) {
+
+            // Neumann boundary condition
+            zwork1d_arr(nx_full_domain,0,0) = 1._rt/diag;
+            diag = 2._rt - 2._rt*zwork1d_arr(nx_full_domain,0,0);
+            if (diag == 0._rt) {
+                // This happens if the lower boundary is also Neumann.
+                // In this case, the potential is relative to an arbitrary constant,
+                // so set the upper boundary to zero to force a value.
+                phi1d_arr(nx_full_domain,0,0) = 0.;
+            } else {
+                phi1d_arr(nx_full_domain,0,0) = (rho1d_arr(nx_full_domain,0,0) - (-1._rt)*phi1d_arr(nx_full_domain-1,0,0))/diag;
+            }
+
+        } else if (field_boundary_hi0 == FieldBoundaryType::Periodic) {
+
+            zwork1d_arr(nx_full_domain,0,0) = 1._rt/diag;
+
+            for (int i = 1 ; i <= nx_full_domain ; i++) {
+                zwork_product *= zwork1d_arr(i,0,0);
+            }
+
+            diag = 2._rt - zwork1d_arr(nx_full_domain,0,0) - zwork_product;
+            // Note that rho1d_arr(0,0,0) is used to ensure that the same value is used
+            // on both boundaries.
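+            // (With periodic boundaries the system matrix has corner entries that
+            // couple the first and last unknowns; zwork_product carries the
+            // accumulated elimination factors of that extra column.)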
+ phi1d_arr(nx_full_domain,0,0) = (rho1d_arr(0,0,0) - (-1._rt)*phi1d_arr(nx_full_domain-1,0,0))/diag; + + } + + // Loop downward to calculate the phi + if (field_boundary_lo0 == FieldBoundaryType::Periodic) { + + // With periodic, the right hand column adds an extra term for all rows + for (int i_down = nx_full_domain-1 ; i_down >= 0 ; i_down--) { + zwork_product /= zwork1d_arr(i_down+1,0,0); + phi1d_arr(i_down,0,0) = phi1d_arr(i_down,0,0) + zwork1d_arr(i_down+1,0,0)*phi1d_arr(i_down+1,0,0) + zwork_product*phi1d_arr(nx_full_domain,0,0); + } + + } else { + + for (int i_down = nx_solve_max-1 ; i_down >= nx_solve_min ; i_down--) { + phi1d_arr(i_down,0,0) = phi1d_arr(i_down,0,0) + zwork1d_arr(i_down+1,0,0)*phi1d_arr(i_down+1,0,0); + } + + } + + } + + // Copy phi1d to phi + phi[lev]->ParallelCopy(phi1d_mf, 0, 0, 1); +} diff --git a/Source/FieldSolver/ElectrostaticSolvers/Make.package b/Source/FieldSolver/ElectrostaticSolvers/Make.package new file mode 100644 index 00000000000..a1d2d78dbb0 --- /dev/null +++ b/Source/FieldSolver/ElectrostaticSolvers/Make.package @@ -0,0 +1,6 @@ +CEXE_sources += PoissonBoundaryHandler.cpp +CEXE_sources += LabFrameExplicitES.cpp +CEXE_sources += RelativisticExplicitES.cpp +CEXE_sources += ElectrostaticSolver.cpp + +VPATH_LOCATIONS += $(WARPX_HOME)/Source/FieldSolver/ElectrostaticSolvers diff --git a/Source/FieldSolver/ElectrostaticSolvers/PoissonBoundaryHandler.H b/Source/FieldSolver/ElectrostaticSolvers/PoissonBoundaryHandler.H new file mode 100644 index 00000000000..04e427d7122 --- /dev/null +++ b/Source/FieldSolver/ElectrostaticSolvers/PoissonBoundaryHandler.H @@ -0,0 +1,142 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: Roelof Groenewald (TAE Technologies) + * + * License: BSD-3-Clause-LBNL + */ +#ifndef WARPX_BOUNDARYHANDLER_H_ +#define WARPX_BOUNDARYHANDLER_H_ + +#include "Utils/Parser/ParserUtils.H" +#include "WarpX.H" + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + + +class PoissonBoundaryHandler +{ +public: + PoissonBoundaryHandler (); // constructor + + /** Read runtime parameters. Called in constructor. */ + void ReadParameters (); + + /** + * \brief Read the input settings and set the boundary conditions used + * on each domain boundary for the Poisson solver. + */ + void DefinePhiBCs (const amrex::Geometry& geom); + + /** + * \brief Initialize amrex::Parser objects to get the boundary potential + * values at specified times. + */ + void BuildParsers (); + void BuildParsersEB (); + + /** + * \brief Sets the EB potential string and updates the function parser. 
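+     *
+     * The expression may depend on x, y, z and t; if it depends only on t,
+     * a cheaper time-only parser is compiled instead (see BuildParsersEB).
+     * A hypothetical usage example (with `handler` a PoissonBoundaryHandler):
+     * \code
+     * handler.setPotentialEB("150.*sin(2.*pi*1.0e6*t)");
+     * \endcode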
+ * + * \param [in] potential The string value of the potential + */ + void setPotentialEB(const std::string& potential) { + potential_eb_str = potential; + BuildParsersEB(); + } + + struct PhiCalculatorEB { + + amrex::Real t; + amrex::ParserExecutor<4> potential_eb; + + AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE + amrex::Real operator()(const amrex::Real x, const amrex::Real z) const noexcept { + using namespace amrex::literals; + return potential_eb(x, 0.0_rt, z, t); + } + + AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE + amrex::Real operator()(const amrex::Real x, const amrex::Real y, const amrex::Real z) const noexcept { + return potential_eb(x, y, z, t); + } + }; + + [[nodiscard]] PhiCalculatorEB + getPhiEB(amrex::Real t) const noexcept + { + return PhiCalculatorEB{t, potential_eb}; + } + + bool m_boundary_potential_specified = false; + + // set default potentials to zero in order for current tests to pass + // but forcing the user to specify a potential might be better + std::string potential_xlo_str = "0"; + std::string potential_xhi_str = "0"; + std::string potential_ylo_str = "0"; + std::string potential_yhi_str = "0"; + std::string potential_zlo_str = "0"; + std::string potential_zhi_str = "0"; + std::string potential_eb_str = "0"; + + amrex::ParserExecutor<1> potential_xlo; + amrex::ParserExecutor<1> potential_xhi; + amrex::ParserExecutor<1> potential_ylo; + amrex::ParserExecutor<1> potential_yhi; + amrex::ParserExecutor<1> potential_zlo; + amrex::ParserExecutor<1> potential_zhi; + amrex::ParserExecutor<1> potential_eb_t; + amrex::ParserExecutor<4> potential_eb; + + amrex::Array lobc, hibc; + std::array dirichlet_flag; + bool has_non_periodic = false; + bool phi_EB_only_t = true; + +private: + + amrex::Parser potential_xlo_parser; + amrex::Parser potential_xhi_parser; + amrex::Parser potential_ylo_parser; + amrex::Parser potential_yhi_parser; + amrex::Parser potential_zlo_parser; + amrex::Parser potential_zhi_parser; + amrex::Parser potential_eb_parser; +}; + +/** use amrex to directly calculate the electric field since with EB's the + * + * simple finite difference scheme in WarpX::computeE sometimes fails + */ +class EBCalcEfromPhiPerLevel { + private: + amrex::Vector< + amrex::Array + > m_e_field; + + public: + EBCalcEfromPhiPerLevel(amrex::Vector > e_field) + : m_e_field(std::move(e_field)) {} + + void operator()(amrex::MLMG & mlmg, int const lev) { + using namespace amrex::literals; + + mlmg.getGradSolution({m_e_field[lev]}); + for (auto &field: m_e_field[lev]) { + field->mult(-1._rt); + } + } +}; + +#endif // WARPX_BOUNDARYHANDLER_H_ diff --git a/Source/FieldSolver/ElectrostaticSolvers/PoissonBoundaryHandler.cpp b/Source/FieldSolver/ElectrostaticSolvers/PoissonBoundaryHandler.cpp new file mode 100644 index 00000000000..8afaf4c0587 --- /dev/null +++ b/Source/FieldSolver/ElectrostaticSolvers/PoissonBoundaryHandler.cpp @@ -0,0 +1,187 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. 
+ * + * Authors: Roelof Groenewald (TAE Technologies) + * + * License: BSD-3-Clause-LBNL + */ + +#include "PoissonBoundaryHandler.H" + +using namespace amrex; + +PoissonBoundaryHandler::PoissonBoundaryHandler () +{ + ReadParameters(); + BuildParsers(); +} + +void PoissonBoundaryHandler::ReadParameters() +{ + // Parse the input file for domain boundary potentials + const ParmParse pp_boundary("boundary"); + + // Read potentials from input file + m_boundary_potential_specified |= pp_boundary.query("potential_lo_x", potential_xlo_str); + m_boundary_potential_specified |= pp_boundary.query("potential_hi_x", potential_xhi_str); + m_boundary_potential_specified |= pp_boundary.query("potential_lo_y", potential_ylo_str); + m_boundary_potential_specified |= pp_boundary.query("potential_hi_y", potential_yhi_str); + m_boundary_potential_specified |= pp_boundary.query("potential_lo_z", potential_zlo_str); + m_boundary_potential_specified |= pp_boundary.query("potential_hi_z", potential_zhi_str); + + const ParmParse pp_warpx("warpx"); + m_boundary_potential_specified |= pp_warpx.query("eb_potential(x,y,z,t)", potential_eb_str); + + if (m_boundary_potential_specified & (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC)) { + ablastr::warn_manager::WMRecordWarning( + "Algorithms", + "The input script specifies the electric potential (phi) at the boundary, but \ + also uses the hybrid PIC solver based on Ohm’s law. When using this solver, the \ + electric potential does not have any impact on the simulation.", + ablastr::warn_manager::WarnPriority::low); + } + else if (m_boundary_potential_specified & (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::None)) { + ablastr::warn_manager::WMRecordWarning( + "Algorithms", + "The input script specifies the electric potential (phi) at the boundary so \ + an initial Poisson solve will be performed.", + ablastr::warn_manager::WarnPriority::low); + } +} + +void PoissonBoundaryHandler::DefinePhiBCs (const amrex::Geometry& geom) +{ +#ifdef WARPX_DIM_RZ + if (geom.ProbLo(0) == 0){ + lobc[0] = LinOpBCType::Neumann; + dirichlet_flag[0] = false; + + // handle the r_max boundary explicitly + if (WarpX::field_boundary_hi[0] == FieldBoundaryType::PEC) { + hibc[0] = LinOpBCType::Dirichlet; + dirichlet_flag[1] = true; + } + else if (WarpX::field_boundary_hi[0] == FieldBoundaryType::Neumann) { + hibc[0] = LinOpBCType::Neumann; + dirichlet_flag[1] = false; + } + else { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(false, + "Field boundary condition at the outer radius must be either PEC or neumann " + "when using the electrostatic solver" + ); + } + } + const int dim_start = 1; +#else + const int dim_start = 0; + amrex::ignore_unused(geom); +#endif + for (int idim=dim_start; idim(); + potential_xhi = potential_xhi_parser.compile<1>(); + potential_ylo = potential_ylo_parser.compile<1>(); + potential_yhi = potential_yhi_parser.compile<1>(); + potential_zlo = potential_zlo_parser.compile<1>(); + potential_zhi = potential_zhi_parser.compile<1>(); + + BuildParsersEB(); +} + +void PoissonBoundaryHandler::BuildParsersEB () +{ + potential_eb_parser = utils::parser::makeParser(potential_eb_str, {"x", "y", "z", "t"}); + + // check if the EB potential is a function of space or only of time + const std::set eb_symbols = potential_eb_parser.symbols(); + if ((eb_symbols.count("x") != 0) || (eb_symbols.count("y") != 0) + || (eb_symbols.count("z") != 0)) { + potential_eb = potential_eb_parser.compile<4>(); + phi_EB_only_t = false; + } + else { + potential_eb_parser = 
            utils::parser::makeParser(potential_eb_str, {"t"});
+        potential_eb_t = potential_eb_parser.compile<1>();
+    }
+}
diff --git a/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H b/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H
new file mode 100644
index 00000000000..70382d7ced5
--- /dev/null
+++ b/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H
@@ -0,0 +1,84 @@
+
+/* Copyright 2024 The WarpX Community
+ *
+ * This file is part of WarpX.
+ *
+ * Authors: Remi Lehe, Roelof Groenewald, Arianna Formenti, Revathi Jambunathan
+ *
+ * License: BSD-3-Clause-LBNL
+ */
+#ifndef WARPX_RELATIVISTICEXPLICITES_H_
+#define WARPX_RELATIVISTICEXPLICITES_H_
+
+#include "ElectrostaticSolver.H"
+#include "Particles/WarpXParticleContainer.H"
+
+
+class RelativisticExplicitES final : public ElectrostaticSolver
+{
+public:
+
+    RelativisticExplicitES (int nlevs_max) : ElectrostaticSolver (nlevs_max) {
+        ReadParameters();
+    }
+
+    void InitData () override;
+
+    /**
+     * \brief Computes electrostatic fields for species
+     * that have ``initialize_self_fields`` turned on.
+     * A loop over all the species is performed and for each species (with self fields)
+     * the function ``AddSpaceChargeField`` is called.
+     * This function computes the electrostatic potential with the species charge density as source
+     * and then the electric and magnetic fields are updated to include the
+     * corresponding fields from the electrostatic potential.
+     * Then electric and magnetic fields are updated to include the potential variation
+     * due to boundary conditions, using the function ``AddBoundaryField``.
+     *
+     * \param[in,unused] rho_fp A temporary multifab is used for species charge density
+     * \param[in,unused] rho_cp A temporary multifab is used to store species charge density on coarse patch
+     * \param[in] charge_buf Buffer region to synchronize charge density from fine and coarse patch
+     * \param[in,unused] phi_fp A temporary multifab is used to compute the electrostatic potential for each species
+     * \param[in] mpc Reference to the multi-particle container, used to access species data
+     * \param[in,out] Efield_fp Field contribution from phi computed from each species' charge density is added
+     * \param[in,out] Bfield_fp Field contribution from phi computed from each species' charge density is added
+     */
+    void ComputeSpaceChargeField (
+        [[maybe_unused]] amrex::Vector< std::unique_ptr<amrex::MultiFab> >& rho_fp,
+        [[maybe_unused]] amrex::Vector< std::unique_ptr<amrex::MultiFab> >& rho_cp,
+        amrex::Vector< std::unique_ptr<amrex::MultiFab> >& charge_buf,
+        amrex::Vector< std::unique_ptr<amrex::MultiFab> >& phi_fp,
+        MultiParticleContainer& mpc,
+        [[maybe_unused]] MultiFluidContainer* mfl,
+        amrex::Vector< std::array< std::unique_ptr<amrex::MultiFab>, 3> >& Efield_fp,
+        amrex::Vector< std::array< std::unique_ptr<amrex::MultiFab>, 3> >& Bfield_fp
+    ) override;
+
+    /**
+     * Compute the charge density of the species particle container, pc,
+     * and obtain the corresponding electrostatic potential to
+     * update the electric and magnetic fields.
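+     * The source is assumed to move rigidly at the mean velocity of the
+     * species; the implementation normalizes that velocity by c to obtain
+     * beta before calling computePhi, computeE and computeB.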
+ * \param[in] charge_buf Multifab that stores buffer region to + * synchronize charge density on fine and coarse patch + * \param[in] pc particle container for the selected species + * \param[in] Efield Efield updated to include potential computed for selected species charge density as source + * \param[in] Bfield Bfield updated to include potential computed for selected species charge density as source + */ + void AddSpaceChargeField ( + amrex::Vector >& charge_buf, + WarpXParticleContainer& pc, + amrex::Vector, 3>>& Efield, + amrex::Vector, 3>>& Bfield + ); + + /** Compute the potential `phi` by solving the Poisson equation with the + simulation specific boundary conditions and boundary values, then add the + E field due to that `phi` to `Efield_fp`. + * \param[in] Efield Efield updated to include potential gradient from boundary condition + */ + void AddBoundaryField ( + amrex::Vector, 3>>& Efield + ); +}; + +#endif // WARPX_RELATIVISTICEXPLICITES_H_ diff --git a/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.cpp b/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.cpp new file mode 100644 index 00000000000..1660efd48c2 --- /dev/null +++ b/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.cpp @@ -0,0 +1,168 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: Remi Lehe, Roelof Groenewald, Arianna Formenti, Revathi Jambunathan + * + * License: BSD-3-Clause-LBNL + */ +#include "WarpX.H" + +#include "RelativisticExplicitES.H" + +#include "Particles/MultiParticleContainer.H" +#include "Particles/WarpXParticleContainer.H" + +using namespace amrex; + +void RelativisticExplicitES::InitData () { + auto & warpx = WarpX::GetInstance(); + bool prepare_field_solve = (WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::Relativistic); + // check if any of the particle containers have initialize_self_fields = True + for (auto const& species : warpx.GetPartContainer()) { + prepare_field_solve |= species->initialize_self_fields; + } + prepare_field_solve |= m_poisson_boundary_handler->m_boundary_potential_specified; + + if (prepare_field_solve) { + m_poisson_boundary_handler->DefinePhiBCs(warpx.Geom(0)); + } +} + +void RelativisticExplicitES::ComputeSpaceChargeField ( + amrex::Vector< std::unique_ptr >& rho_fp, + amrex::Vector< std::unique_ptr >& rho_cp, + amrex::Vector< std::unique_ptr >& charge_buf, + amrex::Vector< std::unique_ptr >& phi_fp, + MultiParticleContainer& mpc, + MultiFluidContainer* mfl, + amrex::Vector< std::array< std::unique_ptr, 3> >& Efield_fp, + amrex::Vector< std::array< std::unique_ptr, 3> >& Bfield_fp +) { + WARPX_PROFILE("RelativisticExplicitES::ComputeSpaceChargeField"); + amrex::ignore_unused(rho_fp, rho_cp, phi_fp, mfl); + + const bool always_run_solve = (WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::Relativistic); + + // Loop over the species and add their space-charge contribution to E and B. 
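+    // (The solve runs unconditionally for the purely relativistic solver;
+    // otherwise it runs only for species with initialize_self_fields = true.)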
+ // Note that the fields calculated here do not include the E field + // due to simulation boundary potentials + for (auto const& species : mpc) { + if (always_run_solve || (species->initialize_self_fields)) { + AddSpaceChargeField(charge_buf, *species, Efield_fp, Bfield_fp); + } + } + + // Add the field due to the boundary potentials + if (always_run_solve || (m_poisson_boundary_handler->m_boundary_potential_specified)) + { + AddBoundaryField(Efield_fp); + } +} + +void RelativisticExplicitES::AddSpaceChargeField ( + amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_buf, + WarpXParticleContainer& pc, + amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>>& Efield_fp, + amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>>& Bfield_fp) +{ + WARPX_PROFILE("RelativisticExplicitES::AddSpaceChargeField"); + + if (pc.getCharge() == 0) { return; } + +#ifdef WARPX_DIM_RZ + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(WarpX::n_rz_azimuthal_modes == 1, + "Error: RZ electrostatic only implemented for a single mode"); +#endif + + auto & warpx = WarpX::GetInstance(); + + // Allocate fields for charge and potential + Vector<std::unique_ptr<MultiFab>> rho(num_levels); + Vector<std::unique_ptr<MultiFab>> rho_coarse(num_levels); // Used in order to interpolate between levels + Vector<std::unique_ptr<MultiFab>> phi(num_levels); + // Use number of guard cells used for local deposition of rho + const amrex::IntVect ng = warpx.get_ng_depos_rho(); + for (int lev = 0; lev < num_levels; lev++) { + BoxArray nba = warpx.boxArray(lev); + nba.surroundingNodes(); + rho[lev] = std::make_unique<MultiFab>(nba, warpx.DistributionMap(lev), 1, ng); + rho[lev]->setVal(0.); + phi[lev] = std::make_unique<MultiFab>(nba, warpx.DistributionMap(lev), 1, 1); + phi[lev]->setVal(0.); + if (lev > 0) { + // For MR levels: allocate the coarsened version of rho + BoxArray cba = nba; + cba.coarsen(warpx.refRatio(lev-1)); + rho_coarse[lev] = std::make_unique<MultiFab>(cba, warpx.DistributionMap(lev), 1, ng); + rho_coarse[lev]->setVal(0.); + if (charge_buf[lev]) { + charge_buf[lev]->setVal(0.); + } + } + } + // Deposit particle charge density (source of Poisson solver) + // The options below are identical to those in MultiParticleContainer::DepositCharge + bool const local = true; + bool const reset = false; + bool const apply_boundary_and_scale_volume = true; + bool const interpolate_across_levels = false; + if ( !pc.do_not_deposit) { + pc.DepositCharge(rho, local, reset, apply_boundary_and_scale_volume, + interpolate_across_levels); + } + warpx.SyncRho(rho, rho_coarse, charge_buf); // Apply filter, perform MPI exchange, interpolate across levels + + // Get the particle beta vector + bool const local_average = false; // Average across all MPI ranks + std::array<ParticleReal, 3> beta_pr = pc.meanParticleVelocity(local_average); + std::array<Real, 3> beta; + for (int i=0 ; i < static_cast<int>(beta.size()) ; i++) { + beta[i] = beta_pr[i]/PhysConst::c; // Normalize + } + + // Compute the potential phi, by solving the Poisson equation + computePhi( rho, phi, beta, pc.self_fields_required_precision, + pc.self_fields_absolute_tolerance, pc.self_fields_max_iters, + pc.self_fields_verbosity ); + + // Compute the corresponding electric and magnetic field, from the potential phi + computeE( Efield_fp, phi, beta ); + computeB( Bfield_fp, phi, beta ); + +}
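(For reference: as documented in the WarpX theory and in the doc comments of ``computePhi``/``computeE``/``computeB``, ``AddSpaceChargeField`` assumes the deposited charge density moves at the constant normalized velocity :math:`\vec{\beta}` — hence the call to ``meanParticleVelocity`` and the normalization by :math:`c` above — and computes

.. math::

   \nabla^2\phi - \left(\vec{\beta}\cdot\nabla\right)^2\phi = -\frac{\rho}{\epsilon_0} \,, \qquad
   \vec{E} = -\nabla\phi + \vec{\beta}\left(\vec{\beta}\cdot\nabla\phi\right) \,, \qquad
   \vec{B} = -\frac{1}{c}\,\vec{\beta}\times\nabla\phi \,.)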
+void RelativisticExplicitES::AddBoundaryField (amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>>& Efield_fp) +{ + WARPX_PROFILE("RelativisticExplicitES::AddBoundaryField"); + + auto & warpx = WarpX::GetInstance(); + + // Allocate fields for charge and potential + amrex::Vector<std::unique_ptr<MultiFab>> rho(num_levels); + amrex::Vector<std::unique_ptr<MultiFab>> phi(num_levels); + // Use number of guard cells used for local deposition of rho + const amrex::IntVect ng = warpx.get_ng_depos_rho(); + for (int lev = 0; lev < num_levels; lev++) { + BoxArray nba = warpx.boxArray(lev); + nba.surroundingNodes(); + rho[lev] = std::make_unique<MultiFab>(nba, warpx.DistributionMap(lev), 1, ng); + rho[lev]->setVal(0.); + phi[lev] = std::make_unique<MultiFab>(nba, warpx.DistributionMap(lev), 1, 1); + phi[lev]->setVal(0.); + } + + // Set the boundary potentials appropriately + setPhiBC(phi, warpx.gett_new(0)); + + // beta is zero for boundaries + const std::array<Real, 3> beta = {0._rt}; + + // Compute the potential phi, by solving the Poisson equation + computePhi( rho, phi, beta, self_fields_required_precision, + self_fields_absolute_tolerance, self_fields_max_iters, + self_fields_verbosity ); + + // Compute the corresponding electric field, from the potential phi. + computeE( Efield_fp, phi, beta ); +} diff --git a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp index 4ae988b9d10..031bc915afc 100644 --- a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp +++ b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp @@ -116,7 +116,12 @@ WarpX::AddMagnetostaticFieldLabFrame() WARPX_ALWAYS_ASSERT_WITH_MESSAGE( !IsPythonCallbackInstalled("poissonsolver"), "Python Level Poisson Solve not supported for Magnetostatic implementation."); - const amrex::Real magnetostatic_absolute_tolerance = self_fields_absolute_tolerance*PhysConst::c; + // const amrex::Real magnetostatic_absolute_tolerance = self_fields_absolute_tolerance*PhysConst::c; + // temporary fix!!! + const amrex::Real magnetostatic_absolute_tolerance = 0.0; + const amrex::Real self_fields_required_precision = 1e-12; + const int self_fields_max_iters = 200; + const int self_fields_verbosity = 2; computeVectorPotential( current_fp, vector_potential_fp_nodal, self_fields_required_precision, magnetostatic_absolute_tolerance, self_fields_max_iters, diff --git a/Source/FieldSolver/Make.package b/Source/FieldSolver/Make.package index a8af4c2de97..4c7c41f6f8e 100644 --- a/Source/FieldSolver/Make.package +++ b/Source/FieldSolver/Make.package @@ -2,9 +2,11 @@ CEXE_sources += WarpXPushFieldsEM.cpp CEXE_sources += WarpXPushFieldsHybridPIC.cpp CEXE_sources += ElectrostaticSolver.cpp CEXE_sources += WarpX_QED_Field_Pushers.cpp +CEXE_sources += WarpXSolveFieldsES.cpp ifeq ($(USE_FFT),TRUE) include $(WARPX_HOME)/Source/FieldSolver/SpectralSolver/Make.package endif +include $(WARPX_HOME)/Source/FieldSolver/ElectrostaticSolvers/Make.package include $(WARPX_HOME)/Source/FieldSolver/FiniteDifferenceSolver/Make.package include $(WARPX_HOME)/Source/FieldSolver/MagnetostaticSolver/Make.package include $(WARPX_HOME)/Source/FieldSolver/ImplicitSolvers/Make.package diff --git a/Source/FieldSolver/WarpXSolveFieldsES.cpp b/Source/FieldSolver/WarpXSolveFieldsES.cpp new file mode 100644 index 00000000000..42a537b5c2a --- /dev/null +++ b/Source/FieldSolver/WarpXSolveFieldsES.cpp @@ -0,0 +1,30 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX.
+ * + * Authors: Remi Lehe, Roelof Groenewald, Arianna Formenti, Revathi Jambunathan + * + * License: BSD-3-Clause-LBNL + */ +#include "FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H" +#include "Utils/WarpXProfilerWrapper.H" +#include "WarpX.H" + +void WarpX::ComputeSpaceChargeField (bool const reset_fields) +{ + WARPX_PROFILE("WarpX::ComputeSpaceChargeField"); + if (reset_fields) { + // Reset all E and B fields to 0, before calculating space-charge fields + WARPX_PROFILE("WarpX::ComputeSpaceChargeField::reset_fields"); + for (int lev = 0; lev <= max_level; lev++) { + for (int comp=0; comp<3; comp++) { + Efield_fp[lev][comp]->setVal(0); + Bfield_fp[lev][comp]->setVal(0); + } + } + } + + m_electrostatic_solver->ComputeSpaceChargeField( + rho_fp, rho_cp, charge_buf, phi_fp, *mypc, myfl.get(), Efield_fp, Bfield_fp + ); +} diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 49b0d439c50..0cf9496e63e 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -18,6 +18,7 @@ #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" #include "EmbeddedBoundary/Enabled.H" #include "FieldSolver/Fields.H" +#include "FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H" #include "FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.H" #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H" #include "Filter/BilinearFilter.H" @@ -551,6 +552,8 @@ WarpX::InitData () ); } + m_electrostatic_solver->InitData(); + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC) { m_hybrid_pic_model->InitData(); } diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index 0617b36a273..23af57b9206 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -1030,7 +1030,6 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int Gpu::DeviceVector<int> counts(overlap_box.numPts(), 0); Gpu::DeviceVector<int> offset(overlap_box.numPts()); auto *pcounts = counts.data(); - const amrex::IntVect lrrfac = rrfac; Box fine_overlap_box; // default Box is NOT ok(). if (refine_injection) { fine_overlap_box = overlap_box & amrex::shift(fine_injection_box, -shifted); @@ -1048,7 +1047,7 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int { auto index = overlap_box.index(iv); const amrex::Long r = (fine_overlap_box.ok() && fine_overlap_box.contains(iv))? - (AMREX_D_TERM(lrrfac[0],*lrrfac[1],*lrrfac[2])) : (1); + (AMREX_D_TERM(rrfac[0],*rrfac[1],*rrfac[2])) : (1); pcounts[index] = num_ppc*r; // update pcount by checking if cell-corners or cell-center // has non-zero density @@ -1154,8 +1153,8 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int long ip = poffset[index] + i_part; pa_idcpu[ip] = amrex::SetParticleIDandCPU(pid+ip, cpuid); const XDim3 r = (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) ? - // In the refined injection region: use refinement ratio `lrrfac` - inj_pos->getPositionUnitBox(i_part, lrrfac, engine) : + // In the refined injection region: use refinement ratio `rrfac` + inj_pos->getPositionUnitBox(i_part, rrfac, engine) : // Otherwise: use 1 as the refinement ratio inj_pos->getPositionUnitBox(i_part, amrex::IntVect::TheUnitVector(), engine); auto pos = getCellCoords(overlap_corner, dx, r, iv);
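The ``AddPlasmaFlux`` hunks that follow rework refined flux injection: the area weight ``r`` is now folded into the real-valued particles-per-cell target before the stochastic rounding, so the drawn count per cell can take any integer near ``num_ppc_real*r`` instead of only multiples of ``r`` (the expectation is unchanged). A self-contained sketch of that rounding scheme (a hypothetical standalone helper for illustration, not WarpX code):

```cpp
#include <iostream>
#include <random>

// Stochastic rounding: for x >= 0, returns floor(x) or floor(x)+1 such that
// the expectation over many draws is exactly x. This mirrors the patch's
// static_cast<int>(num_ppc_real*r + amrex::Random(engine)).
int stochasticRound (double x, std::mt19937& rng)
{
    std::uniform_real_distribution<double> uniform(0.0, 1.0);
    return static_cast<int>(x + uniform(rng));
}

int main ()
{
    std::mt19937 rng(12345);
    const double num_ppc_real = 0.3; // target particles per cell
    const int r = 4;                 // refined-injection area weight
    long total = 0;
    const int n_cells = 100000;
    for (int i = 0; i < n_cells; ++i) { total += stochasticRound(num_ppc_real * r, rng); }
    // Average is ~1.2 = num_ppc_real*r; per-cell counts are 1 or 2, whereas
    // rounding num_ppc_real first and multiplying by r would give only 0 or 4.
    std::cout << static_cast<double>(total) / n_cells << "\n";
    return 0;
}
```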
@@ -1441,7 +1440,6 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, Gpu::DeviceVector<int> counts(overlap_box.numPts(), 0); Gpu::DeviceVector<int> offset(overlap_box.numPts()); auto *pcounts = counts.data(); - const amrex::IntVect lrrfac = rrfac; const int flux_normal_axis = plasma_injector.flux_normal_axis; Box fine_overlap_box; // default Box is NOT ok(). if (refine_injection) { @@ -1450,23 +1448,21 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, amrex::ParallelForRNG(overlap_box, [=] AMREX_GPU_DEVICE (int i, int j, int k, amrex::RandomEngine const& engine) noexcept { const IntVect iv(AMREX_D_DECL(i, j, k)); + amrex::ignore_unused(j,k); + auto lo = getCellCoords(overlap_corner, dx, {0._rt, 0._rt, 0._rt}, iv); auto hi = getCellCoords(overlap_corner, dx, {1._rt, 1._rt, 1._rt}, iv); - const int num_ppc_int = static_cast<int>(num_ppc_real + amrex::Random(engine)); - if (flux_pos->overlapsWith(lo, hi)) { auto index = overlap_box.index(iv); - int r; + int r = 1; if (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) { - r = compute_area_weights(lrrfac, flux_normal_axis); - } else { - r = 1; + r = compute_area_weights(rrfac, flux_normal_axis); } - pcounts[index] = num_ppc_int*r; + const int num_ppc_int = static_cast<int>(num_ppc_real*r + amrex::Random(engine)); + pcounts[index] = num_ppc_int; } - amrex::ignore_unused(j,k); }); // Max number of new particles. All of them are created, @@ -1533,24 +1529,14 @@ [=] AMREX_GPU_DEVICE (int i, int j, int k, amrex::RandomEngine const& engine) noexcept { const IntVect iv = IntVect(AMREX_D_DECL(i, j, k)); + amrex::ignore_unused(j,k); const auto index = overlap_box.index(iv); Real scale_fac = compute_scale_fac_area(dx, num_ppc_real, flux_normal_axis); - auto lo = getCellCoords(overlap_corner, dx, {0._rt, 0._rt, 0._rt}, iv); - auto hi = getCellCoords(overlap_corner, dx, {1._rt, 1._rt, 1._rt}, iv); - - if (flux_pos->overlapsWith(lo, hi)) - { - int r; - if (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) { - r = compute_area_weights(lrrfac, flux_normal_axis); - } else { - r = 1; - } - scale_fac /= r; + if (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) { + scale_fac /= compute_area_weights(rrfac, flux_normal_axis); } - amrex::ignore_unused(j,k); for (int i_part = 0; i_part < pcounts[index]; ++i_part) { @@ -1559,8 +1545,8 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, // This assumes the flux_pos is of type InjectorPositionRandomPlane const XDim3 r = (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) ?
- // In the refined injection region: use refinement ratio `lrrfac` - flux_pos->getPositionUnitBox(i_part, lrrfac, engine) : + // In the refined injection region: use refinement ratio `rrfac` + flux_pos->getPositionUnitBox(i_part, rrfac, engine) : // Otherwise: use 1 as the refinement ratio flux_pos->getPositionUnitBox(i_part, amrex::IntVect::TheUnitVector(), engine); auto pos = getCellCoords(overlap_corner, dx, r, iv); diff --git a/Source/Python/WarpX.cpp b/Source/Python/WarpX.cpp index e583d42c49c..2689b3115fa 100644 --- a/Source/Python/WarpX.cpp +++ b/Source/Python/WarpX.cpp @@ -36,7 +36,7 @@ #include #include #include - +#include "FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H" #include #include #include @@ -176,13 +176,13 @@ The physical fields in WarpX have the following naming: std::string potential_lo_y, std::string potential_hi_y, std::string potential_lo_z, std::string potential_hi_z) { - if (potential_lo_x != "") wx.m_poisson_boundary_handler.potential_xlo_str = potential_lo_x; - if (potential_hi_x != "") wx.m_poisson_boundary_handler.potential_xhi_str = potential_hi_x; - if (potential_lo_y != "") wx.m_poisson_boundary_handler.potential_ylo_str = potential_lo_y; - if (potential_hi_y != "") wx.m_poisson_boundary_handler.potential_yhi_str = potential_hi_y; - if (potential_lo_z != "") wx.m_poisson_boundary_handler.potential_zlo_str = potential_lo_z; - if (potential_hi_z != "") wx.m_poisson_boundary_handler.potential_zhi_str = potential_hi_z; - wx.m_poisson_boundary_handler.buildParsers(); + if (potential_lo_x != "") wx.GetElectrostaticSolver().m_poisson_boundary_handler->potential_xlo_str = potential_lo_x; + if (potential_hi_x != "") wx.GetElectrostaticSolver().m_poisson_boundary_handler->potential_xhi_str = potential_hi_x; + if (potential_lo_y != "") wx.GetElectrostaticSolver().m_poisson_boundary_handler->potential_ylo_str = potential_lo_y; + if (potential_hi_y != "") wx.GetElectrostaticSolver().m_poisson_boundary_handler->potential_yhi_str = potential_hi_y; + if (potential_lo_z != "") wx.GetElectrostaticSolver().m_poisson_boundary_handler->potential_zlo_str = potential_lo_z; + if (potential_hi_z != "") wx.GetElectrostaticSolver().m_poisson_boundary_handler->potential_zhi_str = potential_hi_z; + wx.GetElectrostaticSolver().m_poisson_boundary_handler->BuildParsers(); }, py::arg("potential_lo_x") = "", py::arg("potential_hi_x") = "", @@ -194,7 +194,7 @@ The physical fields in WarpX have the following naming: ) .def("set_potential_on_eb", [](WarpX& wx, std::string potential) { - wx.m_poisson_boundary_handler.setPotentialEB(potential); + wx.GetElectrostaticSolver().m_poisson_boundary_handler->setPotentialEB(potential); }, py::arg("potential"), "Sets the EB potential string and updates the function parser." 
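With the bindings above, Python-side setters now reach the Poisson boundary handler through the solver object owned by ``WarpX`` rather than a handler member on ``WarpX`` itself. A minimal C++ sketch of the equivalent call path (hypothetical usage for illustration, not part of this patch; it assumes only the accessor and handler members shown in this diff):

```cpp
#include <string>
#include "WarpX.H"
#include "FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H"

// Hypothetical helper mirroring what the set_potential_on_domain_boundary
// binding does internally after this refactor.
void setLowerXBoundaryPotential (const std::string& expression)
{
    auto& warpx = WarpX::GetInstance();
    auto& handler = *warpx.GetElectrostaticSolver().m_poisson_boundary_handler;
    handler.potential_xlo_str = expression; // e.g. "150.0*sin(2*pi*6.78e6*t)"
    handler.BuildParsers();                 // recompile parsers after editing any string
}
```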
diff --git a/Source/WarpX.H b/Source/WarpX.H index 912d87f986c..6bb3035e0d2 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -16,6 +16,7 @@ #include "Diagnostics/MultiDiagnostics_fwd.H" #include "Diagnostics/ReducedDiags/MultiReducedDiags_fwd.H" #include "EmbeddedBoundary/WarpXFaceInfoBox_fwd.H" +#include "FieldSolver/ElectrostaticSolvers/ElectrostaticSolver_fwd.H" #include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver_fwd.H" #include "FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties_fwd.H" #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel_fwd.H" @@ -39,7 +40,6 @@ #include "Evolve/WarpXDtType.H" #include "Evolve/WarpXPushType.H" #include "FieldSolver/Fields.H" -#include "FieldSolver/ElectrostaticSolver.H" #include "FieldSolver/MagnetostaticSolver/MagnetostaticSolver.H" #include "FieldSolver/ImplicitSolvers/ImplicitSolver.H" #include "FieldSolver/ImplicitSolvers/WarpXSolverVec.H" @@ -157,6 +157,7 @@ public: MultiParticleContainer& GetPartContainer () { return *mypc; } MultiFluidContainer& GetFluidContainer () { return *myfl; } MacroscopicProperties& GetMacroscopicProperties () { return *m_macroscopic_properties; } + ElectrostaticSolver& GetElectrostaticSolver () {return *m_electrostatic_solver;} HybridPICModel& GetHybridPICModel () { return *m_hybrid_pic_model; } [[nodiscard]] HybridPICModel * get_pointer_HybridPICModel () const { return m_hybrid_pic_model.get(); } MultiDiagnostics& GetMultiDiags () {return *multi_diags;} @@ -972,12 +973,6 @@ public: static inline auto electrostatic_solver_id = ElectrostaticSolverAlgo::Default; static inline auto poisson_solver_id = PoissonSolverAlgo::Default; - // Parameters for lab frame electrostatic - static amrex::Real self_fields_required_precision; - static amrex::Real self_fields_absolute_tolerance; - static int self_fields_max_iters; - static int self_fields_verbosity; - static int do_moving_window; // boolean static int start_moving_window_step; // the first step to move window static int end_moving_window_step; // the last step to move window @@ -1024,31 +1019,8 @@ public: */ [[nodiscard]] amrex::IntVect get_numprocs() const {return numprocs;} - /** Enable embedded boundaries */ - bool m_boundary_potential_specified = false; - ElectrostaticSolver::PoissonBoundaryHandler m_poisson_boundary_handler; + /** Electrostatic solve call */ void ComputeSpaceChargeField (bool reset_fields); - void AddBoundaryField (); - void AddSpaceChargeField (WarpXParticleContainer& pc); - void AddSpaceChargeFieldLabFrame (); - void computePhi (const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& rho, - amrex::Vector<std::unique_ptr<amrex::MultiFab>>& phi, - std::array<amrex::Real, 3> beta = {{0,0,0}}, - amrex::Real required_precision=amrex::Real(1.e-11), - amrex::Real absolute_tolerance=amrex::Real(0.0), - int max_iters=200, - int verbosity=2) const; - - void setPhiBC (amrex::Vector<std::unique_ptr<amrex::MultiFab>>& phi ) const; - - void computeE (amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3> >& E, - const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& phi, - std::array<amrex::Real, 3> beta = {{0,0,0}} ) const; - void computeB (amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3> >& B, - const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& phi, - std::array<amrex::Real, 3> beta = {{0,0,0}} ) const; - void computePhiTriDiagonal (const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& rho, - amrex::Vector<std::unique_ptr<amrex::MultiFab>>& phi) const; // Magnetostatic Solver Interface MagnetostaticSolver::VectorPoissonBoundaryHandler m_vector_poisson_boundary_handler; @@ -1691,6 +1663,9 @@ private: // Macroscopic properties std::unique_ptr<MacroscopicProperties> m_macroscopic_properties; + // Electrostatic solver + std::unique_ptr<ElectrostaticSolver> m_electrostatic_solver; + // Hybrid PIC algorithm parameters std::unique_ptr<HybridPICModel> m_hybrid_pic_model; diff --git 
a/Source/WarpX.cpp b/Source/WarpX.cpp index 2e9befba992..ef1668de4c0 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -16,6 +16,9 @@ #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" #include "EmbeddedBoundary/Enabled.H" #include "EmbeddedBoundary/WarpXFaceInfoBox.H" +#include "FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H" +#include "FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H" +#include "FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H" #include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H" #include "FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.H" #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H" @@ -174,11 +177,6 @@ amrex::IntVect WarpX::sort_idx_type(AMREX_D_DECL(0,0,0)); bool WarpX::do_dynamic_scheduling = true; -Real WarpX::self_fields_required_precision = 1.e-11_rt; -Real WarpX::self_fields_absolute_tolerance = 0.0_rt; -int WarpX::self_fields_max_iters = 200; -int WarpX::self_fields_verbosity = 2; - bool WarpX::do_subcycling = false; bool WarpX::do_multi_J = false; int WarpX::do_multi_J_n_depositions; @@ -356,6 +354,17 @@ WarpX::WarpX () current_fp_vay.resize(nlevs_max); } + // Create Electrostatic Solver object if needed + if ((WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame) + || (WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic)) + { + m_electrostatic_solver = std::make_unique<LabFrameExplicitES>(nlevs_max); + } + else + { + m_electrostatic_solver = std::make_unique<RelativisticExplicitES>(nlevs_max); + } + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC) { // Create hybrid-PIC model object if needed @@ -733,20 +742,6 @@ WarpX::ReadParameters () electromagnetic_solver_id = ElectromagneticSolverAlgo::None; } - if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame || - electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) - { - // Note that with the relativistic version, these parameters would be - // input for each species. - utils::parser::queryWithParser( - pp_warpx, "self_fields_required_precision", self_fields_required_precision); - utils::parser::queryWithParser( - pp_warpx, "self_fields_absolute_tolerance", self_fields_absolute_tolerance); - utils::parser::queryWithParser( - pp_warpx, "self_fields_max_iters", self_fields_max_iters); - pp_warpx.query("self_fields_verbosity", self_fields_verbosity); - } - pp_warpx.query_enum_sloppy("poisson_solver", poisson_solver_id, "-_"); #ifndef WARPX_DIM_3D WARPX_ALWAYS_ASSERT_WITH_MESSAGE( @@ -767,7 +762,7 @@ WarpX::ReadParameters () "The FFT Poisson solver is not implemented in labframe-electromagnetostatic mode yet."
); - bool const eb_enabled = EB::enabled(); + [[maybe_unused]] bool const eb_enabled = EB::enabled(); #if !defined(AMREX_USE_EB) WARPX_ALWAYS_ASSERT_WITH_MESSAGE( !eb_enabled, @@ -775,38 +770,8 @@ ); #endif - // Parse the input file for domain boundary potentials - const ParmParse pp_boundary("boundary"); - bool potential_specified = false; - // When reading the potential at the boundary from the input file, set this flag to true if any of the potential is specified - potential_specified |= pp_boundary.query("potential_lo_x", m_poisson_boundary_handler.potential_xlo_str); - potential_specified |= pp_boundary.query("potential_hi_x", m_poisson_boundary_handler.potential_xhi_str); - potential_specified |= pp_boundary.query("potential_lo_y", m_poisson_boundary_handler.potential_ylo_str); - potential_specified |= pp_boundary.query("potential_hi_y", m_poisson_boundary_handler.potential_yhi_str); - potential_specified |= pp_boundary.query("potential_lo_z", m_poisson_boundary_handler.potential_zlo_str); - potential_specified |= pp_boundary.query("potential_hi_z", m_poisson_boundary_handler.potential_zhi_str); - if (eb_enabled) { - potential_specified |= pp_warpx.query("eb_potential(x,y,z,t)", m_poisson_boundary_handler.potential_eb_str); - } - m_boundary_potential_specified = potential_specified; - if (potential_specified & (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC)) { - ablastr::warn_manager::WMRecordWarning( - "Algorithms", - "The input script specifies the electric potential (phi) at the boundary, but \ - also uses the hybrid PIC solver based on Ohm’s law. When using this solver, the \ - electric potential does not have any impact on the simulation.", - ablastr::warn_manager::WarnPriority::low); - } - else if (potential_specified & (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::None)) { - ablastr::warn_manager::WMRecordWarning( - "Algorithms", - "The input script specifies the electric potential (phi) at the boundary so \ - an initial Poisson solve will be performed.", - ablastr::warn_manager::WarnPriority::low); - } - - m_poisson_boundary_handler.buildParsers(); #ifdef WARPX_DIM_RZ + const ParmParse pp_boundary("boundary"); pp_boundary.query("verboncoeur_axis_correction", verboncoeur_axis_correction); #endif diff --git a/Source/ablastr/fields/PoissonSolver.H b/Source/ablastr/fields/PoissonSolver.H index fbd8e5f14be..c36c83bc336 100644 --- a/Source/ablastr/fields/PoissonSolver.H +++ b/Source/ablastr/fields/PoissonSolver.H @@ -57,6 +57,101 @@ namespace ablastr::fields { +/** Compute the L-infinity norm of the charge density `rho` across all MR levels + * to determine if `rho` is zero everywhere + * + * \param[in] rho The charge density of a given species + * \param[in] finest_level The most refined mesh refinement level + * \param[in,out] absolute_tolerance The absolute convergence threshold for the MLMG solver + * \return The maximum L-infinity norm of `rho` across all levels + */ +inline amrex::Real getMaxNormRho ( + amrex::Vector<amrex::MultiFab*> const & rho, + int finest_level, + amrex::Real & absolute_tolerance) +{ + amrex::Real max_norm_rho = 0.0; + for (int lev=0; lev<=finest_level; lev++) { + max_norm_rho = amrex::max(max_norm_rho, rho[lev]->norm0()); + } + amrex::ParallelDescriptor::ReduceRealMax(max_norm_rho); + + if (max_norm_rho == 0) { + if (absolute_tolerance == 0.0) { absolute_tolerance = amrex::Real(1e-6); } + ablastr::warn_manager::WMRecordWarning( + "ElectrostaticSolver", + "Max norm of rho is 0", 
ablastr::warn_manager::WarnPriority::low + ); + } + return max_norm_rho; +} + +/** Interpolate the potential `phi` from level `lev` to `lev+1` + * + * Needed to solve the Poisson equation on `lev+1` + * The coarser level `lev` provides both + * the boundary values and initial guess for `phi` + * on the finer level `lev+1` + * + * \param[in] phi_lev The potential on a given mesh refinement level `lev` + * \param[in,out] phi_lev_plus_one The potential on the next level `lev+1` + * \param[in] geom_lev The geometry of level `lev` + * \param[in] do_single_precision_comms perform communications in single precision + * \param[in] refratio mesh refinement ratio between level `lev` and `lev+1` + * \param[in] ncomp Number of components of the multifab (1) + * \param[in] ng Number of ghost cells (1 if collocated, 0 otherwise) + */ +inline void interpolatePhiBetweenLevels ( + amrex::MultiFab const* phi_lev, + amrex::MultiFab* phi_lev_plus_one, + amrex::Geometry const & geom_lev, + bool do_single_precision_comms, + const amrex::IntVect& refratio, + const int ncomp, + const int ng) +{ + using namespace amrex::literals; + + // Allocate phi_cp for lev+1 + amrex::BoxArray ba = phi_lev_plus_one->boxArray(); + ba.coarsen(refratio); + amrex::MultiFab phi_cp(ba, phi_lev_plus_one->DistributionMap(), ncomp, ng); + if (ng > 0) { + // Set all values outside the domain to zero + phi_cp.setDomainBndry(0.0_rt, geom_lev); + } + + // Copy from phi[lev] to phi_cp (in parallel) + const amrex::Periodicity& crse_period = geom_lev.periodicity(); + + ablastr::utils::communication::ParallelCopy( + phi_cp, + *phi_lev, + 0, + 0, + 1, + amrex::IntVect(0), + amrex::IntVect(ng), + do_single_precision_comms, + crse_period + ); + + // Local interpolation from phi_cp to phi[lev+1] +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + for (amrex::MFIter mfi(*phi_lev_plus_one, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) { + amrex::Array4<amrex::Real> const phi_fp_arr = phi_lev_plus_one->array(mfi); + amrex::Array4<amrex::Real const> const phi_cp_arr = phi_cp.array(mfi); + + details::PoissonInterpCPtoFP const interp(phi_fp_arr, phi_cp_arr, refratio); + + amrex::Box const& b = mfi.growntilebox(ng); + amrex::ParallelFor(b, interp); + } +} + /** Compute the potential `phi` by solving the Poisson equation * * Uses `rho` as a source, assuming that the source moves at a @@ -121,8 +216,10 @@ computePhi (amrex::Vector<amrex::MultiFab*> const & rho, ABLASTR_PROFILE("computePhi"); + auto const finest_level = static_cast<int>(rho.size() - 1); + if (!rel_ref_ratio.has_value()) { - ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE(rho.size() == 1u, + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE(finest_level == 0, "rel_ref_ratio must be set if mesh-refinement is used"); rel_ref_ratio = amrex::Vector<amrex::IntVect>{{amrex::IntVect(AMREX_D_DECL(1, 1, 1))}}; } @@ -132,30 +229,18 @@ computePhi (amrex::Vector<amrex::MultiFab*> const & rho, "Embedded boundary solve requested but not compiled in"); #endif - auto const finest_level = static_cast<int>(rho.size() - 1); - - // determine if rho is zero everywhere - amrex::Real max_norm_b = 0.0; - for (int lev=0; lev<=finest_level; lev++) { - max_norm_b = amrex::max(max_norm_b, rho[lev]->norm0()); - } - amrex::ParallelDescriptor::ReduceRealMax(max_norm_b); - - const bool always_use_bnorm = (max_norm_b > 0); - if (!always_use_bnorm) { - if (absolute_tolerance == 0.0) { absolute_tolerance = amrex::Real(1e-6); } - ablastr::warn_manager::WMRecordWarning( - "ElectrostaticSolver", - "Max norm of rho is 0", - ablastr::warn_manager::WarnPriority::low - ); - } +#if !defined(ABLASTR_USE_FFT) + 
ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE( !is_solver_igf_on_lev0, + "Must compile with FFT support to use the IGF solver!"); +#endif - amrex::LPInfo info; +#if !defined(WARPX_DIM_3D) + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE( !is_solver_igf_on_lev0, + "The FFT Poisson solver is currently only implemented for 3D!"); +#endif - for (int lev=0; lev<=finest_level; lev++) { - // Set the value of beta - amrex::Array<amrex::Real, AMREX_SPACEDIM> beta_solver = + // Set the value of beta + amrex::Array<amrex::Real, AMREX_SPACEDIM> beta_solver = #if defined(WARPX_DIM_1D_Z) {{ beta[2] }}; // beta_x and beta_z #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) @@ -164,36 +249,31 @@ computePhi (amrex::Vector<amrex::MultiFab*> const & rho, {{ beta[0], beta[1], beta[2] }}; #endif -#if !defined(ABLASTR_USE_FFT) - ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE( !is_solver_igf_on_lev0, - "Must compile with FFT support to use the IGF solver!"); -#endif + // determine if rho is zero everywhere + const amrex::Real max_norm_b = getMaxNormRho(rho, finest_level, absolute_tolerance); -#if !defined(WARPX_DIM_3D) - ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE( !is_solver_igf_on_lev0, - "The FFT Poisson solver is currently only implemented for 3D!"); -#endif + amrex::LPInfo info; + + for (int lev=0; lev<=finest_level; lev++) { + amrex::Array<amrex::Real, AMREX_SPACEDIM> const dx_scaled + {AMREX_D_DECL(geom[lev].CellSize(0)/std::sqrt(1._rt-beta_solver[0]*beta_solver[0]), + geom[lev].CellSize(1)/std::sqrt(1._rt-beta_solver[1]*beta_solver[1]), + geom[lev].CellSize(2)/std::sqrt(1._rt-beta_solver[2]*beta_solver[2]))}; #if (defined(ABLASTR_USE_FFT) && defined(WARPX_DIM_3D)) // Use the Integrated Green Function solver (FFT) on the coarsest level if it was selected if(is_solver_igf_on_lev0 && lev==0){ - amrex::Array<amrex::Real, AMREX_SPACEDIM> const dx_igf - {AMREX_D_DECL(geom[lev].CellSize(0)/std::sqrt(1._rt-beta_solver[0]*beta_solver[0]), - geom[lev].CellSize(1)/std::sqrt(1._rt-beta_solver[1]*beta_solver[1]), - geom[lev].CellSize(2)/std::sqrt(1._rt-beta_solver[2]*beta_solver[2]))}; if ( max_norm_b == 0 ) { phi[lev]->setVal(0); } else { - computePhiIGF( *rho[lev], *phi[lev], dx_igf, grids[lev] ); + computePhiIGF( *rho[lev], *phi[lev], dx_scaled, grids[lev] ); } continue; } #endif - // Use the Multigrid (MLMG) solver if selected or on refined patches // but first scale rho appropriately - using namespace ablastr::constant::SI; - rho[lev]->mult(-1._rt / ep0); // TODO: when do we "un-multiply" this? We need to document this side-effect! + rho[lev]->mult(-1._rt / ablastr::constant::SI::ep0); // TODO: when do we "un-multiply" this? We need to document this side-effect! 
#ifdef WARPX_DIM_RZ constexpr bool is_rz = true; @@ -203,10 +283,6 @@ computePhi (amrex::Vector<amrex::MultiFab*> const & rho, if (!eb_enabled && !is_rz) { // Determine whether to use semi-coarsening - amrex::Array<amrex::Real, AMREX_SPACEDIM> dx_scaled - {AMREX_D_DECL(geom[lev].CellSize(0) / std::sqrt(1._rt - beta_solver[0] * beta_solver[0]), - geom[lev].CellSize(1) / std::sqrt(1._rt - beta_solver[1] * beta_solver[1]), - geom[lev].CellSize(2) / std::sqrt(1._rt - beta_solver[2] * beta_solver[2]))}; int max_semicoarsening_level = 0; int semicoarsening_direction = -1; const auto min_dir = static_cast<int>(std::distance(dx_scaled.begin(), @@ -263,7 +339,6 @@ computePhi (amrex::Vector<amrex::MultiFab*> const & rho, 1._rt-beta_solver[1]*beta_solver[1], 1._rt-beta_solver[2]*beta_solver[2])}); #endif - #if defined(AMREX_USE_EB) if (eb_enabled) { // if the EB potential only depends on time, the potential can be passed @@ -295,8 +370,10 @@ computePhi (amrex::Vector<amrex::MultiFab*> const & rho, amrex::MLMG mlmg(*linop); // actual solver defined here mlmg.setVerbose(verbosity); mlmg.setMaxIter(max_iters); - mlmg.setAlwaysUseBNorm(always_use_bnorm); - if (grid_type == utils::enums::GridType::Collocated) { + mlmg.setAlwaysUseBNorm((max_norm_b > 0)); + + const int ng = int(grid_type == utils::enums::GridType::Collocated); // ghost cells + if (ng) { // In this case, computeE needs to use ghost nodes data. So we // ask MLMG to fill BC for us after it solves the problem. mlmg.setFinalFillBC(true); @@ -306,54 +383,22 @@ computePhi (amrex::Vector<amrex::MultiFab*> const & rho, mlmg.solve( {phi[lev]}, {rho[lev]}, relative_tolerance, absolute_tolerance ); + const amrex::IntVect& refratio = rel_ref_ratio.value()[lev]; + const int ncomp = linop->getNComp(); + // needed for solving the levels one by one: // - coarser level is initial guess for finer level // - coarser level provides boundary values for finer level patch // Interpolation from phi[lev] to phi[lev+1] // (This provides both the boundary conditions and initial guess for phi[lev+1]) if (lev < finest_level) { - - // Allocate phi_cp for lev+1 - amrex::BoxArray ba = phi[lev+1]->boxArray(); - const amrex::IntVect& refratio = rel_ref_ratio.value()[lev]; - ba.coarsen(refratio); - const int ncomp = linop->getNComp(); - const int ng = (grid_type == utils::enums::GridType::Collocated) ? 
1 : 0; - amrex::MultiFab phi_cp(ba, phi[lev+1]->DistributionMap(), ncomp, ng); - if (ng > 0) { - // Set all values outside the domain to zero - phi_cp.setDomainBndry(0.0_rt, geom[lev]); - } - - // Copy from phi[lev] to phi_cp (in parallel) - const amrex::Periodicity& crse_period = geom[lev].periodicity(); - - ablastr::utils::communication::ParallelCopy( - phi_cp, - *phi[lev], - 0, - 0, - 1, - amrex::IntVect(0), - amrex::IntVect(ng), - do_single_precision_comms, - crse_period - ); - - // Local interpolation from phi_cp to phi[lev+1] -#ifdef AMREX_USE_OMP -#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) -#endif - for (amrex::MFIter mfi(*phi[lev + 1], amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) { - amrex::Array4<amrex::Real> const phi_fp_arr = phi[lev + 1]->array(mfi); - amrex::Array4<amrex::Real const> const phi_cp_arr = phi_cp.array(mfi); - - details::PoissonInterpCPtoFP const interp(phi_fp_arr, phi_cp_arr, refratio); - - amrex::Box const& b = mfi.growntilebox(ng); - amrex::ParallelFor(b, interp); - } - + interpolatePhiBetweenLevels(phi[lev], + phi[lev+1], + geom[lev], + do_single_precision_comms, + refratio, + ncomp, + ng); } // Run additional operations, such as calculation of the E field for embedded boundaries @@ -362,10 +407,8 @@ post_phi_calculation.value()(mlmg, lev); } } - } // loop over lev(els) -} - +} // computePhi } // namespace ablastr::fields #endif // ABLASTR_POISSON_SOLVER_H
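Taken together, ``getMaxNormRho`` and ``interpolatePhiBetweenLevels`` reduce the body of ``computePhi`` to a compact per-level loop. The following condensed sketch (not a verbatim excerpt: the MLMG/linear-operator setup and the EB/IGF branches are elided, and ``mlmg``, ``linop``, ``ng``, etc. are assumed to be set up as in the patch) shows the intended control flow:

```cpp
// Condensed view of the refactored computePhi loop (simplified sketch).
const amrex::Real max_norm_b = getMaxNormRho(rho, finest_level, absolute_tolerance);

for (int lev = 0; lev <= finest_level; lev++) {
    rho[lev]->mult(-1._rt / ablastr::constant::SI::ep0); // scale the MLMG source term
    // ... construct linop and mlmg for this level (elided) ...
    mlmg.setAlwaysUseBNorm(max_norm_b > 0);
    mlmg.solve({phi[lev]}, {rho[lev]}, relative_tolerance, absolute_tolerance);
    if (lev < finest_level) {
        // The coarse solution provides both the boundary values and the
        // initial guess for the next finer level.
        interpolatePhiBetweenLevels(phi[lev], phi[lev+1], geom[lev],
                                    do_single_precision_comms,
                                    rel_ref_ratio.value()[lev],
                                    linop->getNComp(), ng);
    }
}
```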