diff --git a/.codespell-ignore-words b/.codespell-ignore-words new file mode 100644 index 000000000..76aa59b16 --- /dev/null +++ b/.codespell-ignore-words @@ -0,0 +1,18 @@ +blocs +bloc +inout +pres +bion +tye +delt +thi +daa +numer +clen +coul +dum +crate +vie +fromm +nd +hsi diff --git a/.codespellrc b/.codespellrc new file mode 100644 index 000000000..81dac9e4a --- /dev/null +++ b/.codespellrc @@ -0,0 +1,5 @@ +[codespell] +skip = .git,*.ipynb,*.bib,*.ps,*~,periodic_table.list,track-enucdot-profile +ignore-words = .codespell-ignore-words + + diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml new file mode 100644 index 000000000..031a8eeb5 --- /dev/null +++ b/.github/workflows/codespell.yml @@ -0,0 +1,39 @@ +name: codespell + +on: + push: + branches: + - development + - main + pull_request: + branches: + - development + +jobs: + codespell: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: '3.10' + + - name: Cache pip + uses: actions/cache@v3 + with: + # this path is specific to Ubuntu + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }} + restore-keys: | + ${{ runner.os }}-pip- + + - name: Install dependencies + run: pip install codespell + + - name: Run codespell + run: | + codespell + diff --git a/CITATION b/CITATION index 175a5b1a4..25853dbe8 100644 --- a/CITATION +++ b/CITATION @@ -1,6 +1,6 @@ MAESTROeX derives from MAESTRO. When citing the code, please cite the -two primary alrogithm papers below as well as any of the MAESTRO papers appropriate -to the discussion. +two primary algorithm papers below as well as any of the MAESTRO +papers appropriate to the discussion. @ARTICLE{2019ApJ...887..212F, author = {{Fan}, Duoming and {Nonaka}, Andrew and {Almgren}, Ann S. 
and diff --git a/Exec/science/README b/Exec/science/README index 49d976d1a..388e95dd3 100644 --- a/Exec/science/README +++ b/Exec/science/README @@ -19,7 +19,7 @@ flame/ sub_chandra/ - Surpise! We're modelling convection. This time in 3D spherical + Surprise! We're modelling convection. This time in 3D spherical in the helium shell on the surface of a sub-Chandrasekhar mass carbon/oxygen white dwarf. Such systems can potentially yield a variety of explosive outcomes. This setup is the basis for diff --git a/Exec/science/code_comp/MaestroBaseState.cpp b/Exec/science/code_comp/MaestroBaseState.cpp index a6b29e107..0eabe3cdc 100644 --- a/Exec/science/code_comp/MaestroBaseState.cpp +++ b/Exec/science/code_comp/MaestroBaseState.cpp @@ -99,7 +99,7 @@ void Maestro::InitBaseState(BaseState& rho0, BaseState& rhoh0, // do HSE using RK2 - // out intergration starts at y - h + // our integration starts at y - h Real h = r == 0 ? base_geom.dr(n) * 0.5 : base_geom.dr(n); auto k = dUdy(y - h, U_old); diff --git a/Exec/science/rotating_star/ModelParser.cpp b/Exec/science/rotating_star/ModelParser.cpp index 154049560..af86a7824 100644 --- a/Exec/science/rotating_star/ModelParser.cpp +++ b/Exec/science/rotating_star/ModelParser.cpp @@ -33,7 +33,7 @@ void ModelParser::ReadFile(const std::string& model_file_name) { varnames_stored[i] = maestro::trim(line.substr(ipos)); } - // alocate storage for the model data + // allocate storage for the model data model_state.resize(npts_model); for (auto i = 0; i < npts_model; ++i) { model_state[i].resize(nvars_model); diff --git a/Exec/science/urca/analysis/fad_excess.f90 b/Exec/science/urca/analysis/fad_excess.f90 index 7214053ca..787b61262 100644 --- a/Exec/science/urca/analysis/fad_excess.f90 +++ b/Exec/science/urca/analysis/fad_excess.f90 @@ -68,7 +68,7 @@ program fad_excess plot_name(1) = "adiabatic excess" - ! parse the arguements + !
parse the arguments narg = command_argument_count() farg = 1 diff --git a/Exec/science/urca/analysis/fconv_radial.f90 b/Exec/science/urca/analysis/fconv_radial.f90 index be7dbe10b..e6fe4ba7d 100644 --- a/Exec/science/urca/analysis/fconv_radial.f90 +++ b/Exec/science/urca/analysis/fconv_radial.f90 @@ -86,7 +86,7 @@ program fconv_radial component_names(2) = "adiabatic gradient" component_names(3) = "ledoux gradient" - ! parse the arguements + ! parse the arguments narg = command_argument_count() farg = 1 diff --git a/Exec/science/urca/analysis/fneutrinos.f90 b/Exec/science/urca/analysis/fneutrinos.f90 index f49aea764..986be3ed3 100644 --- a/Exec/science/urca/analysis/fneutrinos.f90 +++ b/Exec/science/urca/analysis/fneutrinos.f90 @@ -108,7 +108,7 @@ program fneutrinos dx = plotfile_get_dx(pf, 1) ! get the index bounds for the finest level. - ! Note, lo and hi are ZERO-based indicies + ! Note, lo and hi are ZERO-based indices flo = lwb(plotfile_get_pd_box(pf, pf%flevel)) fhi = upb(plotfile_get_pd_box(pf, pf%flevel)) diff --git a/Exec/science/urca/analysis/probin.f90 b/Exec/science/urca/analysis/probin.f90 index fdbceab54..15eb3d6ac 100644 --- a/Exec/science/urca/analysis/probin.f90 +++ b/Exec/science/urca/analysis/probin.f90 @@ -3,7 +3,7 @@ ! This file is automatically generated by write_probin.py at ! compile-time. ! -! To add a runtime parameter, do so by editting the appropriate _parameters +! To add a runtime parameter, do so by editing the appropriate _parameters ! file. ! This module stores the runtime parameters. 
The probin_init() routine is diff --git a/Exec/science/urca/analysis/scripts/slice-convgrad b/Exec/science/urca/analysis/scripts/slice-convgrad index 6d966e881..0527df6f2 100755 --- a/Exec/science/urca/analysis/scripts/slice-convgrad +++ b/Exec/science/urca/analysis/scripts/slice-convgrad @@ -1,8 +1,9 @@ #!/usr/bin/env python +import argparse + +import numpy as np import yt from yt import derived_field -import numpy as np -import argparse parser = argparse.ArgumentParser() parser.add_argument('infile', type=str, help='Name of input plotfile.') @@ -15,7 +16,7 @@ parser.add_argument('-sign', '--sign', action='store_true', help='If supplied, p parser.add_argument('-fmin', '--field_min', type=float, help='Minimum scale for colorbar.') parser.add_argument('-fmax', '--field_max', type=float, help='Maximum scale for colorbar.') parser.add_argument('-log', '--logscale', action='store_true', help='If supplied, use a log scale for the field.') -parser.add_argument('-symlog', '--symlog', action='store_true', help='If supplied, use symlog scaling, which is linear near zero, to accomodate positive and negative values.') +parser.add_argument('-symlog', '--symlog', action='store_true', help='If supplied, use symlog scaling, which is linear near zero, to accommodate positive and negative values.') parser.add_argument('-linthresh', '--linthresh', type=float, help='Linear threshold for symlog scaling. 
(Default is 0.1)') parser.add_argument('-dc', '--drawcells', action='store_true', help='If supplied, draw the cell edges.') parser.add_argument('-dg', '--drawgrids', action='store_true', help='If supplied, draw the grids.') diff --git a/Exec/science/urca/analysis/scripts/slice-enucdot b/Exec/science/urca/analysis/scripts/slice-enucdot index d5c50d9ed..5d2f7677a 100755 --- a/Exec/science/urca/analysis/scripts/slice-enucdot +++ b/Exec/science/urca/analysis/scripts/slice-enucdot @@ -1,15 +1,16 @@ #!/usr/bin/env python +import argparse + +import numpy as np import yt from yt import derived_field -import numpy as np -import argparse parser = argparse.ArgumentParser() parser.add_argument('infile', type=str, help='Name of input plotfile.') parser.add_argument('-w', '--width', type=float, help='Width of slice (cm). Default is domain width.') parser.add_argument('-axis', '--axis', type=str, default='x', help='Axis along which to slice. Should be "x", "y", or "z".') -parser.add_argument('-symlog', '--symlog', action='store_true', help='If supplied, use symlog scaling, which is linear near zero, to accomodate positive and negative values of Hnuc.') +parser.add_argument('-symlog', '--symlog', action='store_true', help='If supplied, use symlog scaling, which is linear near zero, to accommodate positive and negative values of Hnuc.') parser.add_argument('-logmax', '--logmax', type=float, help='Log of the +/- maximum Hnuc value.') parser.add_argument('-rho', '--rhocontours', type=float, nargs='+', help='Draws contours for the densities provided (g/cm^3).') diff --git a/Exec/science/urca/analysis/scripts/slice-field b/Exec/science/urca/analysis/scripts/slice-field index 8e23f10fd..0497076b2 100755 --- a/Exec/science/urca/analysis/scripts/slice-field +++ b/Exec/science/urca/analysis/scripts/slice-field @@ -4,11 +4,12 @@ Use yt to slice a boxlib plotfile supplied through the domain center. Donald E. 
Willcox """ -import yt -from yt import derived_field -import numpy as np import argparse + +import numpy as np +import yt from UrcaAnalysis.yt_extensions import UrcaShellFields +from yt import derived_field parser = argparse.ArgumentParser() parser.add_argument('infile', type=str, help='Name of input plotfile.') @@ -19,7 +20,7 @@ parser.add_argument('-axis', '--axis', type=str, default='x', parser.add_argument('-w', '--width', type=float, help='Width of slice (cm). Default is domain width.') parser.add_argument('-log', '--logscale', action='store_true', help='If supplied, use a log scale for the field.') -parser.add_argument('-symlog', '--symlog', type=float, help='If supplied, use symlog scaling, which is linear near zero, to accomodate positive and negative values of the field. Pass the value of the field at which to linearize the colorbar.') +parser.add_argument('-symlog', '--symlog', type=float, help='If supplied, use symlog scaling, which is linear near zero, to accommodate positive and negative values of the field. 
Pass the value of the field at which to linearize the colorbar.') parser.add_argument('-rho', '--rhocontours', type=float, nargs='+', help='Draws contours for the densities provided (g/cm^3).') parser.add_argument('-rhocolors', '--rhocolors', type=str, nargs='+', default='cyan', help='Color(s) of density contours.') parser.add_argument('-ctr', '--center', type=float, nargs='+', help='Centers the plot on the coordinates provided (x, y, z).') diff --git a/Exec/science/urca/analysis/scripts/streamlines b/Exec/science/urca/analysis/scripts/streamlines index 8dcd2c25e..6c374ef66 100755 --- a/Exec/science/urca/analysis/scripts/streamlines +++ b/Exec/science/urca/analysis/scripts/streamlines @@ -1,11 +1,10 @@ #!/usr/bin/env python -import yt -import numpy as np import matplotlib.pylab as pl - -from yt.visualization.api import Streamlines -from yt.units import cm +import numpy as np +import yt from mpl_toolkits.mplot3d import Axes3D +from yt.units import cm +from yt.visualization.api import Streamlines # Load the dataset ds = yt.load('wd_512_rhoc4-5_plt64636') @@ -24,7 +23,7 @@ pos = pos_dx streamlines = Streamlines(ds, pos, ('boxlib', 'x_vel'), ('boxlib', 'y_vel'), ('boxlib', 'z_vel'), get_magnitude=True, volume=ds.all_data()) streamlines.integrate_through_volume() -# Create a 3D plot, trace the streamlines throught the 3D volume of the plot +# Create a 3D plot, trace the streamlines through the 3D volume of the plot fig=pl.figure() ax = Axes3D(fig) for stream in streamlines.streamlines: diff --git a/Exec/test_problems/double_bubble/MaestroInitData.cpp b/Exec/test_problems/double_bubble/MaestroInitData.cpp index 2ae8ccc00..ee9871b20 100644 --- a/Exec/test_problems/double_bubble/MaestroInitData.cpp +++ b/Exec/test_problems/double_bubble/MaestroInitData.cpp @@ -3,7 +3,7 @@ using namespace amrex; using namespace problem_rp; -// prototype for pertubation function to be called on the +// prototype for perturbation function to be called on the // device (if USE_CUDA=TRUE) 
AMREX_GPU_DEVICE void Perturb(const Real p0, const Real* s0, Real* perturbations, diff --git a/Exec/test_problems/reacting_bubble/MaestroInitData.cpp b/Exec/test_problems/reacting_bubble/MaestroInitData.cpp index 1ce6e85c0..5a4ba91db 100644 --- a/Exec/test_problems/reacting_bubble/MaestroInitData.cpp +++ b/Exec/test_problems/reacting_bubble/MaestroInitData.cpp @@ -5,7 +5,7 @@ using namespace amrex; using namespace problem_rp; -// prototype for pertubation function to be called on the +// prototype for perturbation function to be called on the // device (if USE_CUDA=TRUE) AMREX_GPU_DEVICE void Perturb(const Real p0_init, const Real* s0, Real* perturbations, diff --git a/Exec/test_problems/rt/MaestroInitData.cpp b/Exec/test_problems/rt/MaestroInitData.cpp index be52eed32..d476d78e1 100644 --- a/Exec/test_problems/rt/MaestroInitData.cpp +++ b/Exec/test_problems/rt/MaestroInitData.cpp @@ -3,7 +3,7 @@ using namespace amrex; using namespace problem_rp; -// prototype for pertubation function to be called on the +// prototype for perturbation function to be called on the // device (if USE_CUDA=TRUE) AMREX_GPU_DEVICE void Perturb(const Real p0, const Real* s0, Real* scal_pert, Real* vel_pert, diff --git a/Exec/unit_tests/test_basestate/README b/Exec/unit_tests/test_basestate/README index 40d096ace..85fcb8b8f 100644 --- a/Exec/unit_tests/test_basestate/README +++ b/Exec/unit_tests/test_basestate/README @@ -42,7 +42,7 @@ plane-parallel test problem: ------------------------------------------------------------------------------ analytical CNO cycle test problem: - make sure to change the nework in GNUmakefile to H_core_null + make sure to change the network in GNUmakefile to H_core_null ------------------------------------------------------------------------------ diff --git a/Exec/unit_tests/test_diffusion/MaestroInit.cpp b/Exec/unit_tests/test_diffusion/MaestroInit.cpp index 1c0432221..12924bed0 100644 --- a/Exec/unit_tests/test_diffusion/MaestroInit.cpp +++ 
b/Exec/unit_tests/test_diffusion/MaestroInit.cpp @@ -124,7 +124,7 @@ void Maestro::MakeNewLevelFromScratch(int lev, Real time, const BoxArray& ba, const auto x = prob_lo[0] + (Real(i) + 0.5) * dx[0] - center_p[0]; const auto y = prob_lo[1] + (Real(j) + 0.5) * dx[1] - center_p[1]; - // apply the guassian enthalpy pulse at constant density + // apply the Gaussian enthalpy pulse at constant density const auto dist2 = x * x + y * y; const auto h_zone = diff --git a/Exec/unit_tests/test_react/scripts/fsnapshot.f90 b/Exec/unit_tests/test_react/scripts/fsnapshot.f90 index fb8b6142d..f1c533dd2 100644 --- a/Exec/unit_tests/test_react/scripts/fsnapshot.f90 +++ b/Exec/unit_tests/test_react/scripts/fsnapshot.f90 @@ -183,7 +183,7 @@ subroutine fplotfile_get_comps(pltfile, comps_sz, comps) !characters. !Perhaps there is a more elegant solution, but I've just resized the array to - !accomodate all of the characters and it works as a rudimentary work-around if + !accommodate all of the characters and it works as a rudimentary work-around if !one is really determined to have a string returned. !f2py intent(in) :: pltfile @@ -340,7 +340,7 @@ subroutine fplotfile_get_data_3d(pltfile, component, mydata, nx, ny, nz, ierr) ! get the index bounds and dx for the coarse level. Note, lo and - ! hi are ZERO based indicies + ! hi are ZERO based indices lo = lwb(plotfile_get_pd_box(pf, 1)) hi = upb(plotfile_get_pd_box(pf, 1)) diff --git a/README.md b/README.md index c28d0f49e..e253c3423 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ http://amrex-astro.github.io/MAESTROeX/ required, as is Python (>= 3.6) and standard tools available in any Unix-like environments (e.g., Perl and `sed`). - For GPU computing, CUDA 10 or later is requred. + For GPU computing, CUDA 10 or later is required. - To stay up-to-date with MAESTROeX, you will want to periodically pull changes from the repository by typing `git pull`. 
@@ -43,7 +43,7 @@ http://amrex-astro.github.io/MAESTROeX/ git clone --recursive https://github.com/AMReX-Astro/MAESTROeX.git ``` - To add the submodules to an exisiting clone, from the top-level + To add the submodules to an existing clone, from the top-level MAESTROeX directory, do: ``` diff --git a/Source/MaestroMakew0.cpp b/Source/MaestroMakew0.cpp index 068c3b4b3..b728c9779 100644 --- a/Source/MaestroMakew0.cpp +++ b/Source/MaestroMakew0.cpp @@ -948,7 +948,7 @@ void Maestro::Tridiag(const BaseStateArray& a, auto gam = gam_s.array(); if (b(0) == 0) { - Abort("tridiag: CANT HAVE B(0) = 0.0"); + Abort("tridiag: CAN NOT HAVE B(0) = 0.0"); } Real bet = b(0); diff --git a/Source/MaestroNodalProj.cpp b/Source/MaestroNodalProj.cpp index 585f9882d..34455164c 100644 --- a/Source/MaestroNodalProj.cpp +++ b/Source/MaestroNodalProj.cpp @@ -388,7 +388,7 @@ void Maestro::NodalProj(int proj_type, Vector& rhcc, } else if (proj_type == pressure_iters_comp) { // Vproj = Vproj - sig*grad(phi) // we do this manually instead of using mlndlap.updateVelocity() because - // for alt_energy_fix we neet beta0*grad(phi) + // for alt_energy_fix we need beta0*grad(phi) for (int lev = 0; lev <= finest_level; ++lev) { for (int dir = 0; dir < AMREX_SPACEDIM; ++dir) { MultiFab::Multiply(gphi[lev], sig[lev], 0, dir, 1, 0); @@ -404,7 +404,7 @@ void Maestro::NodalProj(int proj_type, Vector& rhcc, } else if (proj_type == regular_timestep_comp) { // Vproj = Vproj - sig*grad(phi) // we do this manually instead of using mlndlap.updateVelocity() because - // for alt_energy_fix we neet beta0*grad(phi) + // for alt_energy_fix we need beta0*grad(phi) for (int lev = 0; lev <= finest_level; ++lev) { for (int dir = 0; dir < AMREX_SPACEDIM; ++dir) { MultiFab::Multiply(gphi[lev], sig[lev], 0, dir, 1, 0); diff --git a/Util/diagnostics/README.md b/Util/diagnostics/README.md index 246f55cce..4c103a90b 100644 --- a/Util/diagnostics/README.md +++ b/Util/diagnostics/README.md @@ -17,13 +17,13 @@ be output: Additional 
arguments depend on the problem: - **1d**: no additional arguments are required -- **2d cylindrical**: extra arguents `--xctr x` and `--yctr y` giving the coordinates +- **2d cylindrical**: extra arguments `--xctr x` and `--yctr y` giving the coordinates of the domain center (x,y) can be provided (both default to 0.0) - **2d spherical**: the argument `--sphr` *must* be provided to indicate a spherical problem, and the `--yctr y` argument can be provided to give the y coordinate of the domain center (defaults to 0.0) -- **3d cylindrical**: extra arguents `--xctr x` and `--yctr y` giving the coordinates +- **3d cylindrical**: extra arguments `--xctr x` and `--yctr y` giving the coordinates of the domain center (x,y) can be provided (both default to 0.0) - **3d spherical**: the argument `--sphr` *must* be provided to indicate a spherical -problem, and extra arguents `--xctr x`, `--yctr y` and `--zctr z` giving the coordinates +problem, and extra arguments `--xctr x`, `--yctr y` and `--zctr z` giving the coordinates of the domain center (x,y,z) can be provided (all default to 0.0) diff --git a/Util/model_parser/ModelParser.cpp b/Util/model_parser/ModelParser.cpp index ab51c8846..c3bcd8541 100644 --- a/Util/model_parser/ModelParser.cpp +++ b/Util/model_parser/ModelParser.cpp @@ -33,7 +33,7 @@ void ModelParser::ReadFile(const std::string& model_file_name) { varnames_stored[i] = maestro::trim(line.substr(ipos)); } - // alocate storage for the model data + // allocate storage for the model data model_state.resize(npts_model); for (auto i = 0; i < npts_model; ++i) { model_state[i].resize(nvars_model); @@ -204,4 +204,4 @@ Real ModelParser::Interpolate(const Real r, const int ivar, } return interpolate; -} \ No newline at end of file +} diff --git a/paper/paper.md b/paper/paper.md index 7e0d6949e..13a7e2115 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -68,7 +68,7 @@ of density and pressure stratification. 
The code leverages the new AMReX software framework [@AMReX] for block-structured adaptive mesh refinement (AMR) applications, allowing for scalability to large fractions of leadership-class machines. -Our approach to parallization uses a hybrid MPI/OpenMP approach, with increasing GPU support. +Our approach to parallelization uses a hybrid MPI/OpenMP approach, with increasing GPU support. Microphysics are provided by the Starkiller-Astro libraries [@starkiller]. The code contains documentation through sphinx, a large suite of test problems, and nightly regression tests on a number of architectures. diff --git a/sphinx_docs/source/Godunov.rst b/sphinx_docs/source/Godunov.rst index d359cce70..34443f431 100644 --- a/sphinx_docs/source/Godunov.rst +++ b/sphinx_docs/source/Godunov.rst @@ -892,7 +892,7 @@ Then upwind based on :math:`v_{\ib-\half\eb_y}^{1D}`: u_{R,\ib-\half\eb_y}^{y|z}, & v_{\ib-\half\eb_y}^{1D} < 0. \end{cases} -We use an analogous procedure to compute five more intemediate states, +We use an analogous procedure to compute five more intermediate states, :math:`u_{\ib-\half\eb_z}^{z|y}, v_{\ib-\half\eb_x}^{x|z}, v_{\ib-\half\eb_z}^{z|x}, w_{\ib-\half\eb_x}^{x|y}`, and :math:`w_{\ib-\half\eb_y}^{y|x}`. Then we do a full-dimensional diff --git a/sphinx_docs/source/architecture.rst.old b/sphinx_docs/source/architecture.rst.old deleted file mode 100644 index d53eae8ad..000000000 --- a/sphinx_docs/source/architecture.rst.old +++ /dev/null @@ -1,1501 +0,0 @@ -********************** -MAESTROeX Architecture -********************** - -MAESTROeX Basics -================ - -MAESTROeX models problems that are in tight hydrostatic equilibrium. -The fluid state is decomposed into a 1D radial base state that -describes the hydrostatic structure of the star or atmosphere, and a -2- or 3-D Cartesian full state, that captures the departures from -hydrostatic equilibrium. Two basic geometries are allowed. 
A -*plane-parallel* geometry assumes that the domain is thin compared to -the radius of curvature of the star, and therefore the 1D base state -is perfectly aligned with the Cartesian state. A *spherical* -geometry is for modeling an entire star [1]_. Here, the 1D base state is not -aligned with the Cartesian state. The figure below shows -these geometries. - -.. figure:: base_grid.eps - :alt: base_grid - :width: 80% - :align: center - - MAESTROeX geometries, showing both the 1D base state and - the full Cartesian state. (Left) For multi-level - problems in planar geometry, we force a direct alignment between the - radial array cell centers and the Cartesian grid cell centers by - allowing the radial base state spacing to change with space and - time. (Right) For multi-level problems in spherical geometry, since - there is no direct alignment between the radial array cell centers - and the Cartesian grid cell centers, we choose to fix the radial - base state spacing across levels. Figure taken from :cite:`multilevel`. - -MAESTROeX can use adaptive mesh refinement to focus resolution on -complex regions of flow. For Cartesian/plane-parallel geometries, all -cells at the same height must be at the same level of refinement. -This restriction is to allow for the base state to directly align with -the Cartesian state everywhere. For spherical geometries, there is no -such restriction (again, see above figure). -The MAESTROeX grids are managed by the AMReX library, which is -distributed separately. - -The MAESTROeX ‘Ecosystem’ -========================= - -.. raw:: latex - - \centering - -.. figure:: \archfigpath/maestro_ecosystem2 - :alt: [fig:arch:eco] The basic - MAESTROeX ‘ecosystem’. Here we see the different packages that - contribute to building the reacting_bubble problem in MAESTROeX. The - red directories are part of most standard MAESTROeX build. 
The - purple lines show the directories that are pulled in through - various Makefile variables (AMREX_HOME, NETWORK_DIR, - EOS_DIR, and CONDUCTIVITY_DIR). - -Building MAESTROeX requires both the MAESTROeX-specific source -files (distributed in the MAESTROeX/ directory), and the -AMReX library (distributed separately, consisting of the amrex/ directory). -AMReX provides both a C++ and a Fortran framework. Figure \ `[fig:arch:eco] <#fig:arch:eco>`__ -shows the relationship between the different packages, highlighting -what goes into defining a specific MAESTROeX problem. - -Problems piece together various MAESTROeX directories, choosing a -reaction network, equation of state, and conductivity routine to build -an executable. Briefly, the MAESTROeX sub-directories are: - -- MAESTROeX/ - - The main MAESTROeX algorithm directory. - - Important directories under MAESTROeX/ include: - - - Docs/ - - Documentation describing the basic algorithm (including this - document). - - - Exec/ - - The various problem-setups. Each problem in MAESTROeX gets it own - sub-directory under SCIENCE/, TEST_PROBLEMS/, or - UNIT_TESTS/. The GNUmakefile in the problem directory - includes the instructions on how to build the executable, - including what modules in Microphysics/ are used. Any file that - you place in a sub-directory here takes precedence over a file of - the same name in MAESTROeX/. This allows problems to have - custom versions of the main MAESTROeX routines (e.g. initial - conditions via initdata.f90. See § \ `3.1 <#sec:makefile>`__ and - Chapter \ `[ch:make] <#ch:make>`__ for details on the build system). - - - SCIENCE/ - - MAESTROeX problem directories for science studies. These are - the setups that have been used for science papers in the past, - or are the basis for current science studies. - - - TEST_PROBLEMS/ - - MAESTROeX problem directories for simple test problems that have - been used while developing MAESTROeX. 
Many of these problems - have appeared in the various papers describing the - MAESTROeX algorithm. - - - UNIT_TESTS/ - - Special MAESTROeX problem directories that test only a single - component of the MAESTROeX algorithm. These often have their - own main drivers (varden.f90) that setup and initialize - some data structures and then call only a few of the - MAESTROeX routines. See Chapter \ `[chapter:unit_tests] <#chapter:unit_tests>`__ for details. - - - Microphysics/ [2]_ - - The basic microphysics routines used by MAESTROeX. These are organized - into the following sub-directories. - - - conductivity/ - - Various routines for computing the thermal conductivity used in - the thermal diffusion part of the algorithm. - - - EOS/ - - The gamma_law_general/. - - - networks/ - - The basic general_null network that defines arbitrary - non-reacting species. - - - Source/ - - The main MAESTROeX source. Here you will find the driver routine, - the advection routines, etc. All MAESTROeX problems will compile - this source. - - - Util/ - - Various helper routines exist in this directory. Some of these - are externally developed. - - - BLAS/ - - Linear algebra routines. - - - initial_models/ - - Simple routines for generating toy initial models in hydrostatic equilibrium. - - - model_parser/ - - A simple Fortran module for reading in 1D initial model files. - This is used by the initialization routines to get the initial - model data. - - - random/ - - A random number generator. - - - VODE/ - - The VODE :raw-latex:`\cite{vode}` package for integrating ODEs. At the - moment, this is used for integrating various reaction networks. - -.. raw:: latex - - \centering - -.. figure:: \archfigpath/amrex_directory2 - :alt: [fig:arch:amrex] The - basic AMReX directory structure. The directories used by - MAESTROeX are indicated in red. - - [fig:arch:amrex] The - basic AMReX directory structure. The directories used by - MAESTROeX are indicated in red. 
- -The AMReX directory structure is shown in -Figure \ `[fig:arch:amrex] <#fig:arch:amrex>`__. The subset of the directories that are -used by MAESTROeX are: - -- Src/ - - The main AMReX source directory. In MAESTROeX, we only use the - Fortran source files. The core directories are: - - - F_BaseLib/ - - The Fortran AMReX files. This is a library for describing - meshes consisting of a union of boxes. The AMReX modules - define the basic datatypes used in MAESTROeX. AMReX also - provides the routines that handle the parallelization and I/O. - - - LinearSolvers/ - - The AMReX linear solvers—these are used to solve elliptic - problems in the MAESTROeX algorithm. - - - F_MG - - The Fortran multigrid solver, with support for both - cell-centered and node-centered data. - -- Tools/ - - Various tools used for building AMReX applications. Here we use: - - - F_mk/ - - The generic Makefiles that store the compilation flags for various - platforms. Platform/compiler-specific options are stored in the - comps/ sub-directory. - - - F_scripts/ - - Some simple scripts that are useful for building, running, - maintaining MAESTROeX. - -Finally the amrex/Tools/Postprocessing/F_Src package provides simple -Fortran-based analysis routines (e.g. extract a line from a -multidimensional dataset) that operate on AMReX datasets. These are -described in § \ `[sec:analysis] <#sec:analysis>`__. Several sub-directories with -python-based routines are also here. These are described both in -§ \ `[sec:analysis] <#sec:analysis>`__ and § \ `[sec:vis:python] <#sec:vis:python>`__. - -.. _sec:adding_problems: - -Adding A New Problem -==================== - -Different MAESTROeX problems are defined in sub-directories under -Exec/ in SCIENCE, TEST_PROBLEMS, or UNIT_TESTS. -To add a problem, start by creating a new sub-directory—this is -where you will compile your problem and store all the problem-specific -files. 
- -The minimum requirement to define a new problem would be a -GNUmakefile which describes how to build the application and an -input file which lists the runtime parameters. The problem-specific -executable is built in the problem directory by typing make. -Source files are found automatically by searching the directories -listed in the GNUmakefile. Customized versions of any source -files placed in the problem-directory override those with the same -name found elsewhere. Any unique source files (and not simply a -custom version of a file found elsewhere) needs to be listed in a file -called GPackage.mak in the problem-directory (and this needs to -be told to the build system—see below). - -.. _sec:makefile: - -The GNUmakefile ---------------- - -A basic GNUmakefile begins with: - -:: - - NDEBUG := t - MPI := - OMP := - -Here, NDEBUG is true if we are building an optimized executable. -Otherwise, the debug version is built—this typically uses less -optimization and adds various runtime checks through compiler flags. -MPI and OMP are set to true if we want to use either MPI -or OpenMP for parallelization. If MPI := t, you will need to -have the MPI libraries installed, and their location may need to be -specified in MAESTROeX/mk/GMakeMPI.mak. - -The next line sets the compiler to be used for compilation: - -:: - - COMP := gfortran - -The MAESTROeX build system knows what options to use for various -compiler families. The COMP flag specifies which compiler to -use. Allowed values include Intel, gfortran, PGI, -PathScale, and Cray. The specific details of these -choices are defined in the MAESTROeX/mk/comps/ directory. - -MKVERBOSE set to true will echo the build commands to the -terminal as the are executed. - -:: - - MKVERBOSE := t - -The next line defines where the top of the MAESTROeX source tree is located. - -:: - - MAESTROeX_TOP_DIR := ../../.. - -A MAESTROeX application is built from several packages (the -multigrid solver, an EOS, a reaction network, etc.). 
The core -MAESTROeX packages are always included, so a problem only needs -to define the EOS, reaction network, and conductivities to -use, as well as any extra, problem-specific files. - -:: - - EOS_DIR := helmholtz - CONDUCTIVITY_DIR := constant - NETWORK_DIR := ignition_simple - - EXTRA_DIR := Util/random - -Note that the microphysics packages are listed simply by the name of -the directory containing the specific implementation (e.g. helmholtz). -By default, the build system will look in Microphysics/EOS/ for -the EOS, Microphysics/conductivity/ for the conductivity routine, -and Microphysics/networks/ for the reaction network. To -override this default search path, you can set EOS_TOP_DIR, -CONDUCTIVITY_TOP_DIR, and NETWORK_TOP_DIR respectively. - -Generally, one does not need to include the problem directory itself -in EXTRA_DIR, unless there are unique source files found there, -described in a GPackage.mak file. These variables are -interpreted by the GMaestro.mak file and used to build a main -list of packages called Fmdirs. The build system will attempt -to build all of the files listed in the various GPackage.mak -files found in the Fmdirs directories. Furthermore, -Fmdirs will be will be added to the make VPATH, which -is the list of directories to search for source files. The problem -directory will always be put first in the VPATH, so any source -files placed there override those with the same name found elsewhere -in the source tree. - -Some packages (for instance, the helmholtz -EOS) require Fortran include files. The Fmincludes variable -lists all those directories that contain include files that are -inserted into the Fortran source at compile time via the include -statement. Presently, the only instance of this is with the Helmholtz -general equation of state found in Microphysics/EOS/helmholtz/. This is -automatically handled by the GMaestro.mak instructions. 
- -Runtime parameters listed in the MAESTROeX/_parameters file are -parsed at compile time and the file probin.f90 is written and -compiled. This is a Fortran module that holds the values of the -runtime parameters and makes them available to any routine. By -default, the build system looks for a file called \_parameters -in the problem directory and adds those parameters along with the -main list of MAESTROeX parameters (MAESTROeX/_parameters) to -the probin_module. - -The final line in the GNUmakefile includes the rules to actually -build the executable. - -:: - - include $(MAESTROeX_TOP_DIR)/GMaestro.mak - -Handling Problem-Specific Source Files -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -As mentioned above, any source files placed in the problem directory -override a files with the same name found elsewhere in the source -tree. This allows you to create a problem-specific version of any -routine. Source files that are unique to this problem (i.e. there is -no file with the same name elsewhere in the source tree) need to be -listed in a file GPackage.mak in the problem directory, and -the problem-directory needs to be explicitly listed in the EXTRA_DIR -list in the GNUmakefile. - -.. _sec:def_runtime_param: - -Defining Runtime Parameters ---------------------------- - -The runtime parameters for the core MAESTROeX algorithm are listed in -MAESTROeX/_parameters. That file is parsed at compile-time by -the MAESTROeX/write_probin.py script (along with any -problem-specific parameters). The script outputs the probin.f90 -source file. Each line in the \_parameters file has the form: -10em *data-type* 10em *value* -where *parameter* is the name of the runtime parameter, -*data-type* is one of {character, real, -integer, logical}, and the *value* specifies the default -value for the runtime parameter. Comments are indicated by a ‘ -#’ character and are used to produce documentation about the -available runtime parameters. 
For the documentation, runtime parameters are grouped together -in the \_parameters file into categories. The category headings -are defined by comments in the \_parameters file and any comments -following that heading are placed into that category. The documentation -(Chapter `[ch:parameters] <#ch:parameters>`__) is produced by the script -MAESTROeX/docs/runtime_parameters/rp.py. - -At runtime, the default values for the parameters can be overridden -either through the inputs file (by adding a line of the form: -parameter = value) or through a command-line argument (taking the -form: –parameter value). The probin_module makes the -values of the runtime parameters available to the various functions -in the code (see § \ `6.7 <#sec:probin>`__). - -Problem-specific runtime parameters should be defined in the -problem-directory in a file called \_parameters. This file will -be automatically found at compile time. - -.. _sec:initial_models: - -Preparing the Initial Model ---------------------------- - -MAESTROeX models subsonic, non-hydrostatic flows as deviations from -a background state in hydrostatic equilibrium. -The solution in MAESTROeX is broken up into a 1D base state and the 2- -or 3D full state. The job of the 1D base state in the algorithm is -to represent the hydrostatic structure. The full, Cartesian state -carries the departures from hydrostatic equilibrium. The underlying -formulation of the low Mach number equations assumes that the base -state is in hydrostatic equilibrium. At the start of a simulation, -the initial model is read in and taken as the base state. Therefore, -any initial model needs to already be in hydrostatic equilibrium. - -The routines in Util/initial_models/ prepare an initial model -for MAESTROeX. In general, there are two different proceduces that are -needed. The first type modify an existing 1D initial model produced -somewhere else (e.g. 
a 1D stellar evolution code), and map it onto a -uniform grid, at the desired resolution, using the equation of state -in MAESTROeX, and using MAESTROeX’s discretization of hydrostatic -equilibrium. The second type generate the initial model internally, -by integrating the condition of hydrostatic equilibrium together with -a simplifying assumption on the energy (e.g. isothermal or -isentropic). In both cases hydrostatic equilibrium is enforced as: - -.. math:: - - \frac{p_{i+1} - p_i}{\Delta r} = \frac{1}{2} (\rho_i + \rho_{i+1}) - g_{i+1/2} - -Here, :math:`g_{i+1/2}` is the edge-centered gravitational acceleration. - -The toy_atm example provides a simple approximation for a thin -(plane-parallel) convectively-unstable accreted layer on the surface -of a star. This can be used as the starting point for a more complex -model. - -MAESTROeX initial models are read in by the Util/model_parser -routines. This expects the initial model to contain a header giving -the number of variables and their names, followed by rows of data -giving the coordinate and data values at that coordinate. The initial -model should contain the same species data (in the form of mass fractions) as -defined in the network module used by the MAESTROeX problem. - -Full details on which initial model routine matches each problem and -how the initial models are used to initialize the full state data can -be found in § \ `[sec:initial_models_main] <#sec:initial_models_main>`__. - -Customizing the Initialization ------------------------------- - -The best way to customize the initialization (e.g. add perturbations) -is to copy from one of the existing problems. The file initveldata.f90 controls the velocity field initialization and initscaldata.f90 controls the initialization of the scalars -(:math:`\rho`, :math:`\rho X_k`, :math:`\rho h`). The reacting_bubble problem is a good -starting point for plane-parallel and wdconvect is a good -starting point for full stars. 
- -AMReX Data Structures -===================== - -MAESTROeX’s gridding is handled by the AMReX library, which -contains the most fundamental objects used to construct parallel -block-structured AMR applications—different -regions of the domain can have different spatial resolutions. -At each level of refinement, the region covered by that level is divided -into grids, or boxes. The entire computational domain is covered by -the coarsest (base) level of refinement, often called level :math:`\ell=0`, either by one -grid or divided into many grids. -Higher levels of refinement have cells that are finer by a “refinement ratio” -(typically 2). The grids are properly nested in the sense that the union -of grids at level :math:`\ell+1` is contained in the union of grids at level :math:`\ell`. -Furthermore, the containment is strict in the sense that, except at physical -boundaries, the level :math:`\ell` grids are large enough to guarantee that there is -a border at least :math:`n_{\rm buffer}` level :math:`\ell` cells wide surrounding each level -:math:`\ell +1` grid (grids at all levels are allowed to extend to the physical -boundaries so the proper nesting is not strict there). -For parallel computations, the boxes are spread across processors, in -a fashion designed to put roughly equal amounts of work on each -processor (load balancing). - -.. figure:: \archfigpath/data_loc2 - :alt: [fig:dataloc] Some of the different data-centerings: - (a) cell-centered, (b) nodal in the :math:`x`-direction, and (c) nodal in - both the :math:`x`- and :math:`y`-directions. Note that for nodal data, the - integer index corresponds to the lower boundary in that direction. - In each of these centerings, the red point has the same indices: (1,2). - Not shown is the case where data is nodal in the :math:`y`-direction only. 
- :width: 6.5in - - [fig:dataloc] Some of the different data-centerings: - (a) cell-centered, (b) nodal in the :math:`x`-direction, and (c) nodal in - both the :math:`x`- and :math:`y`-directions. Note that for nodal data, the - integer index corresponds to the lower boundary in that direction. - In each of these centerings, the red point has the same indices: (1,2). - Not shown is the case where data is nodal in the :math:`y`-direction only. - -On a grid, the data can be stored at cell-centers, on a face/edge, or -on the corners. In AMReX, data that is on an edge is termed ‘nodal’ -in that direction (see Figure \ `[fig:dataloc] <#fig:dataloc>`__). Data that is on the -corners is nodal in all spatial directions. In MAESTROeX, the state -data (density, enthalpy, velocity, :math:`\ldots`) is generally -cell-centered. Fluxes are nodal in the direction they represent. -A few quantities are nodal in all directions (e.g. :math:`\phi` used in -the final velocity projection). - -To simplify the description of the underlying AMR grid, AMReX provides a number of Fortran types. We briefly summarize the major -data types below. A more extensive introduction to AMReX is -provided by the AMReX User’s Guide, distributed with the library. - -box ---- - -A box is simply a rectangular domain in space. Note that boxes -do not hold the state data themselves. A box has a lo -and hi index in each coordinate direction that gives the -location of the lower-left and upper-right corner with respect to -a global index space. - -.. raw:: latex - - \centering - -.. figure:: \archfigpath/index_grid2 - :alt: [fig:boxes] Three boxes that comprise a single level. At this - resolution, the domain is 20\ :math:`\times`\ 18 zones. Note that the - indexing in AMReX starts with :math:`0`. - :width: 4in - - [fig:boxes] Three boxes that comprise a single level. At this - resolution, the domain is 20\ :math:`\times`\ 18 zones. Note that the - indexing in AMReX starts with :math:`0`. 
- -The computational domain is divided into boxes. The collection of -boxes with the same resolution comprise a level. -Figure \ `[fig:boxes] <#fig:boxes>`__ shows three boxes in the same level of -refinement. The position of the boxes is with respect to the global -index space at that level. For example, box 1 in the figure has -lo = (3,7) and hi = (9,12). Note that the global indexing -is 0-based. - -The global index space covers the entire domain at a given resolution. -For a simulation setup with n_cellx = 32 and n_celly = -32, the coarsest level (level 1) has :math:`32 \times 32` zones, and the -global index space will run from :math:`0, \ldots, 31` in each coordinate -direction. Level 2 will have a global index space running from :math:`0, -\ldots, 63` in each coordinate direction (corresponding to :math:`64 \times -64` zones if fully refined), and level 3 will have a global index -space running from :math:`0, \ldots, 127` in each coordinate direction -(corresponding to :math:`128\times 128` zones if fully refined). - -Common Operations on a box -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -A box declared as: - -:: - - type(box) :: mybox - -The upper and lower bounds of the box (in terms of the global -index space) are found via: - -- lo = lwb(mybox) returns an array, lo(dm), with - the box lower bounds - -- hi = upb(mybox) returns an array, hi(dm), with - the box upper bounds - -boxarray and ml_boxarray ------------------------- - -A boxarray is an array of boxes. A ml_boxarray is a collection of -boxarrays at different levels of refinement. - -layout and ml_layout --------------------- - -A layout is basically a boxarray that knows information about other -boxes, or box “connectivity.” It contains additional information -that is used in filling ghost cells from other fine grids or from -coarser grids. 
This information is stored as long as the layout -exists so that we don’t have to recompute intersections every time we -do some operation with two multifabs that have that layout, for -example. - -By separating the layout from the actual data, we can allocate and -destroy data that lives on the grid as needed. - -fab ---- - -A fab is a “Fortran Array Box”. It contains the state data in a -multidimensional array and several box-types to describe where in -the global index-space it lives: - -:: - - type fab - ... - type(box) :: bx - type(box) :: pbx - type(box) :: ibx - end type fab - -bx represents the box in the global index-space over which the -fab is defined, pbx represents the “physical” box in the -sense that it includes bx plus ghost cells, and ibx is the -same as bx unless the fab is nodal. As can be seen in -Figure \ `[fig:dataloc] <#fig:dataloc>`__, for the same grid nodal data requires one -more array element than cell-centered data. To address this ibx -is made by growing bx by one element along all nodal dimensions. - -It’s important to note that all state data is stored in a -four-dimensional array *regardless of the problem’s -dimensionality*. The array is (nx,ny,nz,nc) in size, where -nc is the number of components, for instance representing -different fluid variables, and (nx,ny,nz) are the number of -cells in each respective spatial dimension. For 2D problems, -nz=1. - -A fab would represent the data for a single box in the domain. -In MAESTROeX, we don’t usually deal with fabs alone, but rather -we deal with multifabs, described next. - -multifab --------- - -A multifab is a collection of fabs at the same level of -refinement. This is the primary data structure that MAESTROeX routine operate on. A multilevel simulation stores the -data in an array of multifabs, where the array index refers -to the refinement level. 
- -All fabs in a given multifab have the same number of ghost cells, -but different multifabs can have different numbers of ghost cells -(or no ghost cells). - -Working with multifabs -~~~~~~~~~~~~~~~~~~~~~~ - -To build a multifab, we need to provide a layout, the number of -components to store in the multifab  and the number of ghostcells. In -MAESTROeX  the hierarchy of grids will be described by a single -ml_layout. A multifab can be declared and built at any time in a -simulation using the ml_layout, thereby allocating space at every -grid location in the simulation. The sequence to build a multifab appears as - -:: - - type(multifab) :: mfab(nlevs) - ... - do n = 1, nlevs - call multifab_build(mfab(n), mla%la(n), nc, ng) - enddo - -Here, nc is the number of components and ng is the number -of ghostcells. The multifab is built one level at a time, using the -layout for that level taken from the ml_layout, mla. - -A common operation on a multifab is to initialize it to :math:`0` -everywhere. This can be done (level-by-level) as - -:: - - call setval(mfab(n), ZERO, all=.true.) - -where ZERO is the constant 0.0 from amrex_constants_module. - -The procedure for accessing the data in each grid managed by the -multifab is shown in § \ `[sec:example] <#sec:example>`__. Subroutines to add, -multiply, or divide two multifabs exist, as do subroutines to copy -from one multifab to another—see -amrex/Src/F_BaseLib/multifab.f90 for the full list of -routines that work with multifabs. - -When you are done working with a multifab, its memory can be freed by -calling multifab_destroy on the multifab. - -bc_tower --------- - -A bc_tower holds the information about what boundary conditions are -in effect for each variable in a -MAESTROeX simulation. These are interpretted by the ghost cell filling -routines. See § \ `10 <#sec:arch:bcs>`__ for more detail. 
MAESTROeX Data Organization
===========================
figure:: \archfigpath/mac2 - :alt: [fig:mac] The MAC grid for the velocity. - Here the :math:`x`-velocity is on the :math:`x`-edges (shown as the - blue points) and the :math:`y`-velocity is on the :math:`y`-edges - (shown as the red points). - :width: 2.5in - - [fig:mac] The MAC grid for the velocity. - Here the :math:`x`-velocity is on the :math:`x`-edges (shown as the - blue points) and the :math:`y`-velocity is on the :math:`y`-edges - (shown as the red points). - -|   - -The MAC velocities are allocated at each level of refinement, n, -by making a multifab array where each of the dm components is -nodal in its respective direction: - -:: - - type(multifab) :: umac(nlevel,dm) - - do n=1,nlevel - do comp=1,dm - call multifab_build_edge(umac(n,comp), mla%la(n),1,1,comp) - enddo - enddo - -Base State Arrays ------------------ - -The base state is defined by :math:`\rho_0`, :math:`p_0`, and :math:`w_0`. There is no -base state composition. Other arrays are defined as needed, such as -:math:`h_0`, the base state enthalpy. - -The base state arrays are 2-dimensional, with the first dimension -giving the level in the AMR hierarchy and the second the radial index -into the base state. For spherical geometries, the base state only -exists at a single level, so the first index will always be 1. The -radial index is 0-based, to be consistent with the indexing for the -Cartesian state data. For example, the base state density would be -dimensioned: rho0(nlevs,0:nr_fine-1). Here, nlevs is the -number of levels of refinement and nr_fine is the number of -cells in the radial direction at the finest level of refinement. - -For multilevel, plane-parallel geometry, all grids at the same height -will have the same resolution so that the full state data is always -aligned with the base state (see Figure \ `[fig:base_state] <#fig:base_state>`__). Base -state data on coarse grids that are covered by fine grids is not -guaranteed to be valid. 
- -For spherical problems, the base state resolution, :math:`\Delta r`, is -generally picked to be finer than the Cartesian grid resolution, -:math:`\Delta x`, i.e. \ :math:`\Delta r < \Delta x`. The ratio is controlled -by the parameter drdxfac. - -Note there are no ghost cells for the base state outside of the -physical domain. For plane-parallel, multilevel simulations, there -are ghostcells at the jumps in refinement—these are filled by the -fill_code_base routine. The convention when dealing with the -base state is that we only access it inside of the valid physical -domain. Any multi-dimensional quantity that is derived using the base -state then has its ghost cells filled by the usually multifab ghost -cell routines. - -MAESTROeX Helper Modules -====================== - -A number of MAESTROeX modules appear frequently throughout the source. -Below, we describe some of the more common functionality of the most -popular modules. - -average_module --------------- - -The average_module module provides a routine average that takes -a multilevel multifab array and averages the full Cartesian data -onto the 1D base state. - -eos_module ----------- - -The eos_module provides the interface to the equation of -state to connect the state variables thermodynamically. It -gets the information about the fluid species from the network -module (for example, the atomic number, :math:`Z`, and atomic weight, :math:`A`, -of the nuclei). - -Presently there is a single EOS that comes with MAESTROeX, tt gamma_law_general, -but many more are available through the external Microphysics repo [3]_. The Microphysics EOSs share the same interface and can be compiled into MAESTROeX directly. -Here are the more popular EOSs: - -- helmholtz represents a general stellar equation - of state, consisting of nuclei (as an ideal gas), radiation, - and electrons (with arbitrary degeneracy and degree of relativity). - This equation of state is that described in :raw-latex:`\cite{timmes_eos}`. 
- - A runtime parameter, use_eos_coulomb, is defined in - this EOS to enable/disable Coulomb corrections. - -- gamma_law_general assumes an ideal gas with a mixed - composition and a constant ratio of specific heats, :math:`\gamma`: - - .. math:: p = \rho e (\gamma - 1) = \frac{\rho k_B T}{\mu m_p} - - where :math:`k_B` is Boltzmann’s constant and :math:`m_p` is the mass of the - proton. - The mean molecular weight, :math:`\mu`, is computed assuming - electrically neutral atoms: - - .. math:: \mu = \left ( \sum_k \frac{X_k}{A_k} \right )^{-1} - - An option in the source code itself exists for treating the - species as fully-ionized, but there is no runtime-parameter to - make this switch. - -- multigamma is an ideal gas equation of state where each - species can have a different value of :math:`\gamma`. This mainly affects - how the internal energy is constructed as each species, represented - with a mass fraction :math:`X_k` will have its contribution to the total - specific internal energy take the form of :math:`e = p/\rho/(\gamma_k - - 1)`. The main thermodynamic quantities take the form: - - .. math:: - - \begin{aligned} - p &= \frac{\rho k T}{m_u} \sum_k \frac{X_k}{A_k} \\ - e &= \frac{k T}{m_u} \sum_k \frac{1}{\gamma_k - 1} \frac{X_k}{A_k} \\ - h &= \frac{k T}{m_u} \sum_k \frac{\gamma_k}{\gamma_k - 1} \frac{X_k}{A_k}\end{aligned} - - We recognize that the usual astrophysical :math:`\bar{A}^{-1} = \sum_k - X_k/A_k`, but now we have two other sums that involve different - :math:`\gamma_k` weightings. - - The specific heats are constructed as usual, - - .. math:: - - \begin{aligned} - c_v &= \left . \frac{\partial e}{\partial T} \right |_\rho = - \frac{k}{m_u} \sum_k \frac{1}{\gamma_k - 1} \frac{X_k}{A_k} \\ - c_p &= \left . 
\frac{\partial h}{\partial T} \right |_p = - \frac{k}{m_u} \sum_k \frac{\gamma_k}{\gamma_k - 1} \frac{X_k}{A_k}\end{aligned} - - and it can be seen that the specific gas constant, :math:`R \equiv c_p - c_v` is - independent of the :math:`\gamma_i`, and is simply :math:`R = k/m_u\bar{A}` giving the - usual relation that :math:`p = R\rho T`. Furthermore, we can show - - .. math:: - - \Gamma_1 \equiv \left . \frac{\partial \log p}{\partial \log \rho} \right |_s = - \left ( \sum_k \frac{\gamma_k}{\gamma_k - 1} \frac{X_k}{A_k} \right ) \bigg / - \left ( \sum_k \frac{1}{\gamma_k - 1} \frac{X_k}{A_k} \right ) = - \frac{c_p}{c_v} \equiv \gamma_\mathrm{effective} - - and :math:`p = \rho e (\gamma_\mathrm{effective} - 1)`. - - This equation of state takes several runtime parameters that can set the - :math:`\gamma_i` for a specific species: - - - eos_gamma_default: the default :math:`\gamma` to apply for - all species - - - species_X_name and species_X_gamma: set the :math:`\gamma_i` - for the species whose name is given as species_X_name to the - value provided by species_X_gamma. Here, X can be one - of the letters: a, b, or c, allowing us to specify - custom :math:`\gamma_i` for up to three different species. - -The thermodynamic quantities are stored in a Fortran type eos_t, -which has fields for all the thermodynamic inputs and outputs. The -type definition is brought in through eos_type_module. - [4]_ - -The first argument to the eos call is an integer key that -specifies which thermodynamic variables (in addition to the mass -fractions) are used as input. EOS input options are listed -in table \ `[arch:table:eosinput] <#arch:table:eosinput>`__. - -.. 
table:: [arch:table:eosinput] EOS input flags - - +--------------+-------------------------+ - | key | input quantities | - +==============+=========================+ - | eos_input_rt | :math:`\rho`, :math:`T` | - +--------------+-------------------------+ - | eos_input_rh | :math:`\rho`, :math:`h` | - +--------------+-------------------------+ - | eos_input_tp | :math:`T`, :math:`p` | - +--------------+-------------------------+ - | eos_input_rp | :math:`\rho`, :math:`p` | - +--------------+-------------------------+ - | eos_input_re | :math:`\rho`, :math:`e` | - +--------------+-------------------------+ - | eos_input_ps | :math:`p`, :math:`s` | - +--------------+-------------------------+ - -fill_3d_module --------------- - -The fill_3d_module provides routines that map from the 1D -base state to the full Cartesian 2- or 3D state. Variations in the -routines allow for cell-centered or edge-centered data on either the -base state or full Cartesian state. - -fundamental_constants_module ----------------------------- - -The fundamental_constants_module provides a simple list of -various fundamental constants (e.g. Newton’s gravitational constant) -in CGS units. - -geometry --------- - -network -------- - -The network module defines the number species advected by the -code (nspec), their ordering, and gives their basic properties -(like atomic number, :math:`Z`, and atomic mass, :math:`A`). All MAESTROeX problems -require a network module, even if there are no reactions -modeled. Many different reaction modules (containing different sets -of isotopes) exist in Microphysics/networks. The particular network -used by a problem is defined in the problem’s GNUmakefile. - -To find the location of a particular species (for instance, “carbon-12”) -in the allowed range of 1:nspec, you do the following query: - -:: - - ic12 = network_species_index("carbon-12") - -If the resulting index is -1, then the requested species was not -found. - -.. 
_sec:probin: - -probin_module -------------- - -probin_module provides access to the runtime parameters. -The runtime parameters appear simply as module variables. To get the -value of a parameter, one simply needs to ‘use probin_module’. -The preferred method is to add the ‘only’ clause to the -use statement and explicitly list only those parameters that -are used in the routine. Defining new runtime parameters is -described in § \ `3.2 <#sec:def_runtime_param>`__. - -.. _sec:variables_module: - -variables ---------- - -The variables module provides integer keys to index the state -multifabs and other arrays dealing with the scalar quantities. The -most commonly used keys are are list in table \ `[arch:table:variables] <#arch:table:variables>`__. - -.. table:: [arch:table:variables] Common variables module keys - - +-----------+------------------------------------------------------------+ - | rho_comp | density | - +-----------+------------------------------------------------------------+ - | rhoh_comp | density :math:`\times` specific enthalpy, :math:`(\rho h)` | - +-----------+------------------------------------------------------------+ - | spec_comp | first species partial density, :math:`(\rho X_1)` | - +-----------+------------------------------------------------------------+ - | temp_comp | temperature | - +-----------+------------------------------------------------------------+ - -The species indices are contiguous in the state array, spanning -spec_comp:spec_comp-1+nspec. To find a particular species, a -query can be made through the network module, such as: - -:: - - ic12 = network_species_index("carbon-12") - -and then the fab can be indexed using spec_comp-1+ic12 to -get “carbon-12”. -The variables module also provides keys for the plotfile -variables and boundary condition types. 
- -Other keys in the variables modules are reserved for boundary -conditions (foextrap_comp and hoextap_comp), the -projection of the pressure (press_comp), or constructing -the plotfile. - -AMReX Helper Modules -==================== - -There are a large number of modules in amrex/ that provide -the core functionality for managing grids. Here we describe -the most popular such modules. - -amrex_constants ------------- - -This module provides descriptive names for a number of common double precision -numbers, e.g. ONE = 1.d0. This enhances the readability of -the code. - -parallel --------- - -All MPI calls are wrapped by functions in the parallel module. For -serial jobs, the wrappers simply do the requested operation on processor. -By wrapping the calls, we can easily switch between serial and parallel -builds. - -[sec:example] Example: Accessing State and MAC Data -=================================================== - -In MAESTROeX, the state data is stored in a cell-centered multifab array -(the array index refers to the AMR level) and the MAC velocities are -stored in a 2D nodal multifab array (with indices referring to the AMR -level and the velocity component). Here we demonstrate a typical way -to extract the state and MAC velocity data. - -All MAESTROeX routines are contained in a module, to allow for compile-time -argument checking. - -:: - - module example_module - - contains - -The main interface to our routine is called example—this will -take the multifabs containing the data and then pass them to the -work routines, example_2d or example_3d, depending on -the dimensionality. - -:: - - subroutine example(mla,s,umac,dx,dt) - - use multifab_module - use ml_layout_module - use variables, only: rho_comp - -Here, the -multifab_module defines -the multifab data type. The ml_layout_module defines the -datatype for a ml_layout—many routines will take an ml_layoutto -allow us to fill ghostcells. 
The variables module is a -MAESTROeX module that provides integer keys for indexing the state -arrays. In this case the integer rho_comp refers to the -location in the state array corresponding to density. - -Next we declare the subroutine arguments: - -:: - - type(ml_layout) , intent(in ) :: mla - type(multifab) , intent(inout) :: s(:) - type(multifab) , intent(inout) :: umac(:,:) - double precision, intent(in ) :: dx(:,:),dt - -Here, s(:) is our multifab array that holds the state data. -with the array index in s refers to the AMR level. The MAC -velocities are held in the multifab umac, with the array -indices referring to the AMR level and the component. - -Local variable declarations come next: - -:: - - ! Local variables - double precision, pointer :: sp(:,:,:,:) - double precision, pointer :: ump(:,:,:,:), vmp(:,:,:,:), wmp(:,:,:,:) - integer :: i,n,dm,nlevs,ng_sp,ng_um - integer :: lo(mla%dim),hi(mla%dim) - -Amongst the local variables we define here are a pointer, -sp, that will point to a single fab from the -multifab s, and a pointer for each component of the MAC -velocity, ump, vmp, and wmp (for a 2D run, -we won’t use wmp). We note that regardless of the dimensionality, -these pointers are 4-dimensional: 3 spatial + 1 component. - -Next we get the dimensionality and number of levels - -:: - - dm = mla%dim - nlevs = mla%nlevel - -Each multifab can have their own number of ghostcells, so we get -these next: - -:: - - ng_sp = nghost(s(1)) - ng_um = nghost(umac(1,1)) - -By convention, all levels in a given multifab have the same number of -ghostcells, so we use level 1 in the nghost() call. We also use -the same number of ghostcells for each component of the velocity, so -we only need to consider the first component in the nghost() -call. The ghostcells will be needed to access the data stored in the -fabs. - -To access the data, we loop over all the levels, and all the boxes in -the given level. 
-

::

    do n=1,nlevs
       do i = 1, nfabs(s(n))

nfabs(s(n)) is simply the number of boxes in level n on
the current processor. Each processor knows which fabs in its
multifab are local to that processor, and this loop will only loop
over those.

For a given box, we get the data and the bounds of the box.

::

    sp => dataptr(s(n), i)
    ump => dataptr(umac(n,1),i)
    vmp => dataptr(umac(n,2),i)
    lo = lwb(get_box(s(n), i))
    hi = upb(get_box(s(n), i))

The actual data array is accessed through the dataptr function,
which takes a multifab (e.g. s(n)) and the index of the
box (i) we want. We see that the :math:`x` MAC velocity for the
current box is stored in ump and the :math:`y` MAC velocity is stored
in vmp. We don’t get the :math:`z` velocity data here, since that
would not be available for a 2D run—we defer that until we test on
the dimensionality below.

Finally, the index bounds of the box (just the data, not the ghostcells) are
stored in the dm-dimensional arrays lo and hi. These indices
refer to the current box, and hold for both the state, sp, and the MAC
velocity, ump and vmp. However, since the MAC velocity is nodal
in the component direction, the loops over the valid data will differ
slightly (as we see below).

With the data extracted, we call a subroutine to operate on it. We use
different subroutines for the different dimensionalities (and many times
have a separate routine for spherical geometries).

::

    select case (dm)
    case (2)
       call example_2d(sp(:,:,1,rho_comp),ng_sp, &
                       ump(:,:,1,1),vmp(:,:,1,1),ng_um, &
                       lo,hi,dx(n,:),dt)
    case (3)
       wmp => dataptr(umac(n,3),i)
       call example_3d(sp(:,:,:,rho_comp),ng_sp, &
                       ump(:,:,:,1),vmp(:,:,:,1),wmp(:,:,:,1),ng_um, &
                       lo,hi,dx(n,:),dt)
    end select
    enddo ! end loop over boxes

    enddo ! end loop over levels

    end subroutine example

We call either the function
example_2d for two-dimensional data or example_3d
for three-dimensional data.
Note that in the two-dimensional -case, we index the data as sp(:,:,1,rho_comp). Here a -‘1’ is used as the ‘z’-coordinate spatial index, since this -is a 2D problem, and the density component of the state is selected -(using the integer key rho_comp). The 3D version accesses -the data as sp(:,:,:,rho_comp)—only the component regarding -the variable is needed here. Notice that we also pass through -the number of ghostcells for each of the quantities. - -This routine will be supplimented with example_2d and -example_3d, which actually operate on the data. The form of -the 2D function is: - -:: - - subroutine example_2d(density,ng_sp, & - umac,vmac,ng_um, & - lo,hi,dx,dt) - - use amrex_constants_module - use probin_module, only: prob_lo - - integer , intent(in) :: lo(:),hi(:), ng_sp, ng_um - double precision, intent(in) :: density(lo(1)-ng_sp:,lo(2)-ng_sp:) - double precision, intent(in) :: umac(lo(1)-ng_um:,lo(2)-ng_um:) - double precision, intent(in) :: vmac(lo(1)-ng_um:,lo(2)-ng_um:) - - double precision, intent(in) :: dx(:),dt - - integer :: i, j - double precision :: x, y - double precision :: dens, u, v - - do j = lo(2), hi(2) - y = prob_lo(2) + (dble(j) + HALF)*dx(2) - - do i = lo(1), hi(1) - x = prob_lo(1) + (dble(i) + HALF)*dx(1) - - dens = density(i,j) - - ! compute cell-centered velocity - u = HALF*(umac(i,j) + umac(i+1,j)) - v = HALF*(vmac(i,j) + vmac(i,j+1)) - - ! operate on the data - ! ... - - enddo - enddo - - end subroutine example_2d - - end module example_module - -In this function, the bounds of the density array take -into account the ng_sp ghostcells and the index space of the -current box. Likewise, the MAC velocities refer to the ng_um -ghostcells. The j and i loops loop over all the valid -zones. Coordinate information is computed from dx and -prob_lo which is the physical lower bound of the domain. -amrex_constants_module declares useful double-precision -constants, like HALF (0.5). 
Here, we see how to access the
density for the current zone and compute the cell-centered velocities
from the MAC velocities. By convention, for a nodal array, the
indices refer to the *lower* interface in the nodal direction, so
for umac, umac(i,j) and umac(i+1,j) are the :math:`x` MAC
velocities on the lower and upper edge of the zone in the
:math:`x`-direction.

The three-dimensional case is similar, with the density array
declared as

::

   density(lo(1)-ng_sp:,lo(2)-ng_sp:,lo(3)-ng_sp:)

and an additional loop over the ‘z’ coordinate (from lo(3) to
hi(3)).

In this example, we looped over the valid zones. If we wished to loop
over the interfaces bounding the valid zones, in the :math:`x`-direction,
we would loop as

::

   do j = lo(2), hi(2)
      do i = lo(1), hi(1)+1
         ! access umac(i,j)
      enddo
   enddo

Filling Ghostcells
==================

Ghostcells are filled through a variety of different routines, depending
on the objective.

- multifab_fill_boundary fills ghost cells for two
  adjacent grids at the same level, which also includes periodic domain
  boundary ghost cells.

- multifab_physbc fills ghostcells at the physical boundaries.

- multifab_fill_ghost_cells is used for multilevel
  problems, and fills ghostcells in the finer grid (level n) by
  interpolating from data in the coarser grid (level n-1).
  This function, by default, will also call multifab_fill_boundary
  and multifab_physbc for both levels n and n-1 (you
  can override this behavior for speed optimization purposes).
  This call is usually preceded by a call to
  ml_cc_restriction_c which sets the level n-1 data to be
  the average of the level n data covering it.
-

You generally won’t see calls in the MAESTROeX source code to these subroutines,
as there is now a special AMReX subroutine, ml_restrict_and_fill,
that takes an array of multifabs at different levels, and in order calls:
(1) ml_cc_restriction_c, (2) multifab_fill_boundary,
(3) multifab_physbc, and (4) multifab_fill_ghost_cells.
These four subroutines are called in such a way to avoid extra
ghostcell filling, saving on communication time. You can specify the
starting component, starting boundary condition component,
the number of components, the number of ghost cells,
and whether or not you want to use the same boundary condition component
for all variables.

.. _sec:arch:bcs:

Boundary Conditions
===================

When MAESTROeX is run, the boundary condition parameters are read in
from the input file and used to build the bc_tower. The
bc_tower consists of a bc_level object for each level of resolution
in the simulation. The bc_level contains 3 different descriptions of
the boundary conditions for each box in the domain at that level of
refinement: phys_bc_level_array, adv_bc_level_array,
and ell_bc_level_array. In all cases, the boundary
conditions are specified via integer values that are defined in
bc_module (part of AMReX).

Each level has a phys_bc_level_array(0:ngrids,dim,2) array,
where ngrids is the number of boxes on that level, dim is
the coordinate direction, and the last index refers to the lower (1)
or upper (2) edge of the box in the given coordinate direction. This
stores the *physical description* of the boundary type (outlet, inlet,
slipwall, etc.)—this description is independent of the variables
that live on the grid. The phys_bc_level_array(0,:,:) ‘box’
refers to the entire domain. If an edge of a box is not on a physical
boundary, then it is set to a default value (typically
INTERIOR).
These boundary condition types are used to interpret
the actual method to fill the ghostcells for each variable, as
described in adv_bc_level_array and
ell_bc_level_array.

Whereas phys_bc_level_array provides a physical description
of the type of boundary, the array adv_bc_level_array
describes the *action* taken (e.g. reflect, extrapolate, etc.)
for each variable when filling boundaries.
adv_bc_level_array specifically describes the boundary
conditions that are in play for the advection (hyperbolic) equations.
The form of this array is
adv_bc_level_array(0:ngrids,dim,2,nvar) where the additional
component, nvar, allows for each state variable that lives on a
grid to have different boundary condition actions associated with it.
The convention is that the first dm variables in bc_level (where dm is
the dimensionality of the simulation) refer to the
velocity components, and the subsequent slots are for the other
standard variables described in the variables_module. For
instance, to reference the boundary condition for density, one would
index with dm+rho_comp. For temporary variables that are
created on the fly in the various routines in MAESTROeX there may not
be a variable name in variables_module that describes the
temporary variable. In this case, the special variables
foextrap_comp and hoextrap_comp (first-order and high-order
extrapolation) are used.

ell_bc_level_array is the analog to
adv_bc_level_array for the elliptic solves in MAESTROeX. This
will come into play in the multigrid portions of the code. The
actions that are used for ell_bc_level_array are either
Dirichlet or Neumann boundary conditions. For the velocity
projections, we are dealing with a pressure-like quantity, :math:`\phi`, so
the pressure boundary conditions here reflect the behavior we want for
the velocity. After the projection, it is :math:`\nabla \phi` that modifies
the velocity field.
At a wall or for inflow conditions, we already
have the velocity we want at the boundary, so we want the velocity to
remain unchanged after the projection. This requires :math:`d\phi/dn=0` on
those boundaries. For outflow, we impose a condition that we do not
want the boundaries to introduce any tangential acceleration (or
shear); this is equivalent to setting :math:`\phi = 0` (then :math:`\partial
\phi/\partial t = 0`, with :math:`t` meaning ‘tangential’). This allows the
velocity to adjust as needed to the domain (see, for example,
:raw-latex:`\cite{almgrenBellSzymczak:1996}`).

The actual filling of the ghostcells according to the descriptions
contained in the bc_tower is carried out by the multifab_physbc routine. When you have an EXT_DIR
condition in multifab_physbc (as specified in the inputs file
as inlet), the advection solver (via the slope routine) and
linear solvers will then assume that the value in the ghost cells is
equal to the value that actually lies on the wall.

Multigrid
=========

MAESTROeX uses the multigrid solver to enforce the divergence
constraint both on the half-time edge-centered advective velocities
(the “MAC projection”) and on the final cell-centered velocities
(the “HG projection”). For the MAC projection, since the velocity
data is edge-centered (the MAC grid), the projection is cell-centered.
For the HG projection, since the velocity data is cell-centered, the
projection is node-centered. The
multigrid solver performs a number of V-cycles until the residual
drops by 10-12 orders-of-magnitude. There are several options that
affect how the multigrid solver behaves, which we describe below.
More detail on the multigrid solvers is given in Chapter \ `[ch:mg] <#ch:mg>`__.

Multilevel and Refinement Criteria
==================================

.. _arch:sec:particles:

Particles
=========

MAESTROeX has support for Lagrangian particles that are passively
advected with the velocity field.
These are useful for diagnostics -and post-processing. To use particles, particles must be seeded into -the domain by writing a problem-specific init_particles.f90 -routine. This routine is called at the start of the simulation. The -init_particles routines add particles at specific locations by -calling the particle_module’s add routine when a given -criteria is met by the fluid state. - -When you run the code, particles are enabled by setting -use_particles = T. At the end of each timestep the locations of -all the particles are written out into a series of files called -timestamp_NN, where NN is the CPU number on which the -particle *currently* resides. Particles are always kept on the -processor containing the state data corresponding to their present -location. Several bits of associated data (density, temperature, and -mass fractions) are stored along with the particle ID and position. - -Some simple python scripts allow for the plotting of the particle -positions. See § \ `[analysis:sec:particles] <#analysis:sec:particles>`__ for details. - -Regression Testing -================== - -There is an extensive regression test suite for AMReX that works with -MAESTROeX. Full details, and a sample MAESTROeX configuration file are -provided in the AMReX User’s Guide and source. - -.. [1] - Spherical geometry - only exists for 3-d. This is a design decision—convection is 3-d. - You can however run as an octant - -.. [2] - Note: many more compatible routines are available in the separate Microphysics git repo - -.. [3] - Microphysics is - available at https://github.com/starkiller-astro/Microphysics. MAESTROeX will - find it via the MICROPHYSICS_HOME environment variable - -.. [4] - Note: an older interface to the EOS exists, but is - deprecated. In this mode, the eos_old_interface module declares - the variables that need appear in the old-style eos call - argument list. 
MAESTROeX routines use these module variables in the - EOS call to avoid having to declare each quantity in each routine - that calls the EOS. Most code has been updated to use the new interface. - -.. |[fig:base_state] MAESTROeX geometries, showing both the -1D base state and the full Cartesian state. (Left) For multi-level -problems in planar geometry, we force a direct alignment between the -radial array cell centers and the Cartesian grid cell centers by -allowing the radial base state spacing to change with space and -time. (Right) For multi-level problems in spherical geometry, since -there is no direct alignment between the radial array cell centers -and the Cartesian grid cell centers, we choose to fix the radial -base state spacing across levels. Figure taken -from :raw-latex:`\cite{multilevel}`.| image:: \archfigpath/base_grid - :height: 2in -.. |[fig:base_state] MAESTROeX geometries, showing both the -1D base state and the full Cartesian state. (Left) For multi-level -problems in planar geometry, we force a direct alignment between the -radial array cell centers and the Cartesian grid cell centers by -allowing the radial base state spacing to change with space and -time. (Right) For multi-level problems in spherical geometry, since -there is no direct alignment between the radial array cell centers -and the Cartesian grid cell centers, we choose to fix the radial -base state spacing across levels. Figure taken -from :raw-latex:`\cite{multilevel}`.| image:: \archfigpath/base_spherical - :height: 2in diff --git a/sphinx_docs/source/flowchart.rst b/sphinx_docs/source/flowchart.rst index 098a5bbd1..c14ff8abf 100644 --- a/sphinx_docs/source/flowchart.rst +++ b/sphinx_docs/source/flowchart.rst @@ -166,7 +166,7 @@ are energy sources due to reactions and user-defined external heating. When we are using thermal diffusion, there will be an additional term in the enthalpy equation (see § :ref:`sec:flow:diffusion`). 
-In the original temporal scheme, we utlized a base state enthlpy that effectively +In the original temporal scheme, we utilized a base state enthlpy that effectively represents the average over a layer; its evolution equation can be found by laterally averaging :eq:`eq:flow:enthalpy` diff --git a/sphinx_docs/source/mg.rst b/sphinx_docs/source/mg.rst index aa57dfef3..b8af0df65 100644 --- a/sphinx_docs/source/mg.rst +++ b/sphinx_docs/source/mg.rst @@ -24,8 +24,8 @@ V-cycle. The MG solvers are located in amrex/Src/LinearSolvers/F_MG/. There are two MG solvers, for cell-centered and nodal data. Generally, the routines specific to the cell-centered solver will have -cc in their name, and those specific to the nodal solver will have -nd in their name. +``cc`` in their name, and those specific to the nodal solver will have +``nd`` in their name. Support for :math:`\Delta x \ne \Delta y \ne \Delta z` @@ -241,7 +241,7 @@ The allowed values are: - mg_bottom_solver / hg_bottom_solver = 4: a special bottom solver that extends the range of the multigrid coarsening - by aggregrating coarse grids on the original mesh together and + by aggregating coarse grids on the original mesh together and further coarsening. You should use the special bottom solver (4) whenever possible, even diff --git a/sphinx_docs/source/sdc.rst.old b/sphinx_docs/source/sdc.rst.old deleted file mode 100644 index 9625b5f4e..000000000 --- a/sphinx_docs/source/sdc.rst.old +++ /dev/null @@ -1,533 +0,0 @@ -***************************** -Spectral Deferred Corrections -***************************** - -SDC Overview -============ - -Spectral deferred corrections (SDC) is an iterative scheme used to integrate -the thermodynamic variables, :math:`(\rho X_k,\rho h)`, over a time step. It has -been shown to be more accurate and efficient than Strang splitting in a -terrestrial, non-stratified, low Mach number reacting flow solver :raw-latex:`\cite{Non11}`, -so we would like to develop an SDC version of MAESTRO. 
- -MAESTRO integrates the following system of equations: - -.. math:: - - \begin{aligned} - \frac{\partial\Ub}{\partial t} &=& - -\Ub\cdot\nabla\Ub - \frac{1}{\rho}\nabla\pi - - \frac{\rho-\rho_0}{\rho} g\eb_r,\label{eq:momentum}\\ - \frac{\partial(\rho X_k)}{\partial t} &=& - -\nabla\cdot(\rho X_k\Ub) + \rho\omegadot_k,\label{eq:species}\\ - \frac{\partial(\rho h)}{\partial t} &=& - -\nabla\cdot(\rho h\Ub) + \frac{Dp_0}{Dt} - + \rho\Hnuc + \nabla\cdot k_{\rm th}\nabla T,\label{eq:enthalpy}\end{aligned} - -together with base state evolution equations and a constraint equation. -By default, MAESTRO advances the thermodynamic variables by coupling -the different physical processes together (advection, diffusion, reactions) using -Strang splitting. Specifically, we integrate the reaction terms -over half a time step ignoring contributions from advection and diffusion, -then from this intermediate state we integrate the advection and diffusion terms over -a full time step (while ignoring reactions), and finally -integrate the reactions over a half time step (ignoring advection and diffusion). -For problems where -the reactions and/or diffusion greatly alter the energy balance or composition -as compared to advection, this operator splitting approach can lead to large -splitting errors and highly inaccurate solutions. -This issue is can be particularly exasperating -for low Mach number methods that can take large advection-based time steps. - -An alternate approach to advancing the thermodynamic variables is SDC. -SDC is an iterative approach to couple the various processes -together, with each process seeing an -approximation of the other processes as a source term. The SDC -algorithm converges to an integral representation of the solution in -time that couples all of the processes together in a self-consistent -fashion, see :raw-latex:`\cite{Non11}`. 
- -As a first attempt, we will work on coupling advection and reactions only -via SDC, with a base state that is fixed in time, but not space. - -Strang-Splitting Without Thermal Diffusion or Base State Evolution -================================================================== - -In the Strang splitting version of MAESTRO, the reaction and advection -processes operate independent of one-another. The species and -enthalpy equations are integrated over :math:`\Delta t` using the following -sequence: - -- React for :math:`\Delta t/2`: - - In the Strang splitting version of MAESTRO, the reaction network solves just - the reaction portion of the species evolution equations along with a - temperature evolution equation. This is done using a standard stiff ODE solver - package (like VODE) on the system: - - .. math:: - - \begin{aligned} - \frac{dX_k}{dt} &=& \omegadot_k(\rho,X_k,T), \\ - \frac{dT}{dt} &=& - \frac{1}{c_p} \left ( -\sum_k \xi_k \omegadot_k + \Hnuc \right ).\end{aligned} - - Here, :math:`T` is evolved solely to evaluate the reaction rates, - :math:`\omegadot_k(\rho,X_k,T)`. Furthermore, we simplify the problem - “freezing” the thermodynamics—i.e., :math:`c_p` and :math:`\xi_k` are evaluated at the - start of the integration and held constant thereafter. - The density remains constant during this step, i.e., - :math:`\rho^\mathrm{new} = \rho^\mathrm{old}`, and we - update the enthalpy at the end of the integration as: - - .. math:: h^\mathrm{new} = h^\mathrm{old} + \frac{\Delta t}{2} \Hnuc. - -- Advect for :math:`\Delta t`: - - Beginning with the results from the previous reaction half time step, we integrate - that state using the equations - - .. math:: - - \begin{aligned} - \frac{\partial(\rho X_k)}{\partial t} &=& - -\nabla\cdot(\rho X_k\Ub), \\ - \frac{\partial(\rho h)}{\partial t} &=& - -\nabla\cdot(\rho h\Ub) + \frac{Dp_0}{Dt}.\end{aligned} - - Note that no reaction terms appear here. 
Since the advection - takes place using the state updated from the reaction step, the effect - of the reactions is implicitly contained in the advective update. - -- React for :math:`\Delta t/2`: - - Finally, we react again, starting with the state left by the advection - step. - -Note that MAESTRO uses a predictor-corrector approach. After integrating :math:`(\rho X_k,\rho h)` over -the time step, we use this time-advanced state to get a better estimate of a time-centered :math:`\beta_0` -and :math:`S`. We re-compute the advective velocities using an updated divergence constraint and repeat -the thermodynamic variable advance. - -SDC Without Thermal Diffusion or Base State Evolution -===================================================== - -In the SDC version, the VODE integration at the end of an SDC -iteration is responsible for updating all the thermodynamic quantities -including both the advection (incorporated via a piecewise constant advective -flux divergence source term) and the reactions. This provides a much stronger coupling between -the physical processes. In particular, our system now looks like: - -.. math:: - - \begin{aligned} - \frac{d(\rho X_k)}{dt} &=& \rho \omegadot_k(\rho,X_k,T) - \underbrace{\nabla\cdot(\rho X_k\Ub)}_{A_{\rho X_k}}\label{eq:sdc:rhoX} \\ - \frac{d(\rho h)}{dt} &=& \rho \Hnuc - \underbrace{\nabla\cdot(\rho h\Ub) + \frac{Dp_0}{Dt}}_{A_{\rho h}} \label{eq:sdc:rhoh}\end{aligned} - -Here, :math:`A_{\rho X_k}` and :math:`A_{\rho h}` are piecewise-constant (in time) -approximations to the change in :math:`{\rho X_k}` and :math:`{\rho h}` (respectively) -due to the advection. These are constructed by calling density_advance -and enthalpy_advance in MAESTRO and passed into the network solver -during the reaction step. A flowchart of the MAESTRO SDC algorithm is -shown in figure \ `[fig:sdc:flowchart] <#fig:sdc:flowchart>`__. - -.. raw:: latex - - \centering - -.. 
figure:: \sdcfigpath/flowchart_SDC - :alt: [fig:sdc:flowchart] A flowchart of the MAESTRO SDC algorithm. The - thermodynamic state variables and local velocity are - indicated in each step. The base state is not shown as it is time-independent. - Red text indicates that quantity was - updated during that step. The predictor is - outlined by the dotted box. The blue text indicates state - variables that are the same in **Step 3** as they are in - **Step 1**, i.e., they are unchanged by the predictor steps. - The SDC loop is shown in the gray dotted box. - - [fig:sdc:flowchart] A flowchart of the MAESTRO SDC algorithm. The - thermodynamic state variables and local velocity are - indicated in each step. The base state is not shown as it is time-independent. - Red text indicates that quantity was - updated during that step. The predictor is - outlined by the dotted box. The blue text indicates state - variables that are the same in **Step 3** as they are in - **Step 1**, i.e., they are unchanged by the predictor steps. - The SDC loop is shown in the gray dotted box. - -Advective Update ----------------- - -In the advective update, our goal is to compute :math:`A_{\rho X_k}` and -:math:`A_{\rho h}`. These terms approximate the following: - -.. math:: - - \begin{aligned} - A_{\rho X_k} &=& \left [- \nabla \cdot (\rho X_k \Ub) \right ]^{n+1/2} \\ - A_{\rho h} &=& \left [- \nabla \cdot (\rho h \Ub) + \frac{Dp_0}{Dt} \right ]^{n+1/2}\end{aligned} - -The construction of the interface states used in the advective terms -uses either a time-lagged or iteratively-lagged approximation to the reaction -terms (:math:`I_{\rho X_k}` and :math:`I_{\rho h}`, see below) as a source term in the interface -prediction. This explicitly couples the reaction process to the -advection process. - -Final Update ------------- - -The RHS routine that the ODE solver operates on will first construct -the density as: - -.. 
math:: \rho = \sum_k (\rho X_k) - -It will then derive the temperature from the equation of state. If we -are running with use_tfromp = T, then we do - -.. math:: T = T(\rho, p_0, X_k) - -otherwise, we do - -.. math:: T = T(\rho, h, X_k) - -Note that in contrast to the Strang splitting version, here we call the EOS -every time we enter the RHS routine, but here we call the EOS to compute temperature -rather than thermodynamic coefficients. - -Finally we integrate the ODE system (Eqs. `[eq:sdc:rhoX] <#eq:sdc:rhoX>`__ and `[eq:sdc:rhoh] <#eq:sdc:rhoh>`__). -At the end of the integration, we define :math:`I_{\rho X_k}` and :math:`I_{\rho h}`. The actual -form of these depends on what quantities we predict to edges during -the construction of the advective fluxes. -Note that we only need :math:`I_{\rho X_k}` and :math:`I_{\rho h}` for the -prediction of the interface states, and not the VODE integration. -This is because all we need from the advection solver is the -approximation to :math:`A_{\rho X_k}` and :math:`A_{\rho h}` and not the final -updated state. - -Species Source Terms. ---------------------- - -For the species prediction, the form of :math:`I` depends on -species_pred_type (see §\ `[sec:pred:density] <#sec:pred:density>`__). -We note that there is no :math:`I` term for :math:`\rho` or :math:`\rho'` prediction, since -the density evolution equation does not have a reaction source term. - -- species_pred_type = 1 (predict_rhoprime_and_X) - or 3 (predict_rho_and_X) - - .. math:: - - I_{X_k} = \frac{1}{\rho^{n+\myhalf}} \left [ - \frac{(\rho X_k)^\mathrm{new} - - (\rho X_k)^\mathrm{old}}{\Delta t} - A_{\rho X_k} \right ]. - - (Andy’s Idea) Define :math:`I_{X_k}` using - - .. math:: I_{X_k} = \frac{X_k^\mathrm{new} - X_k^\mathrm{old}}{\Delta t} - A_{X_k}, - - where we first define a state that has only been updated with advection: - - .. 
math:: \frac{(\rho X_k)^{(1)} - (\rho X_k)^\mathrm{old}}{\Delta t} = A_{\rho X_k}, - - and then define the species mass fractions, - - .. math:: - - X_k^{(1)} = (\rho X_k)^{(1)} / \sum_k (\rho X_k)^{(1)}, \quad - X_k^\mathrm{old} = (\rho X_k)^\mathrm{old} / \sum_k (\rho X_k)^\mathrm{old}, \quad - X_k^\mathrm{new} = (\rho X_k)^\mathrm{new} / \sum_k (\rho X_k)^\mathrm{new}, - - and finally define :math:`A_{X_k}` using - - .. math:: \frac{X^{(1)} - X^\mathrm{old}}{\Delta t}= A_{X_k}. - -- species_pred_type = 2 (predict_rhoX) - - .. math:: - - I_{\rho X_k} = \frac{(\rho X_k)^\mathrm{new} - (\rho X_k)^\mathrm{old}}{\Delta t} - A_{\rho X_k}. - \label{eq:sdc:Irhoo} - -Enthalpy Source Terms. ----------------------- - -The appropriate constructions are: - -- enthalpy_pred_type = 0 (predict_rhoh) - - .. math:: I_{\rho h} = \frac{(\rho h)^{\rm new} - (\rho h)^{\rm old}}{\Delta t} - A_{\rho h}. - -- enthalpy_pred_type = 1 (predict_rhohprime, not implemented yet) - - (Andy’s Idea) Here we need an :math:`I_{\rho h}` term for the :math:`(\rho h)'` evolution - equation (see Eq. \ `[rhohprime equation] <#rhohprime equation>`__). In this case we will use - :math:`I_{(\rho h)'} = I_{\rho h}`. Since we are not evolving the base state, the PDE - for :math:`(\rho h)_0` is simply :math:`\partial(\rho h)_0/\partial t = 0`, and thus the - evolution equation for :math:`(\rho h)'` is the same as the evolution equation - for :math:`\rho h`. - - In the future, when we enable base state evolution, the base state enthalpy - evolution equation may need to know about the :math:`I_{\rho h}` source term. - In particular, should :math:`(\rho h)_0` see a :math:`\overline{(\rho \Hnuc)}` term? - what about an average thermal diffusion? - -- enthalpy_pred_type = 2 (predict_h ) - - This is the most straightforward prediction type. The SDC solver - integrates the equation for :math:`(\rho h)`: - - .. 
math:: \frac{\partial(\rho h)}{\partial t} = -\nabla\cdot(\rho h \Ub) + \frac{Dp_0}{Dt} + \rho H_{\rm nuc} - - (shown here without diffusion or external heat sources). Expanding - the time derivative and divergence, and using the continuity equation - we see: - - .. math:: \frac{\partial h}{\partial t} = -\Ub \cdot \nabla h + \frac{1}{\rho} \frac{Dp_0}{Dt} + \frac{1}{\rho} (\rho H_{\rm nuc}) \label{eq:sdc:h} - - Comparing these equations, we see that - - .. math:: - - I_{h} = \frac{1}{\rho^{n+\myhalf}} \left [ - \frac{(\rho h)^\mathrm{new} - (\rho h)^\mathrm{old}}{\Delta t} - A_{\rho h} \right ] - - (Andy’s Idea) Form :math:`I_h` in the same way we would form :math:`I_{X_k}` from above: - - .. math:: I_h = \frac{h^\mathrm{new} - h^\mathrm{old}}{\Delta t} - A_h, - - where we first define - - .. math:: \frac{(\rho h)^{(1)} - (\rho h)^\mathrm{old}}{\Delta t} = A_{\rho h}, - - and then define :math:`h`, - - .. math:: h^{(1)} = (\rho h)^{(1)} / \sum_k(\rho X_k)^{(1)}, \quad h^\mathrm{old} = (\rho h)^\mathrm{old} / \sum_k(\rho X_k)^\mathrm{old}, \quad h^\mathrm{new} = (\rho h)^\mathrm{new} / \sum_k(\rho X_k)^\mathrm{new}, - - and finally define :math:`A_h` using - - .. math:: I_h = \frac{h^{(1)} - h^\mathrm{old}}{\Delta t} = A_h. - -- enthalpy_pred_type = 3 (predict_T_then_rhoprime) or - enthalpy_pred_type = 4 (predict_T_then_h ) - - Both of these enthalpy_pred_types predict temperature. Expressing - :math:`h = h(p_0,T,X_k)` and differentiating along particle paths: - - .. math:: - - \begin{aligned} - \frac{Dh}{Dt} &=& \left . \frac{\partial h}{\partial T} \right |_{p,X_k} \frac{DT}{Dt} + - \left . \frac{\partial h}{\partial p} \right |_{T,X_k} \frac{Dp_0}{Dt} + - \sum_k \left . 
\frac{\partial h}{\partial X_k} \right |_{p,T} \frac{DX_k}{Dt} \\ - &=& c_p \frac{DT}{Dt} + h_p \frac{Dp_0}{Dt} + \sum_k \xi_k \omegadot_k\end{aligned} - - where :math:`c_p`, :math:`h_p`, and :math:`\xi_k` are as defined in the table of symbols - (Table `[table:sym] <#table:sym>`__), and we substitute :math:`DX_k/Dt = \omegadot_k` (from the species - continuity equation, Eq. \ `[species equation] <#species equation>`__). Using Eq. \ `[eq:sdc:h] <#eq:sdc:h>`__, we have - the familiar temperature evolution equation: - - .. math:: \rho c_p \frac{DT}{Dt} = \underbrace{(1 - \rho h_p) \frac{Dp_0}{Dt}}_{\begin{smallmatrix}\text{already~accounted~for} \\ \text{in~T~prediction}\end{smallmatrix}} - \sum_k \xi_k \rho \omegadot_k + \rho \Hnuc - - where the underbraced term is already present in mktempforce. Recognizing that - Eq. \ `[eq:sdc:Irhoh] <#eq:sdc:Irhoh>`__ is the SDC approximation to :math:`(\rho \Hnuc)` and Eq. \ `[eq:sdc:Irhoo] <#eq:sdc:Irhoo>`__ is the - SDC approximation to :math:`(\rho \omegadot_k)`, we can define - - .. math:: - - I_T = \frac{1}{\rho^{n+\myhalf} c_p^{n+\myhalf}} \left \{ - \left [ \frac{(\rho h)^\mathrm{new} - (\rho h)^\mathrm{old}}{\Delta t} - A_{\rho h} \right ] - - \sum_k \xi_k^{n+\myhalf} \left [ \frac{(\rho X_k)^\mathrm{new} - - (\rho X_k)^\mathrm{old}}{\Delta t} - A_{\rho X_k} \right ] \right \} - - (Andy’s Idea) The idea is to advance the species and enthalpy with advection - terms only, and compute the resulting temperature, :math:`T^{(1)}`. Compare that temperature - with the final temperature computed by the SDC VODE call. The difference - between these values is :math:`I_T`. - - .. math:: I_T = \frac{T^\mathrm{new} - T^\mathrm{old}}{\Delta t} - A_T, - - with :math:`A_T` given by - - .. math:: \frac{T^{(1)} - T^\mathrm{old}}{\Delta t} = A_T, - - and :math:`T^{(1)}` computed using the equation of state from :math:`\rho^{(1)}, X_k^{(1)}`, - and :math:`h^{(1)}` (or :math:`p_0`, if use_tfromp = T). 
- -Implementation --------------- - -This is done in advance.f90 just after the call to react_state, -stored in the multifab called intra. -These terms are used as the source terms for the -advection step in the next SDC iteration. - -Summary of Changes ------------------- - -The major changes from the non-SDC-enabled burners are the addition of -the advective terms to the system of ODEs, the fact that we integrate -:math:`(\rho X_k)` instead of just :math:`X_k`, integrate :math:`(\rho h)` instead -of :math:`T`, and the need to derive the -temperature from the input state for each RHS evaluation by VODE. - -Note also that the SDC integration by VODE does not operate on -the velocities at all. That update is handled in the same fashion -as the Strang splitting version of the code. - -The ignition_simple_SDC burner shows how to set up the system -for use_tfromp = T or F. Presently, this implementation -does not support evolve_base_state = T (in particular, we -need to evolve :math:`p_0` in the RHS routine). - -Algorithm Flowchart - ADR with Fixed Base State -=============================================== - -We now include thermal diffusion and assume the base state is constant in time but not space: - -.. math:: - - \begin{aligned} - \frac{\partial\Ub}{\partial t} =& - -\Ub\cdot\nabla\Ub - \frac{1}{\rho}\nabla\pi - - \frac{\rho-\rho_0}{\rho} g\eb_r,\\ - \frac{\partial(\rho X_k)}{\partial t} =& - -\nabla\cdot(\rho X_k\Ub) + \rho\omegadot_k,\label{eq:sdc species}\\ - \frac{\partial(\rho h)}{\partial t} =& - -\nabla\cdot(\rho h\Ub) + \underbrace{\Ub\cdot\nabla p_0}_{Dp_0/Dt} - + \rho\Hnuc - + \underbrace{\nabla\cdot\frac{\kth}{c_p}\nabla h - \sum_k\nabla\cdot\frac{\xi_k k_{\rm th}}{c_p}\nabla X_k - \nabla\cdot\frac{h_p k_{\rm th}}{c_p}\nabla p_0}_{\nabla\cdot k_{\rm th}\nabla T}.\nonumber\\ - \label{eq:sdc enthalpy}\end{aligned} - -| The time-advancement is divided into three major steps.
The first step is the predictor, where we integrate the thermodynamic variables, :math:`(\rho,\rho X_k,\rho h)`, over the full time step. The second step is the corrector, where we use the results from the predictor to perform a more accurate temporal integration of the thermodynamic variables. The third step is the velocity and dynamic pressure update. -| **Step 1:** (*Compute advection velocities*) -| Use :math:`\Ub^n` and a second-order Godunov method to compute time-centered edge velocities, :math:`\uadvsdcstar`, with time-lagged dynamic pressure and explicit buoyancy as forcing terms. The :math:`\star` superscript indicates that this field does not satisfy the divergence constraint. Compute :math:`S^{n+\myhalf,{\rm pred}}` by extrapolating in time, - - .. math:: S^{n+\myhalf,{\rm pred}} = S^n + \frac{\Delta t^n}{2}\frac{S^n - S^{n-1}}{\Delta t^{n-1}}, - - and project :math:`\uadvsdcstar` to obtain :math:`\uadvsdcpred`, which satisfies - - .. math:: \nabla\cdot\left(\beta_0^n\uadvsdcpred\right) = S^{n+\myhalf,\rm{pred}}. - -| **Step 2:** (*Predictor*) -| In this step, we integrate :math:`(\rho, \rho X_k, \rho h)` over the full time step. The quantities :math:`(S, \beta_0, k_{\rm th}, c_p, \xi_k, h_p)^n` are computed from the thermodynamic variables at :math:`t^n`. This step is divided into several sub-steps: -| **Step 2A:** (*Compute advective flux divergences*) -| Use :math:`\uadvsdcpred` and a second-order Godunov integrator to compute time-centered edge states, :math:`(\rho X_k, \rho h)^{n+\myhalf,(0)}`, with time-lagged reactions (:math:`I^{\rm lagged} = I^{(j_{\rm max})}` from the previous time step), explicit diffusion, and time-centered thermodynamic pressure as source terms. Define the advective flux divergences as - - ..
math:: - - \begin{aligned} - A_{\rho X_k}^{(0)} &=& -\nabla\cdot\left[\left(\rho X_k\right)^{n+\myhalf,{(0)}}\uadvsdcpred\right],\\ - A_{\rho h}^{(0)} &=& -\nabla\cdot\left[\left(\rho h\right)^{n+\myhalf,(0)}\uadvsdcpred\right] + \uadvsdcpred\cdot\nabla p_0.\end{aligned} - - Next, use these fluxes to compute the time-advanced density, - - .. math:: \frac{\rho^{n+1} - \rho^n}{\Delta t} = \sum_k A_{\rho X_k}^{(0)}. - - Then, compute preliminary, time-advanced species using - - .. math:: \frac{\rho^{n+1}\widehat{X}_k^{n+1,(0)} - (\rho X_k)^n}{\Delta t} = A_{\rho X_k}^{(0)} + I_{\rho X_k}^{\rm lagged}.\label{eq:sdc species 2} - -| **Step 2B:** (*Compute diffusive flux divergence*) -| Solve a Crank-Nicolson-type diffusion equation for :math:`\widehat{h}^{n+1,(0)}`, using transport coefficients evaluated at :math:`t^n` everywhere, - - .. math:: - - \begin{aligned} - \frac{\rho^{n+1}\widehat{h}^{n+1,(0)} - (\rho h)^n}{\Delta t} =& A_{\rho h}^{(0)} + I_{\rho h}^{\rm lagged}\nonumber\\ - & + \half\left(\nabla\cdot\frac{\kth^n}{c_p^n}\nabla h^n + \nabla\cdot\frac{\kth^n}{c_p^n}\nabla \widehat{h}^{n+1,(0)}\right)\nonumber\\ - & - \half\left(\sum_k\nabla\cdot\frac{\xi_k^n k_{\rm th}^n}{c_p^n}\nabla X_k^n + \sum_k\nabla\cdot\frac{\xi_k^n k_{\rm th}^n}{c_p^n}\nabla\widehat{X}_k^{n+1,(0)}\right)\nonumber\\ - & - \half\left(\nabla\cdot\frac{h_p^n k_{\rm th}^n}{c_p^n}\nabla p_0 + \nabla\cdot\frac{h_p^n k_{\rm th}^n}{c_p^n}\nabla p_0\right),\label{eq:sdc enthalpy 2}\end{aligned} - - which is equivalent to - - .. 
math:: - - \begin{aligned} - \left(\rho^{n+1} - \frac{\Delta t}{2}\nabla\cdot\frac{k_{\rm th}^n}{c_p^n}\nabla\right)\widehat{h}^{n+1,(0)} =& (\rho h)^n + \Delta t\Bigg[A_{\rho h}^{(0)} + I_{\rho h}^{\rm lagged} + \left(\half\nabla\cdot\frac{\kth^n}{c_p^n}\nabla h^n\right)\nonumber\\ - &\hspace{0.85in} - \half\left(\sum_k\nabla\cdot\frac{\xi_k^n k_{\rm th}^n}{c_p^n}\nabla X_k^n + \sum_k\nabla\cdot\frac{\xi_k^n k_{\rm th}^n}{c_p^n}\nabla\widehat{X}_k^{n+1,(0)}\right)\nonumber\\ - &\hspace{0.85in} - \half\left(\nabla\cdot\frac{h_p^n k_{\rm th}^n}{c_p^n}\nabla p_0 + \nabla\cdot\frac{h_p^n k_{\rm th}^n}{c_p^n}\nabla p_0\right)\Bigg].\end{aligned} - -| **Step 2C:** (*Advance thermodynamic variables*) -| Define :math:`Q_{\rho X_k}^{(0)}` as the right hand side of (`[eq:sdc species 2] <#eq:sdc species 2>`__) without the :math:`I_{\rho X_k}^{\rm lagged}` term, and define :math:`Q_{\rho h}^{(0)}` as the right hand side of (`[eq:sdc enthalpy 2] <#eq:sdc enthalpy 2>`__) without the :math:`I_{\rho h}^{\rm lagged}` term. Use VODE to integrate (`[eq:sdc species] <#eq:sdc species>`__) and (`[eq:sdc enthalpy] <#eq:sdc enthalpy>`__) over :math:`\Delta t` to advance :math:`(\rho X_k, \rho h)^n` to :math:`(\rho X_k, \rho h)^{n+1,(0)}` using the piecewise-constant advection and diffusion source terms: - - .. math:: - - \begin{aligned} - \frac{\partial(\rho X_k)}{\partial t} &=& Q_{\rho X_k}^{(0)} + \rho\dot\omega_k\\ - \frac{\partial(\rho h)}{\partial t} &=& Q_{\rho h}^{(0)} + \rho\Hnuc.\end{aligned} - - At this point we can define :math:`I_{\rho X_k}^{(0)}` and :math:`I_{\rho h}^{(0)}`, or whatever term we need depending on our species and enthalpy edge state prediction types, for use in the corrector step. In our first implementation, we are predicting :math:`\rho X_k` and :math:`\rho h`, in which case we define: - - ..
math:: - - \begin{aligned} - I_{\rho X_k}^{(0)} &=& \frac{(\rho X_k)^{n+1,(0)} - (\rho X_k)^n}{\Delta t} - Q_{\rho X_k}^{(0)}\\ - I_{\rho h}^{(0)} &=& \frac{(\rho h)^{n+1,(0)} - (\rho h)^n}{\Delta t} - Q_{\rho h}^{(0)}.\end{aligned} - -| **Step 3:** (*Update advection velocities*) -| First, compute :math:`S^{n+\myhalf}` and :math:`\beta_0^{n+\myhalf}` using - - .. math:: S^{n+\myhalf} = \frac{S^n + S^{n+1,(0)}}{2}, \qquad \beta_0^{n+\myhalf} = \frac{\beta_0^n + \beta_0^{n+1,(0)}}{2}. - - Then, project :math:`\uadvsdcstar` to obtain :math:`\uadvsdc`, which satisfies - - .. math:: \nabla\cdot\left(\beta_0^{n+\myhalf}\uadvsdc\right) = S^{n+\myhalf}. - -| **Step 4:** (*Corrector Loop*) -| We loop over this step from :math:`j=1,j_{\rm max}` times. In the corrector, we use the time-advanced state from the predictor to perform a more accurate integration of the thermodynamic variables. The quantities :math:`(S, \beta_0, k_{\rm th}, c_p, \xi_k, h_p)^{n+1,(j-1)}` are computed from :math:`(\rho,\rho X_k,\rho h)^{n+1,(j-1)}`. This step is divided into several sub-steps: -| **Step 4A:** (*Compute advective flux divergences*) -| Use :math:`\uadvsdc` and a second-order Godunov integrator to compute time-centered edge states, :math:`(\rho X_k, \rho h)^{n+\myhalf}`, with iteratively-lagged reactions (:math:`I^{(j-1)}`), explicit diffusion, and time-centered thermodynamic pressure as source terms. Define the advective flux divergences as - - .. math:: - - \begin{aligned} - A_{\rho X_k}^{(j)} &=& -\nabla\cdot\left[\left(\rho X_k\right)^{n+\myhalf,(j)}\uadvsdc\right],\\ - A_{\rho h}^{(j)} &=& -\nabla\cdot\left[\left(\rho h\right)^{n+\myhalf,(j)}\uadvsdc\right] + \uadvsdc\cdot\nabla p_0.\end{aligned} - - Then, compute preliminary, time-advanced species using - - .. 
math:: \frac{\rho^{n+1}\widehat{X}_k^{n+1,(j)} - (\rho X_k)^n}{\Delta t} = A_{\rho X_k}^{(j)} + I_{\rho X_k}^{(j-1)}.\label{eq:sdc species 3} - -| **Step 4B:** (*Compute diffusive flux divergence*) -| Solve a backward-Euler-type correction equation for :math:`\widehat{h}^{n+1,(j)}`, - - .. math:: - - \begin{aligned} - \frac{\rho^{n+1}\widehat{h}^{n+1,(j)} - (\rho h)^n}{\Delta t} =& A_{\rho h}^{(j)} + I_{\rho h}^{(j-1)}\nonumber\\ - & + \nabla\cdot\frac{\kth^{n+1,(j-1)}}{c_p^{n+1,(j-1)}}\nabla\widehat{h}^{n+1,(j)} + \half\left(\nabla\cdot\frac{\kth^n}{c_p^n}\nabla h^n - \nabla\cdot\frac{\kth^{n+1,(j-1)}}{c_p^{n+1,(j-1)}}\nabla h^{n+1,(j-1)}\right)\nonumber\\ - & - \half\left(\sum_k\nabla\cdot\frac{\xi_k^n k_{\rm th}^n}{c_p^n}\nabla X_k^n + \sum_k\nabla\cdot\frac{\xi_k^{n+1,(j-1)} k_{\rm th}^{n+1,(j-1)}}{c_p^{n+1,(j-1)}}\nabla\widehat{X}_k^{n+1,(j)}\right)\nonumber\\ - & - \half\left(\nabla\cdot\frac{h_p^n k_{\rm th}^n}{c_p^n}\nabla p_0 + \nabla\cdot\frac{h_p^{n+1,(j-1)}k_{\rm th}^{n+1,(j-1)}}{c_p^{n+1,(j-1)}}\nabla p_0\right),\label{eq:sdc enthalpy 3}\end{aligned} - - which is equivalent to - - .. 
math:: - - \begin{aligned} - \left(\rho^{n+1} - \Delta t\nabla\cdot\frac{\kth^{n+1,(j-1)}}{c_p^{n+1,(j-1)}}\nabla\right)\widehat{h}^{n+1,(j)} =& (\rho h)^n + \Delta t\Bigg[A_{\rho h}^{(j)} + I_{\rho h}^{(j-1)} \nonumber\\ - & + \half\left(\nabla\cdot\frac{\kth^n}{c_p^n}\nabla h^n - \nabla\cdot\frac{\kth^{n+1,(j-1)}}{c_p^{n+1,(j-1)}}\nabla h^{n+1,(j-1)}\right)\nonumber\\ - & - \half\left(\sum_k\nabla\cdot\frac{\xi_k^n k_{\rm th}^n}{c_p^n}\nabla X_k^n + \sum_k\nabla\cdot\frac{\xi_k^{n+1,(j-1)} k_{\rm th}^{n+1,(j-1)}}{c_p^{n+1,(j-1)}}\nabla\widehat{X}_k^{n+1,(j)}\right)\nonumber\\ - & - \half\left(\nabla\cdot\frac{h_p^n k_{\rm th}^n}{c_p^n}\nabla p_0 + \nabla\cdot\frac{h_p^{n+1,(j-1)}k_{\rm th}^{n+1,(j-1)}}{c_p^{n+1,(j-1)}}\nabla p_0\right)\Bigg].\end{aligned} - -| **Step 4C:** (*Advance thermodynamic variables*) -| Define :math:`Q_{\rho X_k}^{(j)}` as the right hand side of (`[eq:sdc species 3] <#eq:sdc species 3>`__) without the :math:`I_{\rho X_k}^{(j-1)}` term, and define :math:`Q_{\rho h}^{(j)}` as the right hand side of (`[eq:sdc enthalpy 3] <#eq:sdc enthalpy 3>`__) without the :math:`I_{\rho h}^{(j-1)}` term. Use VODE to integrate (`[eq:sdc species] <#eq:sdc species>`__) and (`[eq:sdc enthalpy] <#eq:sdc enthalpy>`__) over :math:`\Delta t` to advance :math:`(\rho X_k, \rho h)^n` to :math:`(\rho X_k, \rho h)^{n+1,(j)}` using the piecewise-constant advection and diffusion source terms: - - .. math:: - - \begin{aligned} - \frac{\partial(\rho X_k)}{\partial t} &=& Q_{\rho X_k}^{(j)} + \rho\dot\omega_k\\ - \frac{\partial(\rho h)}{\partial t} &=& Q_{\rho h}^{(j)} + \rho\Hnuc.\end{aligned} - - At this point we can define :math:`I_{\rho X_k}^{(j)}`, :math:`I_{\rho h}^{(j)}`, and any other :math:`I` terms we need depending on - our species and enthalpy edge state prediction types, for use in the predictor in the next time step. In our first implementation, we are predicting :math:`\rho X_k` and :math:`\rho h`, in which case we define: - - ..
math:: - - \begin{aligned} - I_{\rho X_k}^{(j)} &=& \frac{(\rho X_k)^{n+1,(j)} - (\rho X_k)^n}{\Delta t} - Q_{\rho X_k}^{(j)}\\ - I_{\rho h}^{(j)} &=& \frac{(\rho h)^{n+1,(j)} - (\rho h)^n}{\Delta t} - Q_{\rho h}^{(j)}.\end{aligned} - -| **Step 5:** (*Advance velocity and dynamic pressure*) -| Similar to the original MAESTRO algorithm, more to come. diff --git a/sphinx_docs/source/thermo_notes.rst b/sphinx_docs/source/thermo_notes.rst index bc55e2138..c33dbdf1f 100644 --- a/sphinx_docs/source/thermo_notes.rst +++ b/sphinx_docs/source/thermo_notes.rst @@ -268,7 +268,7 @@ i.e. (Note that :math:`\chi_{\bar{\mu}}` is negative, as pressure is inversely proportional to mass per particle, and :math:`\frac{d \ln \bar{\mu}}{d \ln P}` is positive, since -nuclear reactions sythesize more massive particles in the center of the star.) +nuclear reactions synthesize more massive particles in the center of the star.) In this case, when a rising parcel eventually reaches neutral buoyancy, it will have a temperature excess in comparison to its surroundings. If the parcel can retain