diff --git a/exec/dean_kow/GNUmakefile b/exec/dean_kow/GNUmakefile deleted file mode 100644 index 41b1f9645..000000000 --- a/exec/dean_kow/GNUmakefile +++ /dev/null @@ -1,39 +0,0 @@ -# AMREX_HOME defines the directory in which we will find all the AMReX code. -AMREX_HOME ?= ../../../amrex - -DEBUG = FALSE -USE_MPI = TRUE -USE_OMP = FALSE -COMP = gcc -DIM = 2 -MAX_SPEC = 8 - -USE_CUDA = FALSE -USE_HIP = FALSE -USE_DPCPP = FALSE - - -include $(AMREX_HOME)/Tools/GNUMake/Make.defs - - -include ../../src_deankow/Make.package -VPATH_LOCATIONS += ../../src_deankow -INCLUDE_LOCATIONS += ../../src_deankow - - -include ../../src_rng/Make.package -VPATH_LOCATIONS += ../../src_rng/ -INCLUDE_LOCATIONS += ../../src_rng/ - - -include ../../src_common/Make.package -VPATH_LOCATIONS += ../../src_common/ -INCLUDE_LOCATIONS += ../../src_common/ - - -include $(AMREX_HOME)/Src/Base/Make.package - -include $(AMREX_HOME)/Tools/GNUMake/Make.rules - -MAXSPECIES := $(strip $(MAX_SPEC)) -DEFINES += -DMAX_SPECIES=$(MAXSPECIES) diff --git a/exec/dean_kow/multilevel/GNUmakefile b/exec/dean_kow/multilevel/GNUmakefile new file mode 100644 index 000000000..19e549e3b --- /dev/null +++ b/exec/dean_kow/multilevel/GNUmakefile @@ -0,0 +1,23 @@ + +PRECISION = DOUBLE +PROFILE = FALSE + +DEBUG = TRUE +DEBUG = FALSE + +DIM = 2 + +MAX_SPEC = 8 + +COMP = gnu + +USE_MPI = TRUE +USE_OMP = FALSE +USE_CUDA = FALSE + +USE_PARTICLES = TRUE + +Bpack := ./Make.package +Blocs := . + +include Make.Adv diff --git a/exec/dean_kow/multilevel/Make.Adv b/exec/dean_kow/multilevel/Make.Adv new file mode 100644 index 000000000..e2e53cf6b --- /dev/null +++ b/exec/dean_kow/multilevel/Make.Adv @@ -0,0 +1,41 @@ +AMREX_HOME ?= /path/to/amrex + +EBASE := main + +BL_NO_FORT = TRUE + +include $(AMREX_HOME)/Tools/GNUMake/Make.defs + +SRC_DIR = ../../../src_deankow/multilevel +RNG_DIR = ../../../src_rng/ +COM_DIR = ../../../src_common/ + +include $(SRC_DIR)/Make.package +include $(SRC_DIR)/Src_K/Make.package +INCLUDE_LOCATIONS += $(SRC_DIR) $(SRC_DIR)/Src_K +VPATH_LOCATIONS += $(SRC_DIR) $(SRC_DIR)/Src_K + +include $(RNG_DIR)/Make.package +INCLUDE_LOCATIONS += $(RNG_DIR) +VPATH_LOCATIONS += $(RNG_DIR) + +include $(COM_DIR)/Make.package +INCLUDE_LOCATIONS += $(COM_DIR) +VPATH_LOCATIONS += $(COM_DIR) + +Pdirs := Base Boundary AmrCore +ifeq ($(USE_PARTICLES),TRUE) +Pdirs += Particle +endif + +Ppack += $(foreach dir, $(Pdirs), $(AMREX_HOME)/Src/$(dir)/Make.package) + +include $(Ppack) + +all: $(executable) + @echo SUCCESS + +include $(AMREX_HOME)/Tools/GNUMake/Make.rules + +MAXSPECIES := $(strip $(MAX_SPEC)) +DEFINES += -DMAX_SPECIES=$(MAXSPECIES) diff --git a/exec/dean_kow/multilevel/Make.package b/exec/dean_kow/multilevel/Make.package new file mode 100644 index 000000000..e69de29bb diff --git a/exec/dean_kow/multilevel/inputs b/exec/dean_kow/multilevel/inputs new file mode 100644 index 000000000..a17624212 --- /dev/null +++ b/exec/dean_kow/multilevel/inputs @@ -0,0 +1,92 @@ +# ***************************************************************** +# Run until nsteps == max_step or time == stop_time, +# whichever comes first +# ***************************************************************** +max_step = 2500 + +seed = 1024 + +amrex.fpe_trap_invalid = 1 + +# ***************************************************************** +# Specific to this application +# ***************************************************************** +npts_scale = 1000000. 
+alg_type = 0 + +# ***************************************************************** +# Are we restarting from an existing checkpoint file? +# ***************************************************************** +#amr.restart = chk00060 # restart from this checkpoint file + +# ***************************************************************** +# Problem size and geometry +# ***************************************************************** +geometry.prob_lo = 0.0 0.0 0.0 +geometry.prob_hi = 1.0 1.0 1.0 +geometry.is_periodic = 1 1 1 + +# ***************************************************************** +# VERBOSITY +# ***************************************************************** +amr.v = 1 # verbosity in Amr + +# ***************************************************************** +# Resolution and refinement +# ***************************************************************** + +amr.n_cell = 256 256 +amr.max_grid_size_x = 64 64 +amr.max_grid_size_y = 64 64 + +amr.max_level = 1 +amr.use_particles = 1 # Turn particles on or off +amr.ref_ratio = 1 + +#amr.max_level = 0 +#amr.use_particles = 0 # Turn particles on or off + +# ***************************************************************** +# Control of grid creation +# ***************************************************************** +# Blocking factor for grid creation in each dimension -- +# this ensures that every grid is coarsenable by a factor of 8 -- +# this is mostly relevant for multigrid performance +amr.blocking_factor_x = 16 +amr.blocking_factor_y = 16 +amr.blocking_factor_z = 8 + +amr.regrid_int =-2 # how often to regrid + +# ***************************************************************** +# Time step control +# ***************************************************************** +adv.cfl = 0.01 # CFL constraint +adv.cfl = 0.25 # CFL constraint + +# ***************************************************************** +# Should we reflux at coarse-fine boundaries? 
+# ***************************************************************** +adv.do_reflux = 1 + +# ***************************************************************** +# Tagging - if phi < phierr at level 0, then refine +# ***************************************************************** +adv.phierr = 10000 # this for 32x32 (5 * 32 * 32 = 5120) +adv.phierr = 100000 # this for 64x64 (5 * 64 * 64 = 20480) +adv.phierr = 160000 # this for 128x128 (5 * 128 * 128 = 81920) +adv.phierr = 400000 # this for 256x256 (5 * 256 * 256 = 327680) + +# ***************************************************************** +# Plotfile name and frequency +# ***************************************************************** +amr.plot_file = plt # root name of plot file +amr.plot_int = 10 # number of timesteps between plot files + # if negative then no plot files will be written + +# ***************************************************************** +# Checkpoint name and frequency +# ***************************************************************** +amr.chk_file = chk # root name of checkpoint file +amr.chk_int = -1 # number of timesteps between checkpoint files + # if negative then no checkpoint files will be written diff --git a/exec/dean_kow/multilevel/inputs_bug1 b/exec/dean_kow/multilevel/inputs_bug1 new file mode 100644 index 000000000..dba196d67 --- /dev/null +++ b/exec/dean_kow/multilevel/inputs_bug1 @@ -0,0 +1,98 @@ +# ***************************************************************** +# Run until nsteps == max_step or time == stop_time, +# whichever comes first +# ***************************************************************** +max_step = 2500 +max_step = 5 + +seed = 1024 + +amrex.fpe_trap_invalid = 1 + +# ***************************************************************** +# Specific to this application +# ***************************************************************** +npts_scale = 1000000. +alg_type = 0 + +# ***************************************************************** +# Are we restarting from an existing checkpoint file? 
+# ***************************************************************** +#amr.restart = chk00060 # restart from this checkpoint file + +# ***************************************************************** +# Problem size and geometry +# ***************************************************************** +geometry.prob_lo = 0.0 0.0 0.0 +geometry.prob_hi = 1.0 1.0 1.0 +geometry.is_periodic = 1 1 1 + +# ***************************************************************** +# VERBOSITY +# ***************************************************************** +amr.v = 1 # verbosity in Amr + +# ***************************************************************** +# Resolution and refinement +# ***************************************************************** + +amr.n_cell = 256 256 +amr.n_cell = 32 32 +amr.max_grid_size_x = 256 256 +amr.max_grid_size_y = 256 256 +amr.max_grid_size_x = 64 64 +amr.max_grid_size_y = 64 64 + +amr.max_level = 1 +amr.use_particles = 1 # Turn particles on or off +amr.ref_ratio = 1 + +#amr.max_level = 0 +#amr.use_particles = 0 # Turn particles on or off + +# ***************************************************************** +# Control of grid creation +# ***************************************************************** +# Blocking factor for grid creation in each dimension -- +# this ensures that every grid is coarsenable by a factor of 8 -- +# this is mostly relevant for multigrid performance +amr.blocking_factor_x = 16 +amr.blocking_factor_y = 16 +amr.blocking_factor_x = 8 +amr.blocking_factor_y = 8 +amr.blocking_factor_z = 8 + +amr.regrid_int = 2 # how often to regrid + +# ***************************************************************** +# Time step control +# ***************************************************************** +adv.cfl = 0.01 # CFL constraint +adv.cfl = 0.25 # CFL constraint + +# ***************************************************************** +# Should we reflux at coarse-fine boundaries? 
+# ***************************************************************** +adv.do_reflux = 1 + +# ***************************************************************** +# Tagging - if phi < phierr at level 0, then refine +# ***************************************************************** +adv.phierr = 100000 # this for 64x64 (5 * 64 * 64 = 20480) +adv.phierr = 160000 # this for 128x128 (5 * 128 * 128 = 81920) +adv.phierr = 400000 # this for 256x256 (5 * 256 * 256 = 327680) +adv.phierr = 10000 # this for 32x32 (5 * 32 * 32 = 5120) + +# ***************************************************************** +# Plotfile name and frequency +# ***************************************************************** +amr.plot_file = plt # root name of plot file +amr.plot_int = 1 # number of timesteps between plot files + # if negative then no plot files will be written + +# ***************************************************************** +# Checkpoint name and frequency +# ***************************************************************** +amr.chk_file = chk # root name of checkpoint file +amr.chk_int = -1 # number of timesteps between checkpoint files + # if negative then no checkpoint files will be written diff --git a/exec/dean_kow/multilevel/inputs_bug2 b/exec/dean_kow/multilevel/inputs_bug2 new file mode 100644 index 000000000..55f0d1a96 --- /dev/null +++ b/exec/dean_kow/multilevel/inputs_bug2 @@ -0,0 +1,96 @@ +# ***************************************************************** +# Run until nsteps == max_step or time == stop_time, +# whichever comes first +# ***************************************************************** +max_step = 2500 +max_step = 5 + +seed = 1024 + +amrex.fpe_trap_invalid = 1 + +# ***************************************************************** +# Specific to this application +# ***************************************************************** +npts_scale = 1000000. +alg_type = 0 + +# ***************************************************************** +# Are we restarting from an existing checkpoint file? 
+# ***************************************************************** +#amr.restart = chk00060 # restart from this checkpoint file + +# ***************************************************************** +# Problem size and geometry +# ***************************************************************** +geometry.prob_lo = 0.0 0.0 0.0 +geometry.prob_hi = 1.0 1.0 1.0 +geometry.is_periodic = 1 1 1 + +# ***************************************************************** +# VERBOSITY +# ***************************************************************** +amr.v = 1 # verbosity in Amr + +# ***************************************************************** +# Resolution and refinement +# ***************************************************************** + +amr.n_cell = 256 256 +amr.n_cell = 32 32 +amr.max_grid_size_x = 256 256 +amr.max_grid_size_y = 256 256 +amr.max_grid_size_x = 64 64 +amr.max_grid_size_y = 64 64 + +amr.max_level = 1 +amr.use_particles = 1 # Turn particles on or off +amr.ref_ratio = 1 + +#amr.max_level = 0 +#amr.use_particles = 0 # Turn particles on or off + +# ***************************************************************** +# Control of grid creation +# ***************************************************************** +# Blocking factor for grid creation in each dimension -- +# this ensures that every grid is coarsenable by a factor of 8 -- +# this is mostly relevant for multigrid performance +amr.blocking_factor_x = 16 +amr.blocking_factor_y = 16 +amr.blocking_factor_z = 8 + +amr.regrid_int = 2 # how often to regrid + +# ***************************************************************** +# Time step control +# ***************************************************************** +adv.cfl = 0.01 # CFL constraint +adv.cfl = 0.25 # CFL constraint + +# ***************************************************************** +# Should we reflux at coarse-fine boundaries? +# ***************************************************************** +adv.do_reflux = 1 + +# ***************************************************************** +# Tagging - if phi < phierr at level 0, then refine +# ***************************************************************** +adv.phierr = 100000 # this for 64x64 (5 * 64 * 64 = 20480) +adv.phierr = 160000 # this for 128x128 (5 * 128 * 128 = 81920) +adv.phierr = 400000 # this for 256x256 (5 * 256 * 256 = 327680) +adv.phierr = 10000 # this for 32x32 (5 * 32 * 32 = 5120) + +# ***************************************************************** +# Plotfile name and frequency +# ***************************************************************** +amr.plot_file = plt # root name of plot file +amr.plot_int = 1 # number of timesteps between plot files + # if negative then no plot files will be written + +# ***************************************************************** +# Checkpoint name and frequency +# ***************************************************************** +amr.chk_file = chk # root name of checkpoint file +amr.chk_int = -1 # number of timesteps between checkpoint files + # if negative then no checkpoint files will be written diff --git a/exec/dean_kow/singlelevel/GNUmakefile b/exec/dean_kow/singlelevel/GNUmakefile new file mode 100644 index 000000000..45b21eef5 --- /dev/null +++ b/exec/dean_kow/singlelevel/GNUmakefile @@ -0,0 +1,36 @@ +# AMREX_HOME defines the directory in which we will find all the AMReX code. 
+AMREX_HOME ?= ../../../amrex +USE_MPI = TRUE +USE_OMP = FALSE +COMP = gcc +DIM = 2 +MAX_SPEC = 8 + +USE_CUDA = FALSE +USE_HIP = FALSE +USE_SYCL = FALSE + +DEBUG = FALSE + +USE_PARTICLES = TRUE + +include $(AMREX_HOME)/Tools/GNUMake/Make.defs + +include ../../../src_deankow/singlelevel/Make.package +VPATH_LOCATIONS += ../../../src_deankow/singlelevel +INCLUDE_LOCATIONS += ../../../src_deankow/singlelevel + +include ../../../src_rng/Make.package +VPATH_LOCATIONS += ../../../src_rng/ +INCLUDE_LOCATIONS += ../../../src_rng/ + +include ../../../src_common/Make.package +VPATH_LOCATIONS += ../../../src_common/ +INCLUDE_LOCATIONS += ../../../src_common/ + +include $(AMREX_HOME)/Src/Base/Make.package + +include $(AMREX_HOME)/Tools/GNUMake/Make.rules + +MAXSPECIES := $(strip $(MAX_SPEC)) +DEFINES += -DMAX_SPECIES=$(MAXSPECIES) diff --git a/exec/dean_kow/inputs b/exec/dean_kow/singlelevel/inputs similarity index 76% rename from exec/dean_kow/inputs rename to exec/dean_kow/singlelevel/inputs index bb82fda1b..70b2d1fa1 100644 --- a/exec/dean_kow/inputs +++ b/exec/dean_kow/singlelevel/inputs @@ -1,9 +1,10 @@ -nsteps = 10 -plot_int = 1 +nsteps = 10000 +plot_int = 10 n_cell = 256 max_grid_size = 256 npts_scale = 1000000. -cfl = .01 +cfl = .25 +alg_type = 1 # 0 = periodic # 2 = homogeneous Neumann (zero flux) diff --git a/exec/hydro/Checkpoint.cpp b/exec/hydro/Checkpoint.cpp index 7e49c0047..6ac3cf5ba 100644 --- a/exec/hydro/Checkpoint.cpp +++ b/exec/hydro/Checkpoint.cpp @@ -82,13 +82,14 @@ void WriteCheckPoint(int step, } } + int comm_rank = 0; + int n_ranks = 1; +#if AMREX_USE_MPI // C++ random number engine // have each MPI process write its random number state to a different file - int comm_rank; MPI_Comm_rank(MPI_COMM_WORLD, &comm_rank); - - int n_ranks; MPI_Comm_size(MPI_COMM_WORLD, &n_ranks); +#endif // don't write out all the rng states at once (overload filesystem) // one at a time write out the rng states to different files, one for each MPI rank @@ -195,13 +196,14 @@ void ReadCheckPoint(int& step, #endif } + int comm_rank = 0; + int n_ranks = 1; +#if AMREX_USE_MPI // C++ random number engine // each MPI process reads in its own file - int comm_rank; MPI_Comm_rank(MPI_COMM_WORLD, &comm_rank); - - int n_ranks; MPI_Comm_size(MPI_COMM_WORLD, &n_ranks); +#endif if (seed < 0) { diff --git a/exec/hydro/inputs_incomp_equil_fluct_2d b/exec/hydro/inputs_incomp_equil_fluct_2d index b1e28e6ff..7305aee92 100644 --- a/exec/hydro/inputs_incomp_equil_fluct_2d +++ b/exec/hydro/inputs_incomp_equil_fluct_2d @@ -49,8 +49,6 @@ bc_vel_lo = -1 -1 bc_vel_hi = -1 -1 -algorithm_type = 2 - mg_verbose = 0 # multigrid verbosity diff --git a/exec/immersed_boundary/channel_dumbbell/main_driver.cpp b/exec/immersed_boundary/channel_dumbbell/main_driver.cpp index 2052e1a99..e1bd8de3a 100644 --- a/exec/immersed_boundary/channel_dumbbell/main_driver.cpp +++ b/exec/immersed_boundary/channel_dumbbell/main_driver.cpp @@ -343,13 +343,13 @@ void main_driver(const char * argv) { init_vel(BL_TO_FORTRAN_BOX(bx), BL_TO_FORTRAN_ANYD(umac[d][mfi]), geom.CellSize(), geom.ProbLo(), geom.ProbHi(), & d, - ZFILL(realDomain.lo()), ZFILL(realDomain.hi()), + AMREX_ZFILL(realDomain.lo()), AMREX_ZFILL(realDomain.hi()), &prob_type); // initialize tracer init_s_vel(BL_TO_FORTRAN_BOX(bx), BL_TO_FORTRAN_ANYD(tracer[mfi]), - dx, ZFILL(realDomain.lo()), ZFILL(realDomain.hi())); + dx, AMREX_ZFILL(realDomain.lo()), AMREX_ZFILL(realDomain.hi())); } diff --git a/exec/immersed_boundary/channel_multiblob/main_driver.cpp 
b/exec/immersed_boundary/channel_multiblob/main_driver.cpp index 24f2998f5..377f8be19 100644 --- a/exec/immersed_boundary/channel_multiblob/main_driver.cpp +++ b/exec/immersed_boundary/channel_multiblob/main_driver.cpp @@ -411,20 +411,20 @@ void main_driver(const char * argv) { AMREX_D_TERM(dm=0; init_vel(BL_TO_FORTRAN_BOX(bx), BL_TO_FORTRAN_ANYD(umac[0][mfi]), geom.CellSize(), geom.ProbLo(), geom.ProbHi() ,&dm, - ZFILL(realDomain.lo()), ZFILL(realDomain.hi()));, + AMREX_ZFILL(realDomain.lo()), AMREX_ZFILL(realDomain.hi()));, dm=1; init_vel(BL_TO_FORTRAN_BOX(bx), BL_TO_FORTRAN_ANYD(umac[1][mfi]), geom.CellSize(), geom.ProbLo(), geom.ProbHi() ,&dm, - ZFILL(realDomain.lo()), ZFILL(realDomain.hi()));, + AMREX_ZFILL(realDomain.lo()), AMREX_ZFILL(realDomain.hi()));, dm=2; init_vel(BL_TO_FORTRAN_BOX(bx), BL_TO_FORTRAN_ANYD(umac[2][mfi]), geom.CellSize(), geom.ProbLo(), geom.ProbHi() ,&dm, - ZFILL(realDomain.lo()), ZFILL(realDomain.hi()));); + AMREX_ZFILL(realDomain.lo()), AMREX_ZFILL(realDomain.hi()));); // initialize tracer init_s_vel(BL_TO_FORTRAN_BOX(bx), BL_TO_FORTRAN_ANYD(tracer[mfi]), - dx, ZFILL(realDomain.lo()), ZFILL(realDomain.hi())); + dx, AMREX_ZFILL(realDomain.lo()), AMREX_ZFILL(realDomain.hi())); } diff --git a/exec/immersed_boundary/channel_rigid/main_driver.cpp b/exec/immersed_boundary/channel_rigid/main_driver.cpp index b6a393c50..298676c3d 100644 --- a/exec/immersed_boundary/channel_rigid/main_driver.cpp +++ b/exec/immersed_boundary/channel_rigid/main_driver.cpp @@ -324,20 +324,20 @@ void main_driver(const char * argv) { AMREX_D_TERM(dm=0; init_vel(BL_TO_FORTRAN_BOX(bx), BL_TO_FORTRAN_ANYD(umac[0][mfi]), geom.CellSize(), geom.ProbLo(), geom.ProbHi(), & dm, - ZFILL(realDomain.lo()), ZFILL(realDomain.hi()));, + AMREX_ZFILL(realDomain.lo()), AMREX_ZFILL(realDomain.hi()));, dm=1; init_vel(BL_TO_FORTRAN_BOX(bx), BL_TO_FORTRAN_ANYD(umac[1][mfi]), geom.CellSize(), geom.ProbLo(), geom.ProbHi(), & dm, - ZFILL(realDomain.lo()), ZFILL(realDomain.hi()));, + AMREX_ZFILL(realDomain.lo()), AMREX_ZFILL(realDomain.hi()));, dm=2; init_vel(BL_TO_FORTRAN_BOX(bx), BL_TO_FORTRAN_ANYD(umac[2][mfi]), geom.CellSize(), geom.ProbLo(), geom.ProbHi(), & dm, - ZFILL(realDomain.lo()), ZFILL(realDomain.hi()));); + AMREX_ZFILL(realDomain.lo()), AMREX_ZFILL(realDomain.hi()));); // initialize tracer init_s_vel(BL_TO_FORTRAN_BOX(bx), BL_TO_FORTRAN_ANYD(tracer[mfi]), - dx, ZFILL(realDomain.lo()), ZFILL(realDomain.hi())); + dx, AMREX_ZFILL(realDomain.lo()), AMREX_ZFILL(realDomain.hi())); } @@ -454,7 +454,7 @@ void main_driver(const char * argv) { // const Box& bx = mfi.validbox(); // init_s_vel(BL_TO_FORTRAN_BOX(bx), // BL_TO_FORTRAN_ANYD(mf_cc[0][mfi]), - // dx, ZFILL(realDomain.lo()), ZFILL(realDomain.hi())); + // dx, AMREX_ZFILL(realDomain.lo()), AMREX_ZFILL(realDomain.hi())); // } // } diff --git a/exec/immersed_boundary/flagellum/main_driver.cpp b/exec/immersed_boundary/flagellum/main_driver.cpp index b1df8c383..970f1c644 100644 --- a/exec/immersed_boundary/flagellum/main_driver.cpp +++ b/exec/immersed_boundary/flagellum/main_driver.cpp @@ -544,7 +544,7 @@ void main_driver(const char * argv) { init_vel(BL_TO_FORTRAN_BOX(bx), BL_TO_FORTRAN_ANYD(umac[d][mfi]), geom.CellSize(), geom.ProbLo(), geom.ProbHi(), & d, - ZFILL(realDomain.lo()), ZFILL(realDomain.hi()), + AMREX_ZFILL(realDomain.lo()), AMREX_ZFILL(realDomain.hi()), &prob_type); } diff --git a/exec/immersed_boundary/taylor_line/main_driver.cpp b/exec/immersed_boundary/taylor_line/main_driver.cpp index b1b21272c..a72a4db82 100644 --- 
a/exec/immersed_boundary/taylor_line/main_driver.cpp +++ b/exec/immersed_boundary/taylor_line/main_driver.cpp @@ -538,7 +538,7 @@ void main_driver(const char * argv) { init_vel(BL_TO_FORTRAN_BOX(bx), BL_TO_FORTRAN_ANYD(umac[d][mfi]), geom.CellSize(), geom.ProbLo(), geom.ProbHi(), & d, - ZFILL(realDomain.lo()), ZFILL(realDomain.hi()), + AMREX_ZFILL(realDomain.lo()), AMREX_ZFILL(realDomain.hi()), &prob_type); } diff --git a/exec/multispec/Init.cpp b/exec/multispec/Init.cpp index 51d191e1f..b91870e6e 100644 --- a/exec/multispec/Init.cpp +++ b/exec/multispec/Init.cpp @@ -430,6 +430,70 @@ void InitRhoUmac(std::array< MultiFab, AMREX_SPACEDIM >& umac, }); + } else if (prob_type == 20) { + + /* + cylinder with radius = 1/4 of domain in x + c=c_init_1(:) inside, c=c_init_2(:) outside + can be discontinous or smooth depending on smoothing_width + */ + //Real rad = L[0] / 8.; + int nsub = 100; + Real factor = nsub; + Real dxsub = dx[0]/factor; + Real dysub = dx[1]/factor; + Real x,y,z; + amrex::Print() << "smoothing width " << smoothing_width << " film_thickness " << film_thickness << std::endl; + + amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept + { + for (int n=0; n +#if AMREX_USE_MPI #include #endif +#endif #include diff --git a/src_common/BCPhysToMath.cpp b/src_common/BCPhysToMath.cpp index dceb30e69..8b31ef488 100644 --- a/src_common/BCPhysToMath.cpp +++ b/src_common/BCPhysToMath.cpp @@ -10,24 +10,24 @@ void BCPhysToMath(int bccomp, amrex::Vector& bc_lo, amrex::Vector& bc_ // set to interior/periodic by default; overwrite below for (int i=0; i first-order extrapolation - bc_lo[i] = FOEXTRAP; + bc_lo[i] = amrex::BCType::foextrap; } else if (bc_vel_lo[i] == -2) { // pressure inflow - bc_lo[i] = EXT_DIR; + bc_lo[i] = amrex::BCType::ext_dir; } if (bc_vel_hi[i] == 1 || bc_vel_hi[i] == 2) { // wall -> first-order extrapolation - bc_hi[i] = FOEXTRAP; + bc_hi[i] = amrex::BCType::foextrap; } else if (bc_vel_hi[i] == -2) { // pressure inflow - bc_hi[i] = EXT_DIR; + bc_hi[i] = amrex::BCType::ext_dir; } } } @@ -37,19 +37,19 @@ void BCPhysToMath(int bccomp, amrex::Vector& bc_lo, amrex::Vector& bc_ for (int i=0; i first-order extrapolation - bc_lo[i] = FOEXTRAP; + bc_lo[i] = amrex::BCType::foextrap; } else if (bc_mass_lo[i] == 2) { // reservoir -> dirichlet - bc_lo[i] = EXT_DIR; + bc_lo[i] = amrex::BCType::ext_dir; } if (bc_mass_hi[i] == 1) { // wall -> first-order extrapolation - bc_hi[i] = FOEXTRAP; + bc_hi[i] = amrex::BCType::foextrap; } else if (bc_mass_hi[i] == 2) { // reservoir -> dirichlet - bc_hi[i] = EXT_DIR; + bc_hi[i] = amrex::BCType::ext_dir; } } } @@ -57,19 +57,19 @@ void BCPhysToMath(int bccomp, amrex::Vector& bc_lo, amrex::Vector& bc_ for (int i=0; i first-order extrapolation - bc_lo[i] = FOEXTRAP; + bc_lo[i] = amrex::BCType::foextrap; } else if (bc_therm_lo[i] == 2) { // isothermal -> dirichlet - bc_lo[i] = EXT_DIR; + bc_lo[i] = amrex::BCType::ext_dir; } if (bc_therm_hi[i] == 1) { // adiabatic -> first-order extrapolation - bc_hi[i] = FOEXTRAP; + bc_hi[i] = amrex::BCType::foextrap; } else if (bc_therm_hi[i] == 2) { // isothermal -> dirichlet - bc_hi[i] = EXT_DIR; + bc_hi[i] = amrex::BCType::ext_dir; } } } @@ -77,19 +77,19 @@ void BCPhysToMath(int bccomp, amrex::Vector& bc_lo, amrex::Vector& bc_ for (int i=0; i & // boundary conditions // note: at physical boundaries, // alter stencil at boundary since ghost value represents value at boundary - if (bc_lo[0] == FOEXTRAP || bc_lo[0] == EXT_DIR) { + if (bc_lo[0] == amrex::BCType::foextrap || bc_lo[0] == 
amrex::BCType::ext_dir) { if (bx_x.smallEnd(0) <= dom.smallEnd(0)) { int lo = dom.smallEnd(0); amrex::ParallelFor(bx_x, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -106,7 +106,7 @@ void ComputeGrad(const MultiFab & phi_in, std::array & } } - if (bc_hi[0] == FOEXTRAP || bc_hi[0] == EXT_DIR) { + if (bc_hi[0] == amrex::BCType::foextrap || bc_hi[0] == amrex::BCType::ext_dir) { if (bx_x.bigEnd(0) >= dom.bigEnd(0)+1) { int hi = dom.bigEnd(0)+1; amrex::ParallelFor(bx_x, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -120,7 +120,7 @@ void ComputeGrad(const MultiFab & phi_in, std::array & } } - if (bc_lo[1] == FOEXTRAP || bc_lo[1] == EXT_DIR) { + if (bc_lo[1] == amrex::BCType::foextrap || bc_lo[1] == amrex::BCType::ext_dir) { if (bx_y.smallEnd(1) <= dom.smallEnd(1)) { int lo = dom.smallEnd(1); amrex::ParallelFor(bx_y, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -134,7 +134,7 @@ void ComputeGrad(const MultiFab & phi_in, std::array & } } - if (bc_hi[1] == FOEXTRAP || bc_hi[1] == EXT_DIR) { + if (bc_hi[1] == amrex::BCType::foextrap || bc_hi[1] == amrex::BCType::ext_dir) { if (bx_y.bigEnd(1) >= dom.bigEnd(1)+1) { int hi = dom.bigEnd(1)+1; amrex::ParallelFor(bx_y, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -149,7 +149,7 @@ void ComputeGrad(const MultiFab & phi_in, std::array & } #if (AMREX_SPACEDIM == 3) - if (bc_lo[2] == FOEXTRAP || bc_lo[2] == EXT_DIR) { + if (bc_lo[2] == amrex::BCType::foextrap || bc_lo[2] == amrex::BCType::ext_dir) { if (bx_z.smallEnd(2) <= dom.smallEnd(2)) { int lo = dom.smallEnd(2); amrex::ParallelFor(bx_z, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -163,7 +163,7 @@ void ComputeGrad(const MultiFab & phi_in, std::array & } } - if (bc_hi[2] == FOEXTRAP || bc_hi[2] == EXT_DIR) { + if (bc_hi[2] == amrex::BCType::foextrap || bc_hi[2] == amrex::BCType::ext_dir) { if (bx_z.bigEnd(2) >= dom.bigEnd(2)+1) { int hi = dom.bigEnd(2)+1; amrex::ParallelFor(bx_z, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept diff --git a/src_common/ConvertStag.cpp b/src_common/ConvertStag.cpp index 05267c413..c544e738d 100644 --- a/src_common/ConvertStag.cpp +++ b/src_common/ConvertStag.cpp @@ -81,7 +81,7 @@ void AverageCCToFace(const MultiFab& cc_in, std::array // note: at physical boundaries, // the value in the ghost cells represent the value ON the boundary // so we simply copy the ghost cell value into the value on the domain (and ghost faces too) - if (bc_lo[0] == FOEXTRAP || bc_lo[0] == EXT_DIR) { + if (bc_lo[0] == amrex::BCType::foextrap || bc_lo[0] == amrex::BCType::ext_dir) { if (bx_x.smallEnd(0) <= dom.smallEnd(0)) { int lo = dom.smallEnd(0); amrex::ParallelFor(bx_x, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -93,7 +93,7 @@ void AverageCCToFace(const MultiFab& cc_in, std::array } } - if (bc_hi[0] == FOEXTRAP || bc_hi[0] == EXT_DIR) { + if (bc_hi[0] == amrex::BCType::foextrap || bc_hi[0] == amrex::BCType::ext_dir) { if (bx_x.bigEnd(0) >= dom.bigEnd(0)+1) { int hi = dom.bigEnd(0)+1; amrex::ParallelFor(bx_x, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -105,7 +105,7 @@ void AverageCCToFace(const MultiFab& cc_in, std::array } } - if (bc_lo[1] == FOEXTRAP || bc_lo[1] == EXT_DIR) { + if (bc_lo[1] == amrex::BCType::foextrap || bc_lo[1] == amrex::BCType::ext_dir) { if (bx_y.smallEnd(1) <= dom.smallEnd(1)) { int lo = dom.smallEnd(1); amrex::ParallelFor(bx_y, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) 
noexcept @@ -117,7 +117,7 @@ void AverageCCToFace(const MultiFab& cc_in, std::array } } - if (bc_hi[1] == FOEXTRAP || bc_hi[1] == EXT_DIR) { + if (bc_hi[1] == amrex::BCType::foextrap || bc_hi[1] == amrex::BCType::ext_dir) { if (bx_y.bigEnd(1) >= dom.bigEnd(1)+1) { int hi = dom.bigEnd(1)+1; amrex::ParallelFor(bx_y, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -130,7 +130,7 @@ void AverageCCToFace(const MultiFab& cc_in, std::array } #if (AMREX_SPACEDIM == 3) - if (bc_lo[2] == FOEXTRAP || bc_lo[2] == EXT_DIR) { + if (bc_lo[2] == amrex::BCType::foextrap || bc_lo[2] == amrex::BCType::ext_dir) { if (bx_z.smallEnd(2) <= dom.smallEnd(2)) { int lo = dom.smallEnd(2); amrex::ParallelFor(bx_z, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -142,7 +142,7 @@ void AverageCCToFace(const MultiFab& cc_in, std::array } } - if (bc_hi[2] == FOEXTRAP || bc_hi[2] == EXT_DIR) { + if (bc_hi[2] == amrex::BCType::foextrap || bc_hi[2] == amrex::BCType::ext_dir) { if (bx_z.bigEnd(2) >= dom.bigEnd(2)+1) { int hi = dom.bigEnd(2)+1; amrex::ParallelFor(bx_z, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -298,7 +298,7 @@ void AverageCCToNode(const MultiFab& cc_in, MultiFab& node_in, int scomp, int nc // boundary conditions // note: at physical boundaries, // the value in ghost cells represents the value ON the boundary - if (bc_lo[0] == FOEXTRAP || bc_lo[0] == EXT_DIR) { + if (bc_lo[0] == amrex::BCType::foextrap || bc_lo[0] == amrex::BCType::ext_dir) { if (bx.smallEnd(0) <= dom.smallEnd(0)) { int lo = dom.smallEnd(0); amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -314,7 +314,7 @@ void AverageCCToNode(const MultiFab& cc_in, MultiFab& node_in, int scomp, int nc } } - if (bc_hi[0] == FOEXTRAP || bc_hi[0] == EXT_DIR) { + if (bc_hi[0] == amrex::BCType::foextrap || bc_hi[0] == amrex::BCType::ext_dir) { if (bx.bigEnd(0) >= dom.bigEnd(0)+1) { int hi = dom.bigEnd(0); amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -330,7 +330,7 @@ void AverageCCToNode(const MultiFab& cc_in, MultiFab& node_in, int scomp, int nc } } - if (bc_lo[1] == FOEXTRAP || bc_lo[1] == EXT_DIR) { + if (bc_lo[1] == amrex::BCType::foextrap || bc_lo[1] == amrex::BCType::ext_dir) { if (bx.smallEnd(1) <= dom.smallEnd(1)) { int lo = dom.smallEnd(1); amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -346,7 +346,7 @@ void AverageCCToNode(const MultiFab& cc_in, MultiFab& node_in, int scomp, int nc } } - if (bc_hi[1] == FOEXTRAP || bc_hi[1] == EXT_DIR) { + if (bc_hi[1] == amrex::BCType::foextrap || bc_hi[1] == amrex::BCType::ext_dir) { if (bx.bigEnd(1) >= dom.bigEnd(1)+1) { int hi = dom.bigEnd(1); amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -364,7 +364,7 @@ void AverageCCToNode(const MultiFab& cc_in, MultiFab& node_in, int scomp, int nc #if (AMREX_SPACEDIM == 3) - if (bc_lo[2] == FOEXTRAP || bc_lo[2] == EXT_DIR) { + if (bc_lo[2] == amrex::BCType::foextrap || bc_lo[2] == amrex::BCType::ext_dir) { if (bx.smallEnd(2) <= dom.smallEnd(2)) { int lo = dom.smallEnd(2); amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -376,7 +376,7 @@ void AverageCCToNode(const MultiFab& cc_in, MultiFab& node_in, int scomp, int nc } } - if (bc_hi[2] == FOEXTRAP || bc_hi[2] == EXT_DIR) { + if (bc_hi[2] == amrex::BCType::foextrap || bc_hi[2] == amrex::BCType::ext_dir) { if (bx.bigEnd(2) >= dom.bigEnd(2)+1) { int 
hi = dom.bigEnd(2); amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -455,7 +455,7 @@ void AverageCCToEdge(const MultiFab& cc_in, std::array& edge // the value in ghost cells represents the value ON the boundary // lo-x - if (bc_lo[0] == FOEXTRAP || bc_lo[0] == EXT_DIR) { + if (bc_lo[0] == amrex::BCType::foextrap || bc_lo[0] == amrex::BCType::ext_dir) { if (bx_xy.smallEnd(0) <= dom.smallEnd(0)) { int lo = dom.smallEnd(0); amrex::ParallelFor(bx_xy, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -477,7 +477,7 @@ void AverageCCToEdge(const MultiFab& cc_in, std::array& edge } // hi-x - if (bc_hi[0] == FOEXTRAP || bc_hi[0] == EXT_DIR) { + if (bc_hi[0] == amrex::BCType::foextrap || bc_hi[0] == amrex::BCType::ext_dir) { if (bx_xy.bigEnd(0) >= dom.smallEnd(0)+1) { int hi = dom.bigEnd(0); amrex::ParallelFor(bx_xy, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -499,7 +499,7 @@ void AverageCCToEdge(const MultiFab& cc_in, std::array& edge } // lo-y - if (bc_lo[1] == FOEXTRAP || bc_lo[1] == EXT_DIR) { + if (bc_lo[1] == amrex::BCType::foextrap || bc_lo[1] == amrex::BCType::ext_dir) { if (bx_xy.smallEnd(1) <= dom.smallEnd(1)) { int lo = dom.smallEnd(1); amrex::ParallelFor(bx_xy, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -521,7 +521,7 @@ void AverageCCToEdge(const MultiFab& cc_in, std::array& edge } // hi-y - if (bc_hi[1] == FOEXTRAP || bc_hi[1] == EXT_DIR) { + if (bc_hi[1] == amrex::BCType::foextrap || bc_hi[1] == amrex::BCType::ext_dir) { if (bx_xy.bigEnd(1) <= dom.bigEnd(1)) { int hi = dom.bigEnd(1); amrex::ParallelFor(bx_xy, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -543,7 +543,7 @@ void AverageCCToEdge(const MultiFab& cc_in, std::array& edge } // lo-z - if (bc_lo[2] == FOEXTRAP || bc_lo[2] == EXT_DIR) { + if (bc_lo[2] == amrex::BCType::foextrap || bc_lo[2] == amrex::BCType::ext_dir) { if (bx_xz.smallEnd(2) <= dom.smallEnd(2)) { int lo = dom.smallEnd(2); amrex::ParallelFor(bx_xz, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -565,7 +565,7 @@ void AverageCCToEdge(const MultiFab& cc_in, std::array& edge } // hi-z - if (bc_hi[2] == FOEXTRAP || bc_hi[2] == EXT_DIR) { + if (bc_hi[2] == amrex::BCType::foextrap || bc_hi[2] == amrex::BCType::ext_dir) { if (bx_xz.bigEnd(2) <= dom.bigEnd(2)) { int hi = dom.bigEnd(2); amrex::ParallelFor(bx_xz, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept diff --git a/src_common/MultiFabPhysBC.cpp b/src_common/MultiFabPhysBC.cpp index e389c18c6..529efaf59 100644 --- a/src_common/MultiFabPhysBC.cpp +++ b/src_common/MultiFabPhysBC.cpp @@ -3,8 +3,8 @@ // Ghost cell filling routine. // Fills in ALL ghost cells to the value ON the boundary. -// FOEXTRAP uses boundary conditions (Neumann) and 1 interior points. -// EXT_DIR copies the supplied Dirichlet condition into the ghost cells. +// amrex::BCType::foextrap uses boundary conditions (Neumann) and 1 interior points. +// amrex::BCType::ext_dir copies the supplied Dirichlet condition into the ghost cells. 
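// Illustrative sketch (assuming a single ghost layer, lo-x face; the loops below
// handle all faces, ghost layers, and components) of what the fill reduces to:
//   foextrap (homogeneous Neumann): phi(lo-1,j,k,n) = phi(lo,j,k,n);    // boundary value taken from the first interior cell
//   ext_dir  (Dirichlet):           phi(lo-1,j,k,n) = phi_boundary;     // phi_boundary stands for the supplied Dirichlet value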
void MultiFabPhysBC(MultiFab& phi, const Geometry& geom, int scomp, int ncomp, int bccomp, const Real& time) { BL_PROFILE_VAR("MultiFabPhysBC()",MultiFabPhysBC); @@ -52,7 +52,7 @@ void MultiFabPhysBC(MultiFab& phi, const Geometry& geom, int scomp, int ncomp, i if (bx.smallEnd(0) < lo) { Real x = prob_lo[0]; - if (bc_lo[0] == FOEXTRAP) { + if (bc_lo[0] == amrex::BCType::foextrap) { amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept { if (i < lo) { @@ -62,7 +62,7 @@ void MultiFabPhysBC(MultiFab& phi, const Geometry& geom, int scomp, int ncomp, i } }); } - else if (bc_lo[0] == EXT_DIR) { + else if (bc_lo[0] == amrex::BCType::ext_dir) { amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept { if (i < lo) { @@ -76,7 +76,7 @@ void MultiFabPhysBC(MultiFab& phi, const Geometry& geom, int scomp, int ncomp, i if (bx.bigEnd(0) > hi) { Real x = prob_hi[0]; - if (bc_hi[0] == FOEXTRAP) { + if (bc_hi[0] == amrex::BCType::foextrap) { amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept { if (i > hi) { @@ -86,7 +86,7 @@ void MultiFabPhysBC(MultiFab& phi, const Geometry& geom, int scomp, int ncomp, i } }); } - else if (bc_hi[0] == EXT_DIR) { + else if (bc_hi[0] == amrex::BCType::ext_dir) { amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept { if (i > hi) { @@ -107,7 +107,7 @@ void MultiFabPhysBC(MultiFab& phi, const Geometry& geom, int scomp, int ncomp, i if (bx.smallEnd(1) < lo) { Real y = prob_lo[1]; - if (bc_lo[1] == FOEXTRAP) { + if (bc_lo[1] == amrex::BCType::foextrap) { amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept { if (j < lo) { @@ -117,7 +117,7 @@ void MultiFabPhysBC(MultiFab& phi, const Geometry& geom, int scomp, int ncomp, i } }); } - else if (bc_lo[1] == EXT_DIR) { + else if (bc_lo[1] == amrex::BCType::ext_dir) { amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept { if (j < lo) { @@ -131,7 +131,7 @@ void MultiFabPhysBC(MultiFab& phi, const Geometry& geom, int scomp, int ncomp, i if (bx.bigEnd(1) > hi) { Real y = prob_hi[1]; - if (bc_hi[1] == FOEXTRAP) { + if (bc_hi[1] == amrex::BCType::foextrap) { amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept { if (j > hi) { @@ -141,7 +141,7 @@ void MultiFabPhysBC(MultiFab& phi, const Geometry& geom, int scomp, int ncomp, i } }); } - else if (bc_hi[1] == EXT_DIR) { + else if (bc_hi[1] == amrex::BCType::ext_dir) { amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept { if (j > hi) { @@ -163,7 +163,7 @@ void MultiFabPhysBC(MultiFab& phi, const Geometry& geom, int scomp, int ncomp, i if (bx.smallEnd(2) < lo) { Real z = prob_lo[2]; - if (bc_lo[2] == FOEXTRAP) { + if (bc_lo[2] == amrex::BCType::foextrap) { amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept { if (k < lo) { @@ -173,7 +173,7 @@ void MultiFabPhysBC(MultiFab& phi, const Geometry& geom, int scomp, int ncomp, i } }); } - else if (bc_lo[2] == EXT_DIR) { + else if (bc_lo[2] == amrex::BCType::ext_dir) { amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept { if (k < lo) { @@ -187,7 +187,7 @@ void MultiFabPhysBC(MultiFab& phi, const Geometry& geom, int scomp, int ncomp, i if (bx.bigEnd(2) > hi) { Real z= prob_hi[2]; - if (bc_hi[2] == FOEXTRAP) { + if (bc_hi[2] == amrex::BCType::foextrap) { amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE 
(int i, int j, int k, int n) noexcept { if (k > hi) { @@ -197,7 +197,7 @@ void MultiFabPhysBC(MultiFab& phi, const Geometry& geom, int scomp, int ncomp, i } }); } - else if (bc_hi[2] == EXT_DIR) { + else if (bc_hi[2] == amrex::BCType::ext_dir) { amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept { if (k > hi) { diff --git a/src_compressible/compressible_functions.cpp b/src_compressible/compressible_functions.cpp index e2cf91423..8ab3611a7 100644 --- a/src_compressible/compressible_functions.cpp +++ b/src_compressible/compressible_functions.cpp @@ -9,6 +9,7 @@ AMREX_GPU_MANAGED int compressible::do_2D; AMREX_GPU_MANAGED int compressible::all_correl; AMREX_GPU_MANAGED int compressible::nspec_surfcov = 0; AMREX_GPU_MANAGED bool compressible::do_reservoir = false; +AMREX_GPU_MANAGED amrex::Real compressible::zeta_ratio = -1.0; void InitializeCompressibleNamespace() { @@ -70,6 +71,11 @@ void InitializeCompressibleNamespace() do_reservoir = true; } + // bulk viscosity ratio + pp.query("zeta_ratio",zeta_ratio); + if ((amrex::Math::abs(visc_type) == 3) and (zeta_ratio < 0.0)) amrex::Abort("need non-negative zeta_ratio (ratio of bulk to shear viscosity) for visc_type = 3 (use bulk viscosity)"); + if ((amrex::Math::abs(visc_type) == 3) and (zeta_ratio >= 0.0)) amrex::Print() << "bulk viscosity model selected; bulk viscosity ratio is: " << zeta_ratio << "\n"; + return; } diff --git a/src_compressible/compressible_namespace.H b/src_compressible/compressible_namespace.H index 55d85dc01..e75c9935c 100644 --- a/src_compressible/compressible_namespace.H +++ b/src_compressible/compressible_namespace.H @@ -8,6 +8,7 @@ namespace compressible { extern AMREX_GPU_MANAGED int all_correl; extern AMREX_GPU_MANAGED int nspec_surfcov; extern AMREX_GPU_MANAGED bool do_reservoir; + extern AMREX_GPU_MANAGED amrex::Real zeta_ratio; } diff --git a/src_compressible/transCoeffs.cpp b/src_compressible/transCoeffs.cpp index 385eb5969..5748d9ecc 100644 --- a/src_compressible/transCoeffs.cpp +++ b/src_compressible/transCoeffs.cpp @@ -75,6 +75,12 @@ void calculateTransportCoeffs(const MultiFab& prim_in, } } + // set bulk viscosity + if (amrex::Math::abs(visc_type) == 3) { + zeta(i,j,k) = zeta_ratio * eta(i,j,k); + } + + }); } } diff --git a/src_compressible_stag/compressible_functions_stag.H b/src_compressible_stag/compressible_functions_stag.H index 66f068f23..1ebaeca26 100644 --- a/src_compressible_stag/compressible_functions_stag.H +++ b/src_compressible_stag/compressible_functions_stag.H @@ -32,7 +32,8 @@ void WritePlotFileStag(int step, const MultiFab& surfcovMeans, const MultiFab& surfcovVars, const MultiFab& eta, - const MultiFab& kappa); + const MultiFab& kappa, + const MultiFab& zeta); void WriteSpatialCross3D(const Vector& spatialCross, int step, const Geometry& geom, const int ncross); diff --git a/src_compressible_stag/main_driver.cpp b/src_compressible_stag/main_driver.cpp index af1bcb182..1a1a0904a 100644 --- a/src_compressible_stag/main_driver.cpp +++ b/src_compressible_stag/main_driver.cpp @@ -717,7 +717,7 @@ void main_driver(const char* argv) if (plot_int > 0) { WritePlotFileStag(0, 0.0, geom, cu, cuMeans, cuVars, cumom, cumomMeans, cumomVars, - prim, primMeans, primVars, vel, velMeans, velVars, coVars, surfcov, surfcovMeans, surfcovVars, eta, kappa); + prim, primMeans, primVars, vel, velMeans, velVars, coVars, surfcov, surfcovMeans, surfcovVars, eta, kappa, zeta); #if defined(TURB) if (turbForcing > 0) { EvaluateWritePlotFileVelGrad(0, 0.0, geom, vel, vel_decomp); @@ 
-1145,7 +1145,7 @@ void main_driver(const char* argv) //yzAverage(cuMeans, cuVars, primMeans, primVars, spatialCross, // cuMeansAv, cuVarsAv, primMeansAv, primVarsAv, spatialCrossAv); WritePlotFileStag(step, time, geom, cu, cuMeans, cuVars, cumom, cumomMeans, cumomVars, - prim, primMeans, primVars, vel, velMeans, velVars, coVars, surfcov, surfcovMeans, surfcovVars, eta, kappa); + prim, primMeans, primVars, vel, velMeans, velVars, coVars, surfcov, surfcovMeans, surfcovVars, eta, kappa, zeta); #if defined(TURB) if (turbForcing > 0) { diff --git a/src_compressible_stag/writePlotFileStag.cpp b/src_compressible_stag/writePlotFileStag.cpp index 9735d157a..2be58ab8a 100644 --- a/src_compressible_stag/writePlotFileStag.cpp +++ b/src_compressible_stag/writePlotFileStag.cpp @@ -24,7 +24,8 @@ void WritePlotFileStag(int step, const amrex::MultiFab& surfcovMeans, const amrex::MultiFab& surfcovVars, const amrex::MultiFab& eta, - const amrex::MultiFab& kappa) + const amrex::MultiFab& kappa, + const amrex::MultiFab& zeta) { BL_PROFILE_VAR("writePlotFileStag()",writePlotFileStag); @@ -38,11 +39,13 @@ void WritePlotFileStag(int step, // prim: [vx, vy, vz, T, p, Yk, Xk] -- 5 + 2*nspecies // shifted [vx, vy, vz] -- 3 // eta, kappa -- 2 + // zeta -- 1 (only when visc_type = 3) nplot += nvars; nplot += 3; nplot += 5 + 2*nspecies; nplot += 3; nplot += 2; + if (amrex::Math::abs(visc_type) == 3) nplot += 1; if (nspec_surfcov>0) nplot += nspec_surfcov; @@ -132,6 +135,14 @@ void WritePlotFileStag(int step, amrex::MultiFab::Copy(plotfile,kappa,0,cnt,numvars,0); cnt+=numvars; + // instantaneous + // zeta -- 1 + if (amrex::Math::abs(visc_type) == 3) { + numvars = 1; + amrex::MultiFab::Copy(plotfile,zeta,0,cnt,numvars,0); + cnt+=numvars; + } + // instantaneous // surfcov -- nspec_surfcov if (nspec_surfcov>0) { @@ -277,6 +288,7 @@ void WritePlotFileStag(int step, varNames[cnt++] = "eta"; varNames[cnt++] = "kappa"; + if (amrex::Math::abs(visc_type) == 3) varNames[cnt++] = "zeta"; if (nspec_surfcov>0) { x = "surfcov_"; diff --git a/src_deankow/Make.package b/src_deankow/1d_stats/Make.package similarity index 100% rename from src_deankow/Make.package rename to src_deankow/1d_stats/Make.package diff --git a/src_deankow/1d_stats/main.cpp b/src_deankow/1d_stats/main.cpp new file mode 100644 index 000000000..9ae1c9bad --- /dev/null +++ b/src_deankow/1d_stats/main.cpp @@ -0,0 +1,464 @@ +#include +#include +#include +#include +#include +#include +// #include + +#include "common_functions.H" +#include "rng_functions.H" + +#include "myfunc.H" +#include "chrono" + +using namespace amrex; +using namespace std::chrono; + +int main (int argc, char* argv[]) +{ + amrex::Initialize(argc,argv); + + main_main(); + + amrex::Finalize(); + return 0; +} + +void main_main () +{ + // What time is it now? We'll use this to compute total run time. + auto strt_time = ParallelDescriptor::second(); + + // AMREX_SPACEDIM: number of dimensions + int n_cell, max_grid_size, nsteps, plot_int; + int nskip, nstat, ncopies, icor, istat; + int alg_type, avg_type; + int seed; + amrex::Real phileft, phiright; + amrex::Real npts_scale; + amrex::Real cfl; + Vector bc_lo(AMREX_SPACEDIM,0); + Vector bc_hi(AMREX_SPACEDIM,0); + + // inputs parameters + { + // ParmParse is way of reading inputs from the inputs file + ParmParse pp; + + // We need to get n_cell from the inputs file - this is the number of cells on each side of + // a square (or cubic) domain. 
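        // For reference, this reads an inputs line such as
        //   n_cell = 256
        // (cf. the singlelevel inputs file earlier in this diff).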
+ pp.get("n_cell",n_cell); + + // The domain is broken into boxes of size max_grid_size + pp.get("max_grid_size",max_grid_size); + + // Default plot_int to -1, allow us to set it to something else in the inputs file + // If plot_int < 0 then no plot files will be written + plot_int = -1; + pp.query("plot_int",plot_int); + + // Default nsteps to 0, allow us to set it to something else in the inputs file + nsteps = 10; + pp.query("nsteps",nsteps); + + nskip = 0; + pp.query("nskip",nskip); + + nstat = -1; + pp.query("nstat",nstat); + + icor = n_cell/2;; + pp.query("icor",icor); + + ncopies = 1; + pp.query("ncopies",ncopies); + + istat = 0; + + npts_scale = 1.; + pp.query ("npts_scale",npts_scale); + + alg_type = 0; + pp.query ("alg_type",alg_type); + + avg_type = 0; + pp.query ("avg_type",avg_type); + + phileft = 32; + pp.query ("phileft",phileft); + + phiright = 32; + pp.query ("phiright",phiright); + + seed = 0; + pp.query("seed",seed); + + cfl=.9; + pp.query ("cfl",cfl); + + + // By default, the boundary conditions will be set to periodic, or bc_lo = bc_hi = 0. + //Other options in this program include bc_lo, bc_hi = 2 for homogeneous Neumann, or + //bc_lo, bc_hi = 3 for external Dirichlet boundary conditions. + pp.queryarr("bc_lo", bc_lo); + pp.queryarr("bc_hi", bc_hi); + } + + amrex::Real copies = ncopies; + + Vector is_periodic(AMREX_SPACEDIM,0); + for (int idim=0; idim < AMREX_SPACEDIM; ++idim) { + if (bc_lo[idim] == BCType::int_dir && bc_hi[idim] == BCType::int_dir){ + is_periodic[idim] = 1; + } + } + + // make BoxArray and Geometry + BoxArray ba; + Geometry geom; + MultiFab stats_onegrid; + { + IntVect dom_lo(AMREX_D_DECL( 0, 0, 0)); + IntVect dom_hi(AMREX_D_DECL(n_cell-1, ncopies-1, n_cell-1)); + Box domain(dom_lo, dom_hi); + + // Initialize the boxarray "ba" from the single box "bx" + ba.define(domain); + // Break up boxarray "ba" into chunks no larger than "max_grid_size" along a direction + ba.maxSize(max_grid_size); + + + // This defines the physical box, [0,1] in each direction. 
+ RealBox real_box({AMREX_D_DECL( 0.0, 0.0, 0.0)}, + {AMREX_D_DECL( 1.0, copies, 1.0)}); + + // This defines a Geometry object + geom.define(domain,&real_box,CoordSys::cartesian,is_periodic.data()); + + BoxArray ba_onegrid; + Vector pmap(1); + const int ioproc = ParallelDescriptor::IOProcessorNumber(); + if(ioproc != 0){ + amrex::Print() << "IO processor is " << ioproc << std::endl; + } + pmap[0] = ioproc; + ba_onegrid.define(domain); + //DistributionMapping dmap_onegrid(ba_onegrid,pmap); + DistributionMapping dmap_onegrid(pmap); + stats_onegrid.define(ba_onegrid,dmap_onegrid,4,0); + + } + + // Nghost = number of ghost cells for each array + int Nghost = 1; + + // Ncomp = number of components for each array + int Ncomp; + if(alg_type == 0){ + Ncomp = 1; + } else { + Ncomp = 2; + } + + // How Boxes are distrubuted among MPI processes + DistributionMapping dm(ba); + + + ///////////////////////////////////////// + //Initialise rngs + ///////////////////////////////////////// + + int restart = -1; + if (restart < 0) { + + if (seed > 0) { + // initializes the seed for C++ random number calls + InitRandom(seed+ParallelDescriptor::MyProc(), + ParallelDescriptor::NProcs(), + seed+ParallelDescriptor::MyProc()); + } else if (seed == 0) { + // initializes the seed for C++ random number calls based on the clock + auto now = time_point_cast(system_clock::now()); + int randSeed = now.time_since_epoch().count(); + // broadcast the same root seed to all processors + ParallelDescriptor::Bcast(&randSeed,1,ParallelDescriptor::IOProcessorNumber()); + InitRandom(randSeed+ParallelDescriptor::MyProc(), + ParallelDescriptor::NProcs(), + randSeed+ParallelDescriptor::MyProc()); + } else { + Abort("Must supply non-negative seed"); + } + + } + + + // we allocate two phi multifabs; one will store the old state, the other the new. + MultiFab phi_old(ba, dm, Ncomp, Nghost); + MultiFab phi_new(ba, dm, Ncomp, Nghost); + + MultiFab stats(ba, dm, 4, 0); + stats.setVal(0.); + + + GpuArray dx = geom.CellSizeArray(); + + init_phi(phi_new, geom,npts_scale, Ncomp, phileft, phiright); + + //Boundary conditions are assigned to phi_old such that the ghost cells at the boundary will + //be filled to satisfy those conditions. 
+ Vector bc(phi_old.nComp()); + for (int n = 0; n < phi_old.nComp(); ++n) + { + for(int idim = 0; idim < AMREX_SPACEDIM; ++idim) + { + //Internal Dirichlet Periodic Boundary conditions, or bc_lo = bc_hi = 0 + if (bc_lo[idim] == BCType::int_dir) { + bc[n].setLo(idim, BCType::int_dir); + } + //First Order Extrapolation for Neumann boundary conditions or bc_lo, bc_hi = 2 + else if (bc_lo[idim] == BCType::foextrap) { + bc[n].setLo(idim, BCType::foextrap); + } + //External Dirichlet Boundary Condition, or bc_lo, bc_hi = 3 + else if(bc_lo[idim] == BCType::ext_dir) { + bc[n].setLo(idim, BCType::ext_dir); + } + else { + amrex::Abort("Invalid bc_lo"); + } + + //Internal Dirichlet Periodic Boundary conditions, or bc_lo = bc_hi = 0 + if (bc_hi[idim] == BCType::int_dir) { + bc[n].setHi(idim, BCType::int_dir); + } + //First Order Extrapolation for Neumann boundary conditions or bc_lo, bc_hi = 2 + else if (bc_hi[idim] == BCType::foextrap) { + bc[n].setHi(idim, BCType::foextrap); + } + //External Dirichlet Boundary Condition, or bc_lo, bc_hi = 3 + else if(bc_hi[idim] == BCType::ext_dir) { + bc[n].setHi(idim, BCType::ext_dir); + } + else { + amrex::Abort("Invalid bc_hi"); + } + } + } + +// Real coeff = AMREX_D_TERM( 2./(dx[0]*dx[0]), +// + 2./(dx[1]*dx[1]), +// + 2./(dx[2]*dx[2]) ); + Real coeff = 2./(dx[0]*dx[0]); + Real dt = cfl/(2.0*coeff); + + amrex::Print() << "dt = " << dt << " dx = " << dx[0] << std::endl; + + // time = starting time in the simulation + Real time = 0.0; + + // Write a plotfile of the initial data if plot_int > 0 (plot_int was defined in the inputs file) + if (plot_int > 0) + { + int n = 0; + const std::string& pltfile = amrex::Concatenate("plt",n,7); + WriteSingleLevelPlotfile(pltfile, phi_new, {"phi"}, geom, time, 0); + } + + // build the flux multifabs + Array flux; + std::array< MultiFab, AMREX_SPACEDIM > stochFlux; + for (int dir = 0; dir < AMREX_SPACEDIM; dir++) + { + // flux(dir) has one component, zero ghost cells, and is nodal in direction dir + BoxArray edge_ba = ba; + edge_ba.surroundingNodes(dir); + flux[dir].define(edge_ba, dm, Ncomp, 0); + stochFlux[dir].define(edge_ba, dm, Ncomp, 0); + } + + flux[1].setVal(0.); + AMREX_D_TERM(stochFlux[0].setVal(0.0);, + stochFlux[1].setVal(0.0);, + stochFlux[2].setVal(0.0);); + + nsteps += nskip; + + for (int n = 1; n <= nsteps; ++n) + { + MultiFab::Copy(phi_old, phi_new, 0, 0, Ncomp, 0); + + // new_phi = old_phi + dt * (something) + advance(phi_old, phi_new, flux, stochFlux, dt, npts_scale, geom, bc, Ncomp, phileft, phiright, avg_type); + time = time + dt; + + // Tell the I/O Processor to write out which step we're doing + amrex::Print() << "Advanced step " << n << "\n"; + + // Write a plotfile of the current data (plot_int was defined in the inputs file) + if (nstat > 0 && n > nskip && n%nstat == 0){ + + istat += 1; + + for ( MFIter mfi(stats); mfi.isValid(); ++mfi ) + { + const Box& vbx = mfi.validbox(); + const auto lo = amrex::lbound(vbx); + const auto hi = amrex::ubound(vbx); + + auto const& phiNew = phi_new.array(mfi); + auto const& stat_arr = stats.array(mfi); + + for (auto k = lo.z; k <= hi.z; ++k) { + for (auto j = lo.y; j <= hi.y; ++j) { + for (auto i = lo.x; i <= hi.x; ++i) { + stat_arr(i,j,k,0) += phiNew(i,j,k); + stat_arr(i,j,k,1) += phiNew(i,j,k)*phiNew(i,j,k); + stat_arr(i,j,k,2) += phiNew(i,j,k)*phiNew(icor,j,k); + stat_arr(i,j,k,3) += phiNew(icor,j,k); + } + } + } + + + } + + } + if (plot_int > 0 && n%plot_int == 0) + { + const std::string& pltfile = amrex::Concatenate("plt",n,7); + WriteSingleLevelPlotfile(pltfile, 
phi_new, {"phi"}, geom, time, n); + + if(nstat > 0 && istat > 0){ + + stats_onegrid.ParallelCopy(stats,0,0,4); + amrex::Real mean[n_cell]; + amrex::Real var[n_cell]; + amrex::Real cor[n_cell]; + amrex::Real mean_avg[n_cell]; + amrex::Real var_avg[n_cell]; + amrex::Real cor_avg[n_cell]; + + amrex::Real num_stats = istat; + + for (MFIter mfi(stats_onegrid); mfi.isValid(); ++mfi ) { + + const Array4& stat_arr = stats_onegrid.array(mfi); + + for (int i=0; i Ephi(2,0.); + Real Ephimin = npts_scale; + for ( MFIter mfi(phi_old); mfi.isValid(); ++mfi ) + { + const Box& vbx = mfi.validbox(); + const auto lo = amrex::lbound(vbx); + const auto hi = amrex::ubound(vbx); + + auto const& phiNew = phi_new.array(mfi); + + for (auto k = lo.z; k <= hi.z; ++k) { + for (auto j = lo.y; j <= hi.y; ++j) { + for (auto i = lo.x; i <= hi.x; ++i) { + Ephi[0] += phiNew(i,j,k); + Ephi[1] += phiNew(i,j,k)*phiNew(i,j,k); + Ephimin = std::min(Ephimin,phiNew(i,j,k)); + } + } + } + + + } + + const int IOProc = ParallelDescriptor::IOProcessorNumber(); + ParallelDescriptor::ReduceRealSum(Ephi.dataPtr(),2); + ParallelDescriptor::ReduceRealMin(Ephimin); + amrex::Real scale = n_cell*n_cell; + amrex::Real scale2 = AMREX_D_TERM( dx[0], + * dx[1], + * dx[2] ); + + amrex::Print() << "phi variance = " << Ephi[1]/scale - (Ephi[0]*Ephi[0] + /(scale*scale)) << std::endl; + amrex::Print() << "phi integral = " << Ephi[0]*scale2 << std::endl; + amrex::Print() << "phi min = " << Ephimin << std::endl; +*/ + + } + + // Call the timer again and compute the maximum difference between the start time and stop time + // over all processors + auto stop_time = ParallelDescriptor::second() - strt_time; + const int IOProc = ParallelDescriptor::IOProcessorNumber(); + ParallelDescriptor::ReduceRealMax(stop_time,IOProc); + + // Tell the I/O Processor to write out the "run time" + amrex::Print() << "Run time = " << stop_time << std::endl; +} diff --git a/src_deankow/1d_stats/myfunc.H b/src_deankow/1d_stats/myfunc.H new file mode 100644 index 000000000..1dfe0780d --- /dev/null +++ b/src_deankow/1d_stats/myfunc.H @@ -0,0 +1,27 @@ +#ifndef MYFUNC_H_ +#define MYFUNC_H_ + +#include +#include +#include + +using namespace amrex; + +void main_main (); + +void advance (amrex::MultiFab& phi_old, + amrex::MultiFab& phi_new, + amrex::Array& flux, + amrex::Array& stcchflux, + amrex::Real dt, + amrex::Real npts_scale, + amrex::Geometry const& geom, + Vector const& BoundaryCondition, + int Ncomp, + amrex::Real phileft, + amrex::Real phiright, + int avg_type); + +void init_phi (amrex::MultiFab& phi_new, amrex::Geometry const& geom, amrex::Real npts_scale, int Ncomp, + amrex::Real phileft, amrex::Real phiright); +#endif diff --git a/src_deankow/1d_stats/myfunc.cpp b/src_deankow/1d_stats/myfunc.cpp new file mode 100644 index 000000000..711207b8d --- /dev/null +++ b/src_deankow/1d_stats/myfunc.cpp @@ -0,0 +1,154 @@ +#include "myfunc.H" +#include "mykernel.H" +#include +#include +#include "common_functions.H" +#include "rng_functions.H" + +void advance (MultiFab& phi_old, + MultiFab& phi_new, + Array& flux, + Array& stochFlux, + Real dt, + Real npts_scale, + Geometry const& geom, + Vector const& BoundaryCondition, + int Ncomp, amrex::Real phileft, amrex::Real phiright, int avg_type) +{ + + // Fill the ghost cells of each grid from the other grids + // includes periodic domain boundaries + phi_old.FillBoundary(geom.periodicity()); + // There are physical boundaries to fill. 
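    // (FillDomainBoundary, from AMReX's BCUtil, fills the ghost cells on any
    //  non-periodic domain face according to the BCRec vector built in main().)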
+ FillDomainBoundary(phi_old, geom, BoundaryCondition); + + const BCRec& bc = BoundaryCondition[0]; + + // + // Note that this simple example is not optimized. + // The following two MFIter loops could be merged + // and we do not have to use flux MultiFab. + // + // ======================================================= + + // This example supports both 2D and 3D. Otherwise, + // we would not need to use AMREX_D_TERM. + AMREX_D_TERM(const Real dxinv = geom.InvCellSize(0);, + const Real dyinv = geom.InvCellSize(1);, + const Real dzinv = geom.InvCellSize(2);); + + const Box& domain_bx = geom.Domain(); + const auto dom_lo = lbound(domain_bx); + const auto dom_hi = ubound(domain_bx); + + //Real variance = dxinv*dyinv/(npts_scale*dt); + Real variance = dxinv*dyinv/dt; + +#if(AMREX_SPACEDIM > 2) + variance *=dzinv; +#endif +// amrex::Print() << dxinv << " " << dyinv << std::endl; +// amrex::Print() << npts_scale << std::endl; +// amrex::Print() << " variance " << variance << std::endl; + + // fill random numbers (can skip density component 0) +// for(int d=0;d 2) + const Box& zbx = mfi.nodaltilebox(2); + auto const& fluxz = flux[2].array(mfi); +#endif + + auto const& stochfluxx = stochFlux[0].array(mfi); +// auto const& stochfluxy = stochFlux[1].array(mfi); +#if (AMREX_SPACEDIM > 2) + auto const& stochfluxz = stochFlux[2].array(mfi); +#endif + const Box& bx = mfi.validbox(); + const auto lo = lbound(bx); + const auto hi = ubound(bx); + + auto const& phi = phi_old.array(mfi); + + + + + amrex::ParallelFor(xbx, + [=] AMREX_GPU_DEVICE (int i, int j, int k) + { + compute_flux_x(i,j,k,fluxx,stochfluxx,phi,dxinv, + lo.x, hi.x, dom_lo.x, dom_hi.x, bc.lo(0), bc.hi(0),Ncomp, + phileft,phiright,avg_type); + }); + +// amrex::ParallelFor(ybx, +// [=] AMREX_GPU_DEVICE (int i, int j, int k) +// { +// compute_flux_y(i,j,k,fluxy,stochfluxy,phi,dyinv, +// lo.y, hi.y, dom_lo.y, dom_hi.y, bc.lo(1), bc.hi(1),Ncomp); +// }); +#if (AMREX_SPACEDIM > 2) + amrex::ParallelFor(zbx, + [=] AMREX_GPU_DEVICE (int i, int j, int k) + { + compute_flux_z(i,j,k,fluxz,stochfluxz,phi,dzinv, + lo.z, hi.z, dom_lo.z, dom_hi.z, bc.lo(2), bc.hi(2),Ncomp); + }); +#endif + } + + // Advance the solution one grid at a time + for ( MFIter mfi(phi_old); mfi.isValid(); ++mfi ) + { + const Box& vbx = mfi.validbox(); + auto const& fluxx = flux[0].array(mfi); + auto const& fluxy = flux[1].array(mfi); +#if (AMREX_SPACEDIM > 2) + auto const& fluxz = flux[2].array(mfi); +#endif + auto const& phiOld = phi_old.array(mfi); + auto const& phiNew = phi_new.array(mfi); + + amrex::ParallelFor(vbx, + [=] AMREX_GPU_DEVICE (int i, int j, int k) + { + update_phi(i,j,k,phiOld,phiNew, + AMREX_D_DECL(fluxx,fluxy,fluxz), + dt, + AMREX_D_DECL(dxinv,dyinv,dzinv), + Ncomp); + }); + } +} + +void init_phi(amrex::MultiFab& phi_new, amrex::Geometry const& geom, amrex::Real npts_scale,int Ncomp, + amrex::Real phileft, amrex::Real phiright){ + + GpuArray dx = geom.CellSizeArray(); + GpuArray prob_lo = geom.ProbLoArray(); + // ======================================= + // Initialize phi_new by calling a Fortran routine. 
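+    // (In this version the initialization is actually done by the C++
+    //  init_phi device kernel below, launched with amrex::ParallelFor,
+    //  rather than by a Fortran routine.)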
+ // MFIter = MultiFab Iterator + for (MFIter mfi(phi_new); mfi.isValid(); ++mfi) + { + const Box& vbx = mfi.validbox(); + auto const& phiNew = phi_new.array(mfi); + amrex::ParallelFor(vbx, + [=] AMREX_GPU_DEVICE(int i, int j, int k) + { + init_phi(i,j,k,phiNew,dx,prob_lo, npts_scale,Ncomp,phileft, phiright); + }); + } +} diff --git a/src_deankow/1d_stats/mykernel.H b/src_deankow/1d_stats/mykernel.H new file mode 100644 index 000000000..09b456906 --- /dev/null +++ b/src_deankow/1d_stats/mykernel.H @@ -0,0 +1,262 @@ +#ifndef MY_KERNEL_H_ +#define MY_KERNEL_H_ + +#include + +AMREX_GPU_DEVICE AMREX_FORCE_INLINE +amrex::Real reg_sqrt (amrex::Real phi, amrex::Real deltaN) noexcept +{ + amrex::Real phiabs = std::abs(phi); + amrex::Real smsqrt; + if(phiabs <= deltaN/2.) { + smsqrt = phiabs/std::sqrt(deltaN); + } else if (phiabs <= deltaN) { + smsqrt = std::sqrt(deltaN)/2. - 1.5*phiabs/std::sqrt(deltaN) + 4.*phiabs*phiabs*std::pow(deltaN,-1.5)-2.*std::pow(phiabs,3.)*std::pow(deltaN,-2.5); + } else { + smsqrt=std::sqrt(phiabs); + } + return ( (phi > 0.) ? smsqrt : -smsqrt); + //return ( phi/std::sqrt(deltaN)); +} + + + +AMREX_GPU_DEVICE AMREX_FORCE_INLINE +void init_phi (int i, int j, int k, + amrex::Array4 const& phi, + GpuArray const& dx, + GpuArray const& prob_lo, + amrex::Real npts_scale,int Ncomp, amrex::Real phileft, amrex::Real phiright) +{ + using amrex::Real;; + + amrex::Real slope = phiright-phileft; + + + Real x = prob_lo[0] + (i+Real(0.5)) * dx[0]; + Real y = prob_lo[1] + (j+Real(0.5)) * dx[1]; +#if (AMREX_SPACEDIM > 2) + Real z = prob_lo[2] + (k+Real(0.5)) * dx[2]; + Real r2 = ((x-Real(0.25))*(x-Real(0.25))+(y-Real(0.25))*(y-Real(0.25))+(z-Real(0.25))*(z-Real(0.25)))/Real(0.01); +#else + Real z = Real(0.); + Real r2 = ((x-Real(0.25))*(x-Real(0.25))+(y-Real(0.25))*(y-Real(0.25)))/Real(0.01); +#endif +// phi(i,j,k,0) = 50./cellvol; +// phi(i,j,k,0) = .1; + + phi(i,j,k,0) = phileft+x*slope; + if(Ncomp == 2){ + phi(i,j,k,1) = phi(i,j,k,0); + } +} + + +AMREX_GPU_DEVICE AMREX_FORCE_INLINE +void compute_flux_x (int i, int j, int k, + amrex::Array4 const& fluxx, + amrex::Array4 const& stochfluxx, + amrex::Array4 const& phi, amrex::Real dxinv, + int lo, int hi, int dom_lo, int dom_hi, int bc_lo, int bc_hi,int Ncomp, + amrex::Real phileft, amrex::Real phiright, int avg_type) +{ +// amrex::Real cellvol = 1./dxinv; +// phileft = .1; +// phiright = .1; + //amrex::Real deltaN = std::pow(0.5*(phileft+phiright),2./3.); + amrex::Real deltaN = std::pow(0.5*(phileft+phiright),1./3.); +// amrex::Print() << "deltaN " << deltaN << std::endl; +// amrex::Real val,flag; +// for (int n=0; n<201; n++){ +// val=(n-100.)*deltaN/50.; +// flag = (val>0) ? 
1.:-1.; +// amrex::Print() << "curve " << val << " " << reg_sqrt(val,deltaN) << " " << flag*std::sqrt(std::abs(val)) << std::endl; +// } + + int coeff_index = Ncomp-1; + if (i == dom_lo && bc_lo == BCType::foextrap ) + { + fluxx(i,j,k,0) = 0.; + if(Ncomp == 2){ + fluxx(i,j,k,1) = 0.; + } + } else if ( i == dom_lo && bc_lo == BCType::ext_dir ) + { + fluxx(i,j,k,0) = 0.5*(phi(i,j,k,0)-phileft) * dxinv / 0.5 + std::sqrt(2.*phileft)*stochfluxx(i,j,k); + if(Ncomp == 2){ + fluxx(i,j,k,1) = 0.5*(phi(i,j,k,1)-phileft) * dxinv / 0.5 ; + } + } else if ( i == dom_hi+1 && bc_lo == BCType::foextrap ) + { + fluxx(i,j,k,0) = 0.; + if(Ncomp == 2){ + fluxx(i,j,k,1) = 0.; + } + } else if ( i == dom_hi+1 && bc_lo == BCType::ext_dir ) + { + fluxx(i,j,k,0) = 0.5*(phiright-phi(i-1,j,k,0)) * dxinv / 0.5 + std::sqrt(2.*phiright)*stochfluxx(i,j,k); + if(Ncomp == 2){ + fluxx(i,j,k,1) = 0.5*(phiright-phi(i-1,j,k,1)) * dxinv / 0.5 ; + } + } else + { +// amrex::Real phiavg = 2.*(phi(i,j,k)*phi(i-1,j,k))/(phi(i,j,k)+phi(i-1,j,k)+1.e-16); + amrex::Real phiavg, phip, phim, noise_coeff; + if(avg_type == 0 || avg_type == 2 ) { + + phip = std::max(phi(i,j,k,coeff_index),0.); + phim = std::max(phi(i-1,j,k,coeff_index),0.); + phiavg = 0.5*(phip+phim); + noise_coeff = std::sqrt(phiavg); + + } else if (avg_type == 1) { + + phip = std::max(phi(i,j,k,coeff_index),0.); + phim = std::max(phi(i-1,j,k,coeff_index),0.); + phiavg = 2.*(phip*phim)/(phip+phim+1.e-16); + noise_coeff = std::sqrt(phiavg); + + } else if (avg_type == 3) { + + phip = phi(i,j,k,coeff_index); + phim = phi(i-1,j,k,coeff_index); + phiavg = 0.5*(phip+phim); + noise_coeff = reg_sqrt(phiavg,deltaN); + } +// phiavg = .1; +// amrex::Print() << "test " << std::sqrt(phiavg) << " " << phiavg/std::sqrt(deltaN) << " " << deltaN << std::endl; + //phiavg = 1600.; + fluxx(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i-1,j,k,0)) * dxinv + + stochfluxx(i,j,k) * noise_coeff; +// + stochfluxx(i,j,k) * reg_sqrt(phiavg,deltaN); +// + stochfluxx(i,j,k) * phiavg/std::sqrt(deltaN); + if(Ncomp == 2){ + fluxx(i,j,k,1) = 0.5*(phi(i,j,k,1)-phi(i-1,j,k,1)) * dxinv; + } + } +} + +AMREX_GPU_DEVICE AMREX_FORCE_INLINE +void compute_flux_y (int i, int j, int k, + amrex::Array4 const& fluxy, + amrex::Array4 const& stochfluxy, + amrex::Array4 const& phi, amrex::Real dyinv, + int lo, int hi, int dom_lo, int dom_hi, int bc_lo, int bc_hi, int Ncomp) +{ + int coeff_index = Ncomp-1; + if (lo == dom_lo && + (bc_lo == BCType::foextrap || + bc_lo == BCType::ext_dir)) + { + if(j == lo) + { + fluxy(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j-1,k,0)) * dyinv / 0.5; + } + else + { + fluxy(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j-1,k,0)) * dyinv; + } + } + else if (hi == dom_hi && + (bc_hi == BCType::foextrap || + bc_hi == BCType::ext_dir)) + { + if(j == hi+1) + { + fluxy(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j-1,k,0)) * dyinv / 0.5; + } + else + { + fluxy(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j-1,k,0)) * dyinv; + } + } + else + { +// amrex::Real phiavg = 2.*(phi(i,j,k)*phi(i,j-1,k))/(phi(i,j,k)+phi(i,j-1,k)+1.e-16); + Real phip = std::max(phi(i,j,k,coeff_index),0.); + Real phim = std::max(phi(i,j-1,k,coeff_index),0.); + amrex::Real phiavg = 2.*(phip*phim)/(phip+phim+1.e-16); + fluxy(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j-1,k,0)) * dyinv + + stochfluxy(i,j,k)* std::sqrt(phiavg); + if(Ncomp == 2){ + fluxy(i,j,k,1) = 0.5*(phi(i,j,k,1)-phi(i,j-1,k,1)) * dyinv; + } + } +} + + +#if (AMREX_SPACEDIM > 2) +AMREX_GPU_DEVICE AMREX_FORCE_INLINE +void compute_flux_z (int i, int j, int k, + amrex::Array4 const& fluxz, + amrex::Array4 const& stochfluxz, 
+ amrex::Array4 const& phi, amrex::Real dzinv, + int lo, int hi, int dom_lo, int dom_hi, int bc_lo, int bc_hi) +{ + int coeff_index = Ncomp-1; + if (lo == dom_lo && + (bc_lo == BCType::foextrap || + bc_lo == BCType::ext_dir)) + { + if(k == lo) + { + fluxz(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j,k-1,0)) * dzinv / 0.5; + } + else + { + fluxz(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j,k-1,0)) * dzinv; + } + } + else if (hi == dom_hi && + (bc_hi == BCType::foextrap || + bc_hi == BCType::ext_dir)) + { + if(k == hi+1) + { + fluxz(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j,k-1,0)) * dzinv / 0.5; + } + else + { + fluxz(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j,k-1,0)) * dzinv; + } + } + else + { +// amrex::Real phiavg = 2.*(phi(i,j,k)*phi(i,j,k-1))/(phi(i,j,k)+phi(i,j,k-1)+1.e-16); + Real phip = std::max(phi(i,j,k,coeff_index),0.); + Real phim = std::max(phi(i,j,k-1,coeff_index),0.); + amrex::Real phiavg = 2.*(phip*phim)/(phip+phim+1.e-16); + fluxz(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j,k-1,0)) * dzinv + +stochfluxz(i,j,k) * std::sqrt(phiavg); + if(Ncomp == 2){ + fluxz(i,j,k,1) = 0.5*(phi(i,j,k,1)-phi(i,j,k-1,1)) * dzinv; + } + } +} +#endif + +AMREX_GPU_DEVICE AMREX_FORCE_INLINE +void update_phi (int i, int j, int k, + amrex::Array4 const& phiold, + amrex::Array4 const& phinew, + AMREX_D_DECL(amrex::Array4 const& fluxx, + amrex::Array4 const& fluxy, + amrex::Array4 const& fluxz), + amrex::Real dt, + AMREX_D_DECL(amrex::Real dxinv, + amrex::Real dyinv, + amrex::Real dzinv), + int Ncomp) +{ + for (int n = 0; n < Ncomp; n++){ + phinew(i,j,k,n) = phiold(i,j,k,n) + + dt * dxinv * (fluxx(i+1,j ,k,n ) - fluxx(i,j,k,n)) + + dt * dyinv * (fluxy(i ,j+1,k,n ) - fluxy(i,j,k,n)) +#if (AMREX_SPACEDIM > 2) + + dt * dzinv * (fluxz(i ,j ,k+1,n) - fluxz(i,j,k,n)); +#else + ; +#endif + } +} +#endif diff --git a/src_deankow/multilevel/AdvancePhiAtLevel.cpp b/src_deankow/multilevel/AdvancePhiAtLevel.cpp new file mode 100644 index 000000000..abfce9d99 --- /dev/null +++ b/src_deankow/multilevel/AdvancePhiAtLevel.cpp @@ -0,0 +1,47 @@ +#include +#include +#include + +using namespace amrex; + +// Advance a single level for a single time step, updates flux registers +void +AmrCoreAdv::AdvancePhiAtLevel (int lev, Real /*time*/, Real dt_lev, int /*iteration*/, int /*ncycle*/) +{ + Array fluxes; + Array stochFluxes; + for (int i = 0; i < AMREX_SPACEDIM; ++i) + { + BoxArray ba = grids[lev]; + ba.surroundingNodes(i); + fluxes[i].define(ba, dmap[lev], phi_new[lev].nComp(), 0); + stochFluxes[i].define(ba, dmap[lev], phi_new[lev].nComp(), 0); + stochFluxes[i].setVal(0.); + } + + phi_old[lev].FillBoundary(Geom(lev).periodicity()); + advance_phi(phi_old[lev], phi_new[lev], fluxes, stochFluxes, dt_lev, npts_scale, geom[lev], bcs); + + // Increment or decrement the flux registers by area and time-weighted fluxes + // Note that the fluxes have already been scaled by dt and area + // In this example we are solving phi_t = -div(+F) + // The fluxes contain, e.g., F_{i+1/2,j} = (phi*u)_{i+1/2,j} + // Keep this in mind when considering the different sign convention for updating + // the flux registers from the coarse or fine grid perspective + // NOTE: the flux register associated with flux_reg[lev] is associated + // with the lev/lev-1 interface (and has grid spacing associated with lev-1) + if (do_reflux) { + if (flux_reg[lev+1]) { + for (int i = 0; i < AMREX_SPACEDIM; ++i) { + // update the lev+1/lev flux register (index lev+1) + flux_reg[lev+1]->CrseInit(fluxes[i],i,0,0,fluxes[i].nComp(),1.0); + } + } + if (flux_reg[lev]) { + for (int i = 0; i < 
AMREX_SPACEDIM; ++i) { + // update the lev/lev-1 flux register (index lev) + flux_reg[lev]->FineAdd(fluxes[i],i,0,0,fluxes[i].nComp(),-1.0); + } + } + } +} diff --git a/src_deankow/multilevel/AmrCoreAdv.H b/src_deankow/multilevel/AmrCoreAdv.H new file mode 100644 index 000000000..a58e496ee --- /dev/null +++ b/src_deankow/multilevel/AmrCoreAdv.H @@ -0,0 +1,215 @@ +#ifndef AmrCoreAdv_H_ +#define AmrCoreAdv_H_ + +#include +#ifdef AMREX_PARTICLES +# include "ParticleData.H" +#endif + +#include +#include +#include +#include + +#ifdef AMREX_USE_OMP +# include +#endif + +#include +#include +#include + + +class AmrCoreAdv + : public amrex::AmrCore +{ +public: + + //////////////// + // public member functions + + // constructor - reads in parameters from inputs file + // - sizes multilevel arrays and data structures + AmrCoreAdv (); + ~AmrCoreAdv () override; + + AmrCoreAdv (AmrCoreAdv&& rhs) noexcept = default; + AmrCoreAdv& operator= (AmrCoreAdv&& rhs) noexcept = default; + + AmrCoreAdv (const AmrCoreAdv& rhs) = delete; + AmrCoreAdv& operator= (const AmrCoreAdv& rhs) = delete; + + // advance solution to final time + void Evolve (); + + // initializes multilevel data + void InitData (); + + // Create the special boxArray that handles the particle-mesh communication + void MakeFBA (const amrex::BoxArray& ba); + + // Make a new level using provided BoxArray and DistributionMapping and + // fill with interpolated coarse level data. + // overrides the pure virtual function in AmrCore + void MakeNewLevelFromCoarse (int lev, amrex::Real time, const amrex::BoxArray& ba, + const amrex::DistributionMapping& dm) override; + + // Remake an existing level using provided BoxArray and DistributionMapping and + // fill with existing fine and coarse data. + // overrides the pure virtual function in AmrCore + void RemakeLevel (int lev, amrex::Real time, const amrex::BoxArray& ba, + const amrex::DistributionMapping& dm) override; + + // Delete level data + // overrides the pure virtual function in AmrCore + void ClearLevel (int lev) override; + + // Make a new level from scratch using provided BoxArray and DistributionMapping. + // Only used during initialization. 
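+    // (called once per level by InitFromScratch, via InitData -> MakeNewGrids)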
+ // overrides the pure virtual function in AmrCore + void MakeNewLevelFromScratch (int lev, amrex::Real time, const amrex::BoxArray& ba, + const amrex::DistributionMapping& dm) override; + + // tag all cells for refinement + // overrides the pure virtual function in AmrCore + void ErrorEst (int lev, amrex::TagBoxArray& tags, amrex::Real time, int ngrow) override; + + // Advance phi at a single level for a single time step, update flux registers + void AdvancePhiAtLevel (int lev, amrex::Real time, amrex::Real dt_lev, int iteration, int ncycle); + + // compute dt from CFL considerations + amrex::Real EstTimeStep (int lev, amrex::Real time); + +#ifdef AMREX_PARTICLES + ParticleData particleData; +#endif + + +private: + + //////////////// + // private member functions + + // read in some parameters from inputs file + void ReadParameters(); + + // set covered coarse cells to be the average of overlying fine cells + void AverageDown (); + + // more flexible version of AverageDown() that lets you average down across multiple levels + void AverageDownTo (int crse_lev); + + enum class FillPatchType { fillpatch_class, fillpatch_function }; + + // compute a new multifab by coping in phi from valid region and filling ghost cells + // works for single level and 2-level cases (fill fine grid ghost by interpolating from coarse) + void FillPatch (int lev, amrex::Real time, amrex::MultiFab& mf, int icomp, + int ncomp, FillPatchType fptype); + + // fill an entire multifab by interpolating from the coarser level + // this comes into play when a new level of refinement appears + void FillCoarsePatch (int lev, amrex::Real time, amrex::MultiFab& mf, int icomp, int ncomp); + + // Pack pointers to phi_old and/or phi_new and associated times. + void GetData (int lev, amrex::Real time, amrex::Vector& data, + amrex::Vector& datatime); + + // Advance a level by dt - includes a recursive call for finer levels + void timeStepWithSubcycling (int lev, amrex::Real time, int iteration); + + // Advance all levels by the same dt + void timeStepNoSubcycling (amrex::Real time, int iteration); + + // a wrapper for EstTimeStep + void ComputeDt (); + + // get plotfile name + [[nodiscard]] std::string PlotFileName (int lev) const; + + // put together an array of multifabs for writing + [[nodiscard]] amrex::Vector PlotFileMF () const; + + // set plotfile variables names + [[nodiscard]] static amrex::Vector PlotFileVarNames (); + + // write plotfile to disk + void WritePlotFile () const; + + // write checkpoint file to disk + void WriteCheckpointFile () const; + + // read checkpoint file from disk + void ReadCheckpointFile (); + + //////////////// + // private data members + + amrex::Vector istep; // which step? + amrex::Vector nsubsteps; // how many substeps on each level? + + // keep track of old time, new time, and time step at each level + amrex::Vector t_new; + amrex::Vector t_old; + amrex::Vector dt; + + // array of multifabs to store the solution at each level of refinement + // after advancing a level we use "swap". 
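+    // i.e. at the start of each advance the previous "new" data becomes the
+    // "old" data, roughly:
+    //     std::swap(phi_old[lev], phi_new[lev]);   // see timeStepNoSubcycling()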
+ amrex::Vector phi_new; + amrex::Vector phi_old; + + // this is essentially a 2*DIM integer array storing the physical boundary + // condition types at the lo/hi walls in each direction + amrex::Vector bcs; // 1-component + + // stores fluxes at coarse-fine interface for synchronization + // this will be sized "nlevs_max+1" + // NOTE: the flux register associated with flux_reg[lev] is associated + // with the lev/lev-1 interface (and has grid spacing associated with lev-1) + // therefore flux_reg[0] and flux_reg[nlevs_max] are never actually + // used in the reflux operation + amrex::Vector > flux_reg; + + // This is for fillpatch during timestepping, but not for regridding. + amrex::Vector>> fillpatcher; + + //////////////// + // runtime parameters + + // Specific to this application + amrex::Real npts_scale; + int alg_type; + + int seed=0; + + amrex::BoxArray grown_fba; + + // maximum number of steps and stop time + int max_step = std::numeric_limits::max(); + amrex::Real stop_time = std::numeric_limits::max(); + + // if >= 0 we restart from a checkpoint + std::string restart_chkfile; + + // advective cfl number - dt = cfl*dx/umax + amrex::Real cfl = 0.7; + + // how often each level regrids the higher levels of refinement + // (after a level advances that many time steps) + int regrid_int = 2; + + // hyperbolic refluxing as part of multilevel synchronization + int do_reflux = 1; + + // do we subcycle in time? + int do_subcycle = 1; + + // plotfile prefix and frequency + std::string plot_file {"plt"}; + int plot_int = -1; + + // checkpoint prefix and frequency + std::string chk_file {"chk"}; + int chk_int = -1; +}; + +#endif diff --git a/src_deankow/multilevel/AmrCoreAdv.cpp b/src_deankow/multilevel/AmrCoreAdv.cpp new file mode 100644 index 000000000..395ff57a9 --- /dev/null +++ b/src_deankow/multilevel/AmrCoreAdv.cpp @@ -0,0 +1,974 @@ + +#include +#include +#include +#include +#include +#include + +#ifdef AMREX_MEM_PROFILING +#include +#endif + +#include +#include +#include +#include +#include "chrono" + +using namespace amrex; +using namespace std::chrono; + +// constructor - reads in parameters from inputs file +// - sizes multilevel arrays and data structures +// - initializes BCRec boundary condition object +AmrCoreAdv::AmrCoreAdv () +{ + ReadParameters(); + + ///////////////////////////////////////// + //Initialise rngs + ///////////////////////////////////////// + int restart = -1; + + if (restart < 0) { + + if (seed > 0) { + // initializes the seed for C++ random number calls + InitRandom(seed+ParallelDescriptor::MyProc(), + ParallelDescriptor::NProcs(), + seed+ParallelDescriptor::MyProc()); + } else if (seed == 0) { + // initializes the seed for C++ random number calls based on the clock + auto now = time_point_cast(system_clock::now()); + int randSeed = now.time_since_epoch().count(); + // broadcast the same root seed to all processors + ParallelDescriptor::Bcast(&randSeed,1,ParallelDescriptor::IOProcessorNumber()); + InitRandom(randSeed+ParallelDescriptor::MyProc(), + ParallelDescriptor::NProcs(), + randSeed+ParallelDescriptor::MyProc()); + } else { + Abort("Must supply non-negative seed"); + } + } + + // Geometry on all levels has been defined already. + + // No valid BoxArray and DistributionMapping have been defined. + // But the arrays for them have been resized. 
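+    // The per-level containers below are therefore only sized here (for
+    // max_level+1 levels); the MultiFabs themselves are defined later in
+    // MakeNewLevelFromScratch / MakeNewLevelFromCoarse.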
+ + int nlevs_max = max_level + 1; + + istep.resize(nlevs_max, 0); + nsubsteps.resize(nlevs_max, 1); + if (do_subcycle) { + for (int lev = 1; lev <= max_level; ++lev) { + nsubsteps[lev] = MaxRefRatio(lev-1); + } + } + + t_new.resize(nlevs_max, 0.0); + t_old.resize(nlevs_max, -1.e100); + dt.resize(nlevs_max, 1.e100); + + phi_new.resize(nlevs_max); + phi_old.resize(nlevs_max); + + // periodic boundaries + int bc_lo[] = {BCType::int_dir, BCType::int_dir, BCType::int_dir}; + int bc_hi[] = {BCType::int_dir, BCType::int_dir, BCType::int_dir}; + +/* + // walls (Neumann) + int bc_lo[] = {amrex::BCType::foextrap, amrex::BCType::foextrap, amrex::BCType::foextrap}; + int bc_hi[] = {amrex::BCType::foextrap, amrex::BCType::foextrap, amrex::BCType::foextrap}; +*/ + + bcs.resize(1); // Setup 1-component + for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) + { + // lo-side BCs + if (bc_lo[idim] == BCType::int_dir || // periodic uses "internal Dirichlet" + bc_lo[idim] == BCType::foextrap || // first-order extrapolation + bc_lo[idim] == BCType::ext_dir ) { // external Dirichlet + bcs[0].setLo(idim, bc_lo[idim]); + } + else { + amrex::Abort("Invalid bc_lo"); + } + + // hi-side BCSs + if (bc_hi[idim] == BCType::int_dir || // periodic uses "internal Dirichlet" + bc_hi[idim] == BCType::foextrap || // first-order extrapolation + bc_hi[idim] == BCType::ext_dir ) { // external Dirichlet + bcs[0].setHi(idim, bc_hi[idim]); + } + else { + amrex::Abort("Invalid bc_hi"); + } + } + + // stores fluxes at coarse-fine interface for synchronization + // this will be sized "nlevs_max+1" + // NOTE: the flux register associated with flux_reg[lev] is associated + // with the lev/lev-1 interface (and has grid spacing associated with lev-1) + // therefore flux_reg[0] is never actually used in the reflux operation + flux_reg.resize(nlevs_max+1); + + // fillpatcher[lev] is for filling data on level lev using the data on + // lev-1 and lev. + fillpatcher.resize(nlevs_max+1); +} + +AmrCoreAdv::~AmrCoreAdv () = default; + +// advance solution to final time +void +AmrCoreAdv::Evolve () +{ + Real cur_time = t_new[0]; + int last_plot_file_step = 0; + + for (int step = istep[0]; step < max_step && cur_time < stop_time; ++step) + { + amrex::Print() << "\nCoarse STEP " << step+1 << " starts ..." << std::endl; + + ComputeDt(); + + int lev = 0; + int iteration = 1; + timeStepNoSubcycling(cur_time, iteration); + + cur_time += dt[0]; + + // sum phi to check conservation + Real sum_phi_old = phi_old[0].sum(); + Real sum_phi_new = phi_new[0].sum(); + + amrex::Print() << "Coarse STEP " << step+1 << " ends." 
<< " TIME = " << cur_time + << " DT = " << dt[0] << " Sum(Phi) = " << sum_phi_old << " " << sum_phi_new << std::endl; + + // sync up time + for (lev = 0; lev <= finest_level; ++lev) { + t_new[lev] = cur_time; + } + + if (plot_int > 0 && (step+1) % plot_int == 0) { + last_plot_file_step = step+1; + WritePlotFile(); + } + + if (chk_int > 0 && (step+1) % chk_int == 0) { + WriteCheckpointFile(); + } + +#ifdef AMREX_MEM_PROFILING + { + std::ostringstream ss; + ss << "[STEP " << step+1 << "]"; + MemProfiler::report(ss.str()); + } +#endif + + if (cur_time >= stop_time - 1.e-6*dt[0]) { break; } + } + + if (plot_int > 0 && istep[0] > last_plot_file_step) { + WritePlotFile(); + } +} + +// initializes multilevel data +void +AmrCoreAdv::InitData () +{ + if (restart_chkfile.empty()) { + // start simulation from the beginning + const Real time = 0.0; + + // Note that InitFromScratch allocates the space for phi at each level, + // but only initializes phi at level 0, not at level > 0. + // So we can't do average down until we create the particles, + // then use the particles to define the level 1 phi + InitFromScratch(time); + +#ifdef AMREX_PARTICLES + if (max_level > 0) { + particleData.init_particles((amrex::ParGDBBase*)GetParGDB(), grown_fba, phi_new[0], phi_new[1]); + } +#endif + AverageDown(); + phi_new[0].FillBoundary(); + + if (chk_int > 0) { + WriteCheckpointFile(); + } + } + else { + // restart from a checkpoint + ReadCheckpointFile(); + } + if (plot_int > 0) { + WritePlotFile(); + } +} + +void AmrCoreAdv::MakeFBA(const BoxArray& ba) +{ + Box domain(Geom(1).Domain()); + BoxList valid_bl(ba); + BoxList com_bl = GetBndryCells(ba,1); +#if (AMREX_SPACEDIM == 2) + Vector pshifts(9); +#else + Vector pshifts(27); +#endif + BoxList com_bl_fixed; + for (auto& b : com_bl) { + Box bx(b); + com_bl_fixed.push_back(b); + } + com_bl_fixed.catenate(valid_bl); + grown_fba.define(com_bl_fixed); +} + +// Make a new level using provided BoxArray and DistributionMapping and +// fill with interpolated coarse level data (overrides the pure virtual function in AmrCore) +// regrid --> RemakeLevel (if level already existed) +// regrid --> MakeNewLevelFromCoarse (if adding new level) +void +AmrCoreAdv::MakeNewLevelFromCoarse (int lev, Real time, const BoxArray& ba, + const DistributionMapping& dm) +{ + const int ncomp = phi_new[lev-1].nComp(); + const int ng = phi_new[lev-1].nGrow(); + + phi_new[lev].define(ba, dm, ncomp, ng); + phi_old[lev].define(ba, dm, ncomp, ng); + + t_new[lev] = time; + t_old[lev] = time - 1.e200; + + if (lev > 0 && do_reflux) { + flux_reg[lev] = std::make_unique(ba, dm, refRatio(lev-1), lev, ncomp); + } + + FillCoarsePatch(lev, time, phi_new[lev], 0, ncomp); +} + +// Make a new level using provided BoxArray and DistributionMapping and +// fill with interpolated coarse level data (overrides the pure virtual function in AmrCore) +// regrid --> RemakeLevel (if level already existed) +// regrid --> MakeNewLevelFromCoarse (if adding new level) +void +AmrCoreAdv::RemakeLevel (int lev, Real time, const BoxArray& ba, + const DistributionMapping& dm) +{ + const int ncomp = phi_new[lev].nComp(); + const int ng = phi_new[lev].nGrow(); + + amrex::Print() << " REGRIDDING: NEW GRIDS AT LEVEL " << lev << " " << ba << std::endl; + + if (lev == 1) { + MakeFBA(ba); + } + + MultiFab new_state(ba, dm, ncomp, ng); + MultiFab old_state(ba, dm, ncomp, ng); + + // Must use fillpatch_function + FillPatch(lev, time, new_state, 0, ncomp, FillPatchType::fillpatch_function); + + std::swap(new_state, phi_new[lev]); + 
std::swap(old_state, phi_old[lev]); + + t_new[lev] = time; + t_old[lev] = time - 1.e200; + + if (lev > 0 && do_reflux) { + flux_reg[lev] = std::make_unique(ba, dm, refRatio(lev-1), lev, ncomp); + } + +#ifdef AMREX_PARTICLES + if (lev == 1) { + particleData.regrid_particles(grown_fba); + } +#endif +} + +// Delete level data +// overrides the pure virtual function in AmrCore +void +AmrCoreAdv::ClearLevel (int lev) +{ + phi_new[lev].clear(); + phi_old[lev].clear(); + flux_reg[lev].reset(nullptr); + fillpatcher[lev].reset(nullptr); +} + +// Make a new level from scratch using provided BoxArray and DistributionMapping. +// Only used during initialization. +// overrides the pure virtual function in AmrCore +// main.cpp --> AmrCoreAdv::InitData --> InitFromScratch --> MakeNewGrids --> MakeNewLevelFromScratch +// restart --> MakeNewGrids --> MakeNewLevelFromScratch +void AmrCoreAdv::MakeNewLevelFromScratch (int lev, Real time, const BoxArray& ba, + const DistributionMapping& dm) +{ + const int ng = 1; + + amrex::Print() << " GRIDS AT LEVEL " << lev << " " << ba << std::endl; + + // ncomp = number of components for each array + int ncomp; + if (alg_type == 0) { + ncomp = 1; + } else { + ncomp = 2; + } + + if (lev == 1) { + MakeFBA(ba); + } + + phi_new[lev].define(ba, dm, ncomp, ng); + phi_old[lev].define(ba, dm, ncomp, ng); + + t_new[lev] = time; + t_old[lev] = time - 1.e200; + + if (lev > 0 && do_reflux) { + flux_reg[lev] = std::make_unique(ba, dm, refRatio(lev-1), lev, ncomp); + } + + const auto problo = Geom(lev).ProbLoArray(); + const auto dx = Geom(lev).CellSizeArray(); + + int Ncomp = phi_new[lev].nComp(); + + if (lev == 0) { + for (MFIter mfi(phi_new[lev]); mfi.isValid(); ++mfi) + { + const Box& vbx = mfi.validbox(); + auto const& phi_arr = phi_new[lev].array(mfi); + auto npts_scale_local = npts_scale; + amrex::ParallelFor(vbx, + [=] AMREX_GPU_DEVICE(int i, int j, int k) + { + init_phi(i,j,k,phi_arr,dx,problo,npts_scale_local,Ncomp); + }); + } + } else { + phi_new[lev].ParallelCopy(phi_new[lev-1], 0, 0, phi_new[lev].nComp()); + } +} + +// tag all cells for refinement +// overrides the pure virtual function in AmrCore +void +AmrCoreAdv::ErrorEst (int lev, TagBoxArray& tags, Real /*time*/, int /*ngrow*/) +{ + static bool first = true; + static Vector phierr; + + // only do this during the first call to ErrorEst + if (first) + { + first = false; + // read in an array of "phierr", which is the tagging threshold + // in this example, we tag values of "phi" which are greater than phierr + // for that particular level + // in subroutine state_error, you could use more elaborate tagging, such + // as more advanced logical expressions, or gradients, etc. 
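+        // For example, an inputs line such as (values here are hypothetical)
+        //     adv.phierr = 1.01  1.1  1.5
+        // tags cells with phi > 1.01 on level 0, phi > 1.1 on level 1, etc.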
+ ParmParse pp("adv"); + int n = pp.countval("phierr"); + if (n > 0) { + pp.getarr("phierr", phierr, 0, n); + } + } + + if (lev >= phierr.size()) { return; } + +// const int clearval = TagBox::CLEAR; + const int tagval = TagBox::SET; + + const MultiFab& state = phi_new[lev]; + +#ifdef AMREX_USE_OMP +#pragma omp parallel if(Gpu::notInLaunchRegion()) +#endif + { + + for (MFIter mfi(state,TilingIfNotGPU()); mfi.isValid(); ++mfi) + { + const Box& bx = mfi.tilebox(); + const auto statefab = state.array(mfi); + const auto tagfab = tags.array(mfi); + Real phierror = phierr[lev]; + + amrex::ParallelFor(bx, + [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept + { + state_error(i, j, k, tagfab, statefab, phierror, tagval); + }); + } + } +} + +// read in some parameters from inputs file +void +AmrCoreAdv::ReadParameters () +{ + { + ParmParse pp; // Traditionally, max_step and stop_time do not have prefix. + pp.query("max_step", max_step); + pp.query("stop_time", stop_time); + + npts_scale = 1.0; + pp.queryAdd("npts_scale", npts_scale); + + alg_type = 0; + pp.queryAdd("alg_type", alg_type); + + seed = 0; + pp.queryAdd("seed", seed); + } + + { + ParmParse pp("amr"); // Traditionally, these have prefix, amr. + + pp.query("regrid_int", regrid_int); + pp.query("plot_file", plot_file); + pp.query("plot_int", plot_int); + pp.query("chk_file", chk_file); + pp.query("chk_int", chk_int); + pp.query("restart",restart_chkfile); + } + + { + ParmParse pp("adv"); + + pp.query("cfl", cfl); + pp.query("do_reflux", do_reflux); + pp.query("do_subcycle", do_subcycle); + } + +#ifdef AMREX_PARTICLES + particleData.init_particle_params(max_level); +#endif +} + +// set covered coarse cells to be the average of overlying fine cells +void +AmrCoreAdv::AverageDown () +{ + for (int lev = finest_level; lev >= 1; --lev) { + phi_new[lev-1].ParallelCopy(phi_new[lev], 0, 0, phi_new[lev].nComp()); + } +} + +// more flexible version of AverageDown() that lets you average down across multiple levels +void +AmrCoreAdv::AverageDownTo (int crse_lev) +{ + phi_new[crse_lev].ParallelCopy(phi_new[crse_lev+1], 0, 0, phi_new[crse_lev].nComp()); +} + +// compute a new multifab by coping in phi from valid region and filling ghost cells +// works for single level and 2-level cases (fill fine grid ghost by interpolating from coarse) +void +AmrCoreAdv::FillPatch (int lev, Real time, MultiFab& mf, int icomp, int ncomp, + FillPatchType fptype) +{ + if (lev == 0) + { + Vector smf; + Vector stime; + GetData(0, time, smf, stime); + + if(Gpu::inLaunchRegion()) + { + GpuBndryFuncFab gpu_bndry_func(AmrCoreFill{}); + PhysBCFunct > physbc(geom[lev],bcs,gpu_bndry_func); + amrex::FillPatchSingleLevel(mf, time, smf, stime, 0, icomp, ncomp, + geom[lev], physbc, 0); + } + else + { + CpuBndryFuncFab bndry_func(nullptr); // Without EXT_DIR, we can pass a nullptr. 
+ PhysBCFunct physbc(geom[lev],bcs,bndry_func); + amrex::FillPatchSingleLevel(mf, time, smf, stime, 0, icomp, ncomp, + geom[lev], physbc, 0); + } + } + else + { + Vector cmf, fmf; + Vector ctime, ftime; + GetData(lev-1, time, cmf, ctime); + GetData(lev , time, fmf, ftime); + + Interpolater* mapper = &cell_cons_interp; + + if (fptype == FillPatchType::fillpatch_class) { + if (fillpatcher[lev] == nullptr) { + fillpatcher[lev] = std::make_unique> + (boxArray(lev ), DistributionMap(lev ), Geom(lev ), + boxArray(lev-1), DistributionMap(lev-1), Geom(lev-1), + mf.nGrowVect(), mf.nComp(), mapper); + } + } + + if(Gpu::inLaunchRegion()) + { + GpuBndryFuncFab gpu_bndry_func(AmrCoreFill{}); + PhysBCFunct > cphysbc(geom[lev-1],bcs,gpu_bndry_func); + PhysBCFunct > fphysbc(geom[lev],bcs,gpu_bndry_func); + + if (fptype == FillPatchType::fillpatch_class) { + fillpatcher[lev]->fill(mf, mf.nGrowVect(), time, + cmf, ctime, fmf, ftime, 0, icomp, ncomp, + cphysbc, 0, fphysbc, 0, bcs, 0); + } else { + amrex::FillPatchTwoLevels(mf, time, cmf, ctime, fmf, ftime, + 0, icomp, ncomp, geom[lev-1], geom[lev], + cphysbc, 0, fphysbc, 0, refRatio(lev-1), + mapper, bcs, 0); + } + } + else + { + CpuBndryFuncFab bndry_func(nullptr); // Without EXT_DIR, we can pass a nullptr. + PhysBCFunct cphysbc(geom[lev-1],bcs,bndry_func); + PhysBCFunct fphysbc(geom[lev],bcs,bndry_func); + + if (fptype == FillPatchType::fillpatch_class) { + fillpatcher[lev]->fill(mf, mf.nGrowVect(), time, + cmf, ctime, fmf, ftime, 0, icomp, ncomp, + cphysbc, 0, fphysbc, 0, bcs, 0); + } else { + amrex::FillPatchTwoLevels(mf, time, cmf, ctime, fmf, ftime, + 0, icomp, ncomp, geom[lev-1], geom[lev], + cphysbc, 0, fphysbc, 0, refRatio(lev-1), + mapper, bcs, 0); + } + } + } +} + +// fill an entire multifab by interpolating from the coarser level +// this comes into play when a new level of refinement appears +void +AmrCoreAdv::FillCoarsePatch (int lev, Real time, MultiFab& mf, int icomp, int ncomp) +{ + BL_ASSERT(lev > 0); + + Vector cmf; + Vector ctime; + GetData(lev-1, time, cmf, ctime); + Interpolater* mapper = &cell_cons_interp; + + if (cmf.size() != 1) { + amrex::Abort("FillCoarsePatch: how did this happen?"); + } + + if(Gpu::inLaunchRegion()) + { + GpuBndryFuncFab gpu_bndry_func(AmrCoreFill{}); + PhysBCFunct > cphysbc(geom[lev-1],bcs,gpu_bndry_func); + PhysBCFunct > fphysbc(geom[lev],bcs,gpu_bndry_func); + + amrex::InterpFromCoarseLevel(mf, time, *cmf[0], 0, icomp, ncomp, geom[lev-1], geom[lev], + cphysbc, 0, fphysbc, 0, refRatio(lev-1), + mapper, bcs, 0); + } + else + { + CpuBndryFuncFab bndry_func(nullptr); // Without EXT_DIR, we can pass a nullptr. 
+ PhysBCFunct cphysbc(geom[lev-1],bcs,bndry_func); + PhysBCFunct fphysbc(geom[lev],bcs,bndry_func); + + amrex::InterpFromCoarseLevel(mf, time, *cmf[0], 0, icomp, ncomp, geom[lev-1], geom[lev], + cphysbc, 0, fphysbc, 0, refRatio(lev-1), + mapper, bcs, 0); + } +} + +void +AmrCoreAdv::GetData (int lev, Real time, Vector& data, Vector& datatime) +{ + data.clear(); + datatime.clear(); + + if (amrex::almostEqual(time, t_new[lev], 5)) + { + data.push_back(&phi_new[lev]); + datatime.push_back(t_new[lev]); + } + else if (amrex::almostEqual(time, t_old[lev], 5)) + { + data.push_back(&phi_old[lev]); + datatime.push_back(t_old[lev]); + } + else + { + data.push_back(&phi_old[lev]); + data.push_back(&phi_new[lev]); + datatime.push_back(t_old[lev]); + datatime.push_back(t_new[lev]); + } +} + +// Advance all the levels with the same dt +void +AmrCoreAdv::timeStepNoSubcycling (Real time, int iteration) +{ + if (max_level > 0 && regrid_int > 0) // We may need to regrid + { + if (istep[0] % regrid_int == 0) + { + regrid(0, time); + } + } + + // Advance phi at level 0 only + int lev = 0; + int nsub = 1; + std::swap(phi_old[lev], phi_new[lev]); + + if (Verbose()) { + amrex::Print() << "[Level " << lev << " step " << istep[lev]+1 << "] "; + amrex::Print() << "ADVANCE with time = " << t_new[lev] << " dt = " << dt[0] << std::endl; + } + + AdvancePhiAtLevel(lev, time, dt[lev], iteration, nsub); + + if (finest_level > 0) { + flux_reg[lev+1]->Reflux(phi_new[lev], 1.0, 0, 0, phi_new[lev].nComp(), geom[lev]); + } + + if (Verbose()) { + amrex::Print() << "[Level " << lev << " step " << istep[lev]+1 << "] "; + amrex::Print() << "Advanced " << CountCells(lev) << " cells" << std::endl; + } + + int lev_for_particles = finest_level; +#ifdef AMREX_PARTICLES + // We set this to finest_level rather than 1 so that we can run with max_level = 0; + // in that case the advance_particle routine will return without doing anything + if (finest_level > 0) { + std::swap(phi_old[lev_for_particles], phi_new[lev_for_particles]); + } + const Real* dx = geom[lev_for_particles].CellSize(); +#if (AMREX_SPACEDIM == 2) + const Real cell_vol = dx[0]*dx[1]; +#else + const Real cell_vol = dx[0]*dx[1]*dx[2]; +#endif + particleData.advance_particles(lev_for_particles, dt[lev_for_particles], cell_vol, + phi_old[0], phi_new[0], phi_new[lev_for_particles]); + particleData.Redistribute(); +#endif + + // Make sure the coarser levels are consistent with the finer levels + AverageDown (); + + for (auto& fp : fillpatcher) { + fp.reset(); // Because the data have changed. + } + + for (int ilev = 0; ilev <= finest_level; ilev++) { + ++istep[ilev]; + } + + if (Verbose()) + { + amrex::Print() << "[Level " << lev_for_particles << " step " << istep[lev_for_particles] << "] "; + amrex::Print() << "Advanced " << CountCells(lev_for_particles) << " cells" << std::endl; + } +} + +// a wrapper for EstTimeStep +void +AmrCoreAdv::ComputeDt () +{ + Vector dt_tmp(finest_level+1); + + for (int lev = 0; lev <= finest_level; ++lev) + { + dt_tmp[lev] = EstTimeStep(lev, t_new[lev]); + } + ParallelDescriptor::ReduceRealMin(dt_tmp.data(), int(dt_tmp.size())); + + constexpr Real change_max = 1.1; + Real dt_0 = dt_tmp[0]; + int n_factor = 1; + + for (int lev = 0; lev <= finest_level; ++lev) { + dt_tmp[lev] = std::min(dt_tmp[lev], change_max*dt[lev]); + n_factor *= nsubsteps[lev]; + dt_0 = std::min(dt_0, n_factor*dt_tmp[lev]); + } + + // Limit dt's by the value of stop_time. 
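+    // (if the next step would overshoot stop_time, shrink dt_0 so the run
+    //  ends exactly at stop_time; eps guards against taking a tiny leftover
+    //  step caused by roundoff)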
+ const Real eps = 1.e-3*dt_0; + + if (t_new[0] + dt_0 > stop_time - eps) { + dt_0 = stop_time - t_new[0]; + } + + dt[0] = dt_0; + + for (int lev = 1; lev <= finest_level; ++lev) { + dt[lev] = dt[lev-1] / nsubsteps[lev]; + } +} + +// compute dt from CFL considerations +Real +AmrCoreAdv::EstTimeStep (int lev, Real /*time*/) +{ + BL_PROFILE("AmrCoreAdv::EstTimeStep()"); + + Real dt_est = std::numeric_limits::max(); + + const Real* dx = geom[lev].CellSize(); + + Real coeff = AMREX_D_TERM( 2./(dx[0]*dx[0]), + + 2./(dx[1]*dx[1]), + + 2./(dx[2]*dx[2]) ); + Real est = 1.0 / (2.0*coeff); + dt_est = amrex::min(dt_est, est); + + dt_est *= cfl; + + return dt_est; +} + +// get plotfile name +std::string +AmrCoreAdv::PlotFileName (int lev) const +{ + return amrex::Concatenate(plot_file, lev, 6); +} + +// put together an array of multifabs for writing +Vector +AmrCoreAdv::PlotFileMF () const +{ + Vector r; + for (int i = 0; i <= finest_level; ++i) { + r.push_back(&phi_new[i]); + } + return r; +} + +// set plotfile variable names +Vector +AmrCoreAdv::PlotFileVarNames () +{ + return {"phi"}; +} + +// write plotfile to disk +void +AmrCoreAdv::WritePlotFile () const +{ + const std::string& plotfilename = PlotFileName(istep[0]); + const auto& mf = PlotFileMF(); + const auto& varnames = PlotFileVarNames(); + + amrex::Print() << "Writing plotfile " << plotfilename << "\n"; + + // Note that because amrvis won't plot multilevel data with ref_ratio = 1, + // we only write out the coarsest level data in the plotfile + int fake_finest_level = 0; + amrex::WriteMultiLevelPlotfile(plotfilename, fake_finest_level+1, mf, varnames, + Geom(), t_new[0], istep, refRatio()); +} + +void +AmrCoreAdv::WriteCheckpointFile () const +{ + + // chk00010 write a checkpoint file with this root directory + // chk00010/Header this contains information you need to save (e.g., finest_level, t_new, etc.) and also + // the BoxArrays at each level + // chk00010/Level_0/ + // chk00010/Level_1/ + // etc. these subdirectories will hold the MultiFab data at each level of refinement + + // checkpoint file name, e.g., chk00010 + const std::string& checkpointname = amrex::Concatenate(chk_file,istep[0]); + + amrex::Print() << "Writing checkpoint " << checkpointname << "\n"; + + const int nlevels = finest_level+1; + + // ---- prebuild a hierarchy of directories + // ---- dirName is built first. if dirName exists, it is renamed. then build + // ---- dirName/subDirPrefix_0 .. dirName/subDirPrefix_nlevels-1 + // ---- if callBarrier is true, call ParallelDescriptor::Barrier() + // ---- after all directories are built + // ---- ParallelDescriptor::IOProcessor() creates the directories + amrex::PreBuildDirectorHierarchy(checkpointname, "Level_", nlevels, true); + + // write Header file + if (ParallelDescriptor::IOProcessor()) { + + std::string HeaderFileName(checkpointname + "/Header"); + VisMF::IO_Buffer io_buffer(VisMF::IO_Buffer_Size); + std::ofstream HeaderFile; + HeaderFile.rdbuf()->pubsetbuf(io_buffer.dataPtr(), io_buffer.size()); + HeaderFile.open(HeaderFileName.c_str(), std::ofstream::out | + std::ofstream::trunc | + std::ofstream::binary); + if( ! 
HeaderFile.good()) { + amrex::FileOpenFailed(HeaderFileName); + } + + HeaderFile.precision(17); + + // write out title line + HeaderFile << "Checkpoint file for AmrCoreAdv\n"; + + // write out finest_level + HeaderFile << finest_level << "\n"; + + // write out array of istep + for (int i = 0; i < istep.size(); ++i) { + HeaderFile << istep[i] << " "; + } + HeaderFile << "\n"; + + // write out array of dt + for (int i = 0; i < dt.size(); ++i) { + HeaderFile << dt[i] << " "; + } + HeaderFile << "\n"; + + // write out array of t_new + for (int i = 0; i < t_new.size(); ++i) { + HeaderFile << t_new[i] << " "; + } + HeaderFile << "\n"; + + // write the BoxArray at each level + for (int lev = 0; lev <= finest_level; ++lev) { + boxArray(lev).writeOn(HeaderFile); + HeaderFile << '\n'; + } + } + + // write the MultiFab data to, e.g., chk00010/Level_0/ + for (int lev = 0; lev <= finest_level; ++lev) { + VisMF::Write(phi_new[lev], + amrex::MultiFabFileFullPrefix(lev, checkpointname, "Level_", "phi")); + } + +#ifdef AMREX_PARTICLES + particleData.Checkpoint(checkpointname); +#endif + +} + +namespace { +// utility to skip to next line in Header +void GotoNextLine (std::istream& is) +{ + constexpr std::streamsize bl_ignore_max { 100000 }; + is.ignore(bl_ignore_max, '\n'); +} +} + +void +AmrCoreAdv::ReadCheckpointFile () +{ + amrex::Print() << "Restart from checkpoint " << restart_chkfile << "\n"; + + // Header + std::string File(restart_chkfile + "/Header"); + + VisMF::IO_Buffer io_buffer(VisMF::GetIOBufferSize()); + + Vector fileCharPtr; + ParallelDescriptor::ReadAndBcastFile(File, fileCharPtr); + std::string fileCharPtrString(fileCharPtr.dataPtr()); + std::istringstream is(fileCharPtrString, std::istringstream::in); + + std::string line, word; + + // read in title line + std::getline(is, line); + + // read in finest_level + is >> finest_level; + GotoNextLine(is); + + // read in array of istep + std::getline(is, line); + { + std::istringstream lis(line); + int i = 0; + while (lis >> word) { + istep[i++] = std::stoi(word); + } + } + + // read in array of dt + std::getline(is, line); + { + std::istringstream lis(line); + int i = 0; + while (lis >> word) { + dt[i++] = std::stod(word); + } + } + + // read in array of t_new + std::getline(is, line); + { + std::istringstream lis(line); + int i = 0; + while (lis >> word) { + t_new[i++] = std::stod(word); + } + } + + for (int lev = 0; lev <= finest_level; ++lev) { + + // read in level 'lev' BoxArray from Header + BoxArray ba; + ba.readFrom(is); + GotoNextLine(is); + + // create a distribution mapping + DistributionMapping dm { ba, ParallelDescriptor::NProcs() }; + + // set BoxArray grids and DistributionMapping dmap in AMReX_AmrMesh.H class + SetBoxArray(lev, ba); + SetDistributionMap(lev, dm); + + // build MultiFab and FluxRegister data + int ncomp = 1; + int ng = 0; + phi_old[lev].define(grids[lev], dmap[lev], ncomp, ng); + phi_new[lev].define(grids[lev], dmap[lev], ncomp, ng); + + if (lev > 0 && do_reflux) { + flux_reg[lev] = std::make_unique(grids[lev], dmap[lev], refRatio(lev-1), lev, ncomp); + } + } + + // read in the MultiFab data + for (int lev = 0; lev <= finest_level; ++lev) { + VisMF::Read(phi_new[lev], + amrex::MultiFabFileFullPrefix(lev, restart_chkfile, "Level_", "phi")); + } + +#ifdef AMREX_PARTICLES + particleData.Restart((amrex::ParGDBBase*)GetParGDB(),restart_chkfile); +#endif + + +} diff --git a/src_deankow/multilevel/Kernels.H b/src_deankow/multilevel/Kernels.H new file mode 100644 index 000000000..d76d470eb --- /dev/null +++ 
b/src_deankow/multilevel/Kernels.H @@ -0,0 +1,15 @@ +#ifndef Kernels_H_ +#define Kernels_H_ + +#include +#include +#include +#include + +#if (AMREX_SPACEDIM == 2) +#include +#else +#include +#endif + +#endif diff --git a/src_deankow/multilevel/Make.package b/src_deankow/multilevel/Make.package new file mode 100644 index 000000000..de031d146 --- /dev/null +++ b/src_deankow/multilevel/Make.package @@ -0,0 +1,16 @@ +CEXE_sources += AdvancePhiAtLevel.cpp +CEXE_sources += AmrCoreAdv.cpp +CEXE_sources += main.cpp +CEXE_sources += myfunc.cpp + +CEXE_headers += AmrCoreAdv.H +CEXE_headers += bc_fill.H +CEXE_headers += Kernels.H +CEXE_headers += Tagging.H +CEXE_headers += mykernel.H + +ifeq ($(USE_PARTICLES),TRUE) +CEXE_headers += ParticleData.H +CEXE_headers += StochasticPC.H +CEXE_sources += StochasticPC.cpp +endif diff --git a/src_deankow/multilevel/ParticleData.H b/src_deankow/multilevel/ParticleData.H new file mode 100644 index 000000000..c3e5af280 --- /dev/null +++ b/src_deankow/multilevel/ParticleData.H @@ -0,0 +1,210 @@ +#ifndef _PARTICLE_DATA_H_ +#define _PARTICLE_DATA_H_ + +#include +#include + +// #include +#include +#include +#include +#include + +#include + +/** + * Container holding many of the particle-related data and options + */ + +struct ParticleData { + public: + void init_particle_params(int max_level) + { + amrex::ParmParse pp(pp_prefix); + + use_particles = 0; + pp.query("use_particles", use_particles); + + if (use_particles) { + if (max_level != 1) { + amrex::Abort("When using particles you must have max_level == 1"); + } + } + + } + + void init_particles(amrex::ParGDBBase* a_gdb, const amrex::BoxArray& grown_fba, + amrex::MultiFab& phi_crse, amrex::MultiFab& phi_fine) + { + // Initialize stochastic particles and phi at level 1 + if (use_particles) { + const amrex::Geometry& geom_fine = a_gdb->Geom(1); + const auto dx_fine = geom_fine.CellSize(); +#if (AMREX_SPACEDIM == 2) + amrex::Real cell_vol_fine = dx_fine[0]*dx_fine[1]; +#else + amrex::Real cell_vol_fine = dx_fine[0]*dx_fine[1]*dx_fine[2]; +#endif + + // ********************************************************************** + // Define the particle class with the default gdb, then change the particle + // boxArray (and DM) to be the grown ba and associated dm + // ********************************************************************** + stochastic_particles = std::make_unique(a_gdb); + stochastic_particles->SetParticleBoxArray(1,grown_fba); + amrex::DistributionMapping grown_dm { grown_fba, amrex::ParallelDescriptor::NProcs() }; + stochastic_particles->SetParticleDistributionMap(1,grown_dm); + + // ********************************************************************** + // Fill a temporary phi_grown with phi from "coarse" level + // ********************************************************************** + amrex::MultiFab phi_grown(grown_fba, grown_dm, 1, 0); + phi_crse.FillBoundary(geom_fine.periodicity()); + phi_grown.ParallelCopy(phi_crse,0,0,1,1,0,geom_fine.periodicity()); + + // ********************************************************************** + // Use this grown boxArray to hold the particles + // ********************************************************************** + stochastic_particles->InitParticles(phi_grown); + + amrex::Print() << "Before Removal we have " << stochastic_particles->TotalNumberOfParticles() << + " stochastic particles." 
<< std::endl; + + // ********************************************************************** + // Remove particles not in the real fine boxArray + // ********************************************************************** + stochastic_particles->RemoveParticlesNotInBA(phi_fine.boxArray()); + + // ********************************************************************** + // Redistribute so every particle is now in the "correct" place + // ********************************************************************** + stochastic_particles->Redistribute(); + amrex::Print() << "Initialized " << stochastic_particles->TotalNumberOfParticles() << + " stochastic particles." << std::endl; + + // ********************************************************************** + // Define phi at level 1 from the particle count + // ********************************************************************** + int lev = 1; + phi_fine.setVal(0.0); + stochastic_particles->Increment(phi_fine, lev); + phi_fine.mult(1./cell_vol_fine); + amrex::Print() << "Used the particle locations to define phi at level 1 " << std::endl; + } + } + + void regrid_particles(const amrex::BoxArray& grown_fba) + { + amrex::DistributionMapping grown_dm { grown_fba, amrex::ParallelDescriptor::NProcs() }; + stochastic_particles->SetParticleDistributionMap(1,grown_dm); + stochastic_particles->SetParticleBoxArray(1,grown_fba); + stochastic_particles->Redistribute(); + } + + void Checkpoint (const std::string& filename) const + { + if (use_particles) { + stochastic_particles->Checkpoint(filename, "stochastic_particles", true, particle_varnames); + } + } + + void Restart(amrex::ParGDBBase* a_gdb, std::string& restart_file) + { + if (use_particles) { + stochastic_particles = std::make_unique(a_gdb); + std::string particle_file("stochastic_particles"); + stochastic_particles->Restart(restart_file, particle_file); + } + } + + void advance_particles(int lev, amrex::Real dt_lev, amrex::Real cell_vol, + amrex::MultiFab& phi_crse_old, + amrex::MultiFab& phi_crse_new, + amrex::MultiFab& phi_fine) + { + // Update stochastic particles + if (use_particles) { + AMREX_ALWAYS_ASSERT(lev == 1); + const amrex::Geometry& geom_fine = stochastic_particles->GetParGDB()->Geom(lev); + + // ********************************************************************** + // Fill a temporary phi_grown with phi from "coarse" level + // ********************************************************************** + amrex::MultiFab phi_grown(stochastic_particles->ParticleBoxArray(1), + stochastic_particles->ParticleDistributionMap(1),1,0); + phi_crse_old.FillBoundary(geom_fine.periodicity()); + phi_grown.ParallelCopy(phi_crse_old,0,0,1,1,0,geom_fine.periodicity()); + + // ********************************************************************** + // Create new particles in first layer of ghost cells around the level 1 grids + // ********************************************************************** + amrex::BoxArray ba_to_exclude(phi_fine.boxArray()); + stochastic_particles->AddParticles(phi_grown, ba_to_exclude); + + // ********************************************************************** + // Advect the particles in the valid region and in the ghost region + // ********************************************************************** + stochastic_particles->AdvectWithRandomWalk(lev, dt_lev); + + // ********************************************************************** + // Put particles on the correct levels and in the right boxes + // ********************************************************************** 
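+            // (Redistribute() moves each particle to the rank and box that
+            //  owns its current position, including the ghost-region
+            //  particles created by AddParticles above.)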
+ stochastic_particles->Redistribute(); + + // ********************************************************************** + // Remove particles on the coarsest level + // ********************************************************************** + int crse_lev = 0; + stochastic_particles->RemoveParticlesAtLevel(crse_lev); + + // ********************************************************************** + // Account for particles that went from fine --> crse + // ********************************************************************** + phi_grown.setVal(0.); + stochastic_particles->RefluxFineToCrse(phi_fine.boxArray(), phi_grown); + phi_grown.mult(1./cell_vol); + phi_crse_new.ParallelAdd(phi_grown,0,0,1); + + // Remove particles that are not in the valid boxArray used by phi_fine + stochastic_particles->RemoveParticlesNotInBA(phi_fine.boxArray()); + + // ********************************************************************** + // Account for particles that went from crse --> fine + // ********************************************************************** + amrex::MultiFab phi_grown2(phi_grown.boxArray(),phi_grown.DistributionMap(),1,1); + phi_grown2.setVal(0.); + stochastic_particles->RefluxCrseToFine(phi_fine.boxArray(), phi_grown2); + phi_grown2.SumBoundary(geom_fine.periodicity()); + phi_grown2.mult(1./cell_vol); + phi_crse_new.ParallelAdd(phi_grown2,0,0,1); + + // ********************************************************************** + // Define phi at level 1 from the particle count + // ********************************************************************** + phi_grown.setVal(0.); + stochastic_particles->Increment(phi_grown, lev); + + // ***************************************************************************** + // Overwrite phi at level 0 from phi at level 1 only in the valid level 1 region + // ***************************************************************************** + phi_fine.ParallelCopy(phi_grown,0,0,1); + + phi_fine.mult(1./cell_vol); + amrex::Print() << "Used the particle locations to define phi at level 1 " << std::endl; + } + } + + void Redistribute() + { + if (use_particles) { + stochastic_particles->Redistribute(); + } + } + + std::string pp_prefix {"amr"}; + + int use_particles; + std::unique_ptr stochastic_particles; + amrex::Vector particle_varnames = {AMREX_D_DECL("xold", "yold", "zold")}; +}; +#endif diff --git a/src_deankow/multilevel/Src_K/Adv_K.H b/src_deankow/multilevel/Src_K/Adv_K.H new file mode 100644 index 000000000..2ededb532 --- /dev/null +++ b/src_deankow/multilevel/Src_K/Adv_K.H @@ -0,0 +1,23 @@ +#ifndef Adv_K_H_ +#define Adv_K_H_ + +#include +#include + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void conservative(int i, int j, int k, + amrex::Array4 const& phi_out, + amrex::Array4 const& phi_in, + AMREX_D_DECL(amrex::Array4 const& flxx, + amrex::Array4 const& flxy, + amrex::Array4 const& flxz), + AMREX_D_DECL(amrex::Real dtdx, amrex::Real dtdy, amrex::Real dtdz)) +{ + phi_out(i,j,k) = phi_in(i,j,k) + + ( AMREX_D_TERM( (flxx(i,j,k) - flxx(i+1,j,k)) * dtdx, + + (flxy(i,j,k) - flxy(i,j+1,k)) * dtdy, + + (flxz(i,j,k) - flxz(i,j,k+1)) * dtdz ) ); +} + +#endif diff --git a/src_deankow/multilevel/Src_K/Make.package b/src_deankow/multilevel/Src_K/Make.package new file mode 100644 index 000000000..5254ff6f6 --- /dev/null +++ b/src_deankow/multilevel/Src_K/Make.package @@ -0,0 +1,3 @@ +CEXE_headers += Adv_K.H +CEXE_headers += compute_flux_$(DIM)D_K.H +CEXE_headers += slope_K.H diff --git a/src_deankow/multilevel/Src_K/compute_flux_2D_K.H 
b/src_deankow/multilevel/Src_K/compute_flux_2D_K.H new file mode 100644 index 000000000..51bda7f35 --- /dev/null +++ b/src_deankow/multilevel/Src_K/compute_flux_2D_K.H @@ -0,0 +1,65 @@ +#ifndef compute_flux_2d_H_ +#define compute_flux_2d_H_ + +#include +#include + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void flux_x(int i, int j, int k, + amrex::Array4 const& px, + amrex::Array4 const& phi, + amrex::Array4 const& vx, + amrex::Array4 const& slope, + amrex::Real dtdx) +{ + px(i,j,k) = ( (vx(i,j,k) < 0) ? + phi(i ,j,k) - slope(i ,j,k)*(0.5 + 0.5*dtdx*vx(i,j,k)) : + phi(i-1,j,k) + slope(i-1,j,k)*(0.5 - 0.5*dtdx*vx(i,j,k)) ); +} + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void flux_y(int i, int j, int k, + amrex::Array4 const& py, + amrex::Array4 const& phi, + amrex::Array4 const& vy, + amrex::Array4 const& slope, + amrex::Real dtdy) +{ + py(i,j,k) = ( (vy(i,j,k) < 0) ? + phi(i,j ,k) - slope(i,j ,k)*(0.5 + 0.5*dtdy*vy(i,j,k)) : + phi(i,j-1,k) + slope(i,j-1,k)*(0.5 - 0.5*dtdy*vy(i,j,k)) ); +} + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void create_flux_x(int i, int j, int k, + amrex::Array4 const& fx, + amrex::Array4 const& vx, + amrex::Array4 const& vy, + amrex::Array4 const& px, + amrex::Array4 const& py, + amrex::Real dtdy) +{ + fx(i,j,k) = ( (vx(i,j,k) < 0) ? + (px(i,j,k) - 0.5*dtdy * ( 0.5*(vy(i ,j+1,k ) + vy(i ,j,k)) * (py(i ,j+1,k )-py(i ,j,k))))*vx(i,j,k) : + (px(i,j,k) - 0.5*dtdy * ( 0.5*(vy(i-1,j+1,k ) + vy(i-1,j,k)) * (py(i-1,j+1,k )-py(i-1,j,k))))*vx(i,j,k) ); +} + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void create_flux_y(int i, int j, int k, + amrex::Array4 const& fy, + amrex::Array4 const& vx, + amrex::Array4 const& vy, + amrex::Array4 const& py, + amrex::Array4 const& px, + amrex::Real dtdx) +{ + fy(i,j,k) = ( (vy(i,j,k) < 0) ? + (py(i,j,k) - 0.5*dtdx * ( 0.5*(vx(i+1,j ,k ) + vx(i,j ,k)) * (px(i+1,j ,k )-px(i,j ,k))))*vy(i,j,k) : + (py(i,j,k) - 0.5*dtdx * ( 0.5*(vx(i+1,j-1,k ) + vx(i,j-1,k)) * (px(i+1,j-1,k )-px(i,j-1,k))))*vy(i,j,k) ); +} + +#endif diff --git a/src_deankow/multilevel/Src_K/compute_flux_3D_K.H b/src_deankow/multilevel/Src_K/compute_flux_3D_K.H new file mode 100644 index 000000000..10caf8217 --- /dev/null +++ b/src_deankow/multilevel/Src_K/compute_flux_3D_K.H @@ -0,0 +1,202 @@ +#ifndef compute_flux_3d_H_ +#define compute_flux_3d_H_ + +#include +#include + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void flux_x(int i, int j, int k, + amrex::Array4 const& px, + amrex::Array4 const& phi, + amrex::Array4 const& vx, + amrex::Array4 const& slope, + amrex::Real dtdx) +{ + px(i,j,k) = ( (vx(i,j,k) < 0) ? + phi(i ,j,k) - slope(i ,j,k)*(0.5 + 0.5*dtdx*vx(i,j,k)) : + phi(i-1,j,k) + slope(i-1,j,k)*(0.5 - 0.5*dtdx*vx(i,j,k)) ); +} + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void flux_y(int i, int j, int k, + amrex::Array4 const& py, + amrex::Array4 const& phi, + amrex::Array4 const& vy, + amrex::Array4 const& slope, + amrex::Real dtdy) +{ + py(i,j,k) = ( (vy(i,j,k) < 0) ? + phi(i,j ,k) - slope(i,j ,k)*(0.5 + 0.5*dtdy*vy(i,j,k)) : + phi(i,j-1,k) + slope(i,j-1,k)*(0.5 - 0.5*dtdy*vy(i,j,k)) ); +} + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void flux_z(int i, int j, int k, + amrex::Array4 const& pz, + amrex::Array4 const& phi, + amrex::Array4 const& vz, + amrex::Array4 const& slope, + amrex::Real dtdz) +{ + pz(i,j,k) = ( (vz(i,j,k) < 0) ? 
+ phi(i,j,k ) - slope(i,j,k )*(0.5 + 0.5*dtdz*vz(i,j,k)) : + phi(i,j,k-1) + slope(i,j,k-1)*(0.5 - 0.5*dtdz*vz(i,j,k)) ); +} + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void flux_xy(int i, int j, int k, + amrex::Array4 const& pxy, + amrex::Array4 const& vx, + amrex::Array4 const& vy, + amrex::Array4 const& px, + amrex::Array4 const& py, + amrex::Real dtdy) +{ + pxy(i,j,k) = ( (vx(i,j,k) < 0) ? + px(i,j,k) - dtdy/3.0 * ( 0.5*(vy(i, j+1,k) + vy(i ,j,k)) * (py(i ,j+1,k) - py(i ,j,k))) : + px(i,j,k) - dtdy/3.0 * ( 0.5*(vy(i-1,j+1,k) + vy(i-1,j,k)) * (py(i-1,j+1,k) - py(i-1,j,k))) ); +} + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void flux_xz(int i, int j, int k, + amrex::Array4 const& pxz, + amrex::Array4 const& vx, + amrex::Array4 const& vz, + amrex::Array4 const& px, + amrex::Array4 const& pz, + amrex::Real dtdz) +{ + pxz(i,j,k) = ( (vx(i,j,k) < 0) ? + px(i,j,k) - dtdz/3.0 * ( 0.5*(vz(i, j,k+1) + vz(i ,j,k)) * (pz(i ,j,k+1) - pz(i ,j,k))) : + px(i,j,k) - dtdz/3.0 * ( 0.5*(vz(i-1,j,k+1) + vz(i-1,j,k)) * (pz(i-1,j,k+1) - pz(i-1,j,k))) ); +} + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void flux_yx(int i, int j, int k, + amrex::Array4 const& pyx, + amrex::Array4 const& vx, + amrex::Array4 const& vy, + amrex::Array4 const& px, + amrex::Array4 const& py, + amrex::Real dtdx) +{ + pyx(i,j,k) = ( (vy(i,j,k) < 0) ? + py(i,j,k) - dtdx/3.0 * ( 0.5*(vx(i+1,j ,k) + vx(i,j ,k)) * (px(i+1,j ,k) - px(i,j ,k))) : + py(i,j,k) - dtdx/3.0 * ( 0.5*(vx(i+1,j-1,k) + vx(i,j-1,k)) * (px(i+1,j-1,k) - px(i,j-1,k))) ); +} + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void flux_yz(int i, int j, int k, + amrex::Array4 const& pyz, + amrex::Array4 const& vy, + amrex::Array4 const& vz, + amrex::Array4 const& py, + amrex::Array4 const& pz, + amrex::Real dtdz) +{ + pyz(i,j,k) = ( (vy(i,j,k) < 0) ? + py(i,j,k) - dtdz/3.0 * ( 0.5*(vz(i, j,k+1) + vz(i,j ,k)) * (pz(i,j ,k+1) - pz(i,j ,k))) : + py(i,j,k) - dtdz/3.0 * ( 0.5*(vz(i,j-1,k+1) + vz(i,j-1,k)) * (pz(i,j-1,k+1) - pz(i,j-1,k))) ); +} + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void flux_zx(int i, int j, int k, + amrex::Array4 const& pzx, + amrex::Array4 const& vx, + amrex::Array4 const& vz, + amrex::Array4 const& px, + amrex::Array4 const& pz, + amrex::Real dtdx) +{ + pzx(i,j,k) = ( (vz(i,j,k) < 0) ? + pz(i,j,k) - dtdx/3.0 * ( 0.5*(vx(i+1,j,k ) + vx(i,j,k )) * (px(i+1,j,k ) - px(i,j,k ))) : + pz(i,j,k) - dtdx/3.0 * ( 0.5*(vx(i+1,j,k-1) + vx(i,j,k-1)) * (px(i+1,j,k-1) - px(i,j,k-1))) ); +} + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void flux_zy(int i, int j, int k, + amrex::Array4 const& pzy, + amrex::Array4 const& vy, + amrex::Array4 const& vz, + amrex::Array4 const& py, + amrex::Array4 const& pz, + amrex::Real dtdy) +{ + pzy(i,j,k) = ( (vz(i,j,k) < 0) ? + pz(i,j,k) - dtdy/3.0 * ( 0.5*(vy(i,j+1,k ) + vy(i,j,k )) * (py(i,j+1,k ) - py(i,j,k ))) : + pz(i,j,k) - dtdy/3.0 * ( 0.5*(vy(i,j+1,k-1) + vy(i,j,k-1)) * (py(i,j+1,k-1) - py(i,j,k-1))) ); +} + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void create_flux_x(int i, int j, int k, + amrex::Array4 const& fx, + amrex::Array4 const& vx, + amrex::Array4 const& vy, + amrex::Array4 const& vz, + amrex::Array4 const& px, + amrex::Array4 const& pyz, + amrex::Array4 const& pzy, + amrex::Real dtdy, amrex::Real dtdz) +{ + amrex::Real f = ( (vx(i,j,k) < 0) ? 
+ px(i,j,k) - 0.5*dtdy * ( 0.5*(vy(i ,j+1,k ) + vy(i ,j,k)) * (pyz(i ,j+1,k )-pyz(i ,j,k))) + - 0.5*dtdz * ( 0.5*(vz(i ,j ,k+1) + vz(i ,j,k)) * (pzy(i ,j ,k+1)-pzy(i ,j,k))) : + px(i,j,k) - 0.5*dtdy * ( 0.5*(vy(i-1,j+1,k ) + vy(i-1,j,k)) * (pyz(i-1,j+1,k )-pyz(i-1,j,k))) + - 0.5*dtdz * ( 0.5*(vz(i-1,j ,k+1) + vz(i-1,j,k)) * (pzy(i-1,j ,k+1)-pzy(i-1,j,k))) ); + + fx(i,j,k) = vx(i,j,k)*f; +} + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void create_flux_y(int i, int j, int k, + amrex::Array4 const& fy, + amrex::Array4 const& vx, + amrex::Array4 const& vy, + amrex::Array4 const& vz, + amrex::Array4 const& py, + amrex::Array4 const& pxz, + amrex::Array4 const& pzx, + amrex::Real dtdx, amrex::Real dtdz) +{ + amrex::Real f = ( (vy(i,j,k) < 0) ? + py(i,j,k) - 0.5*dtdx * ( 0.5*(vx(i+1,j ,k ) + vx(i,j ,k)) * (pxz(i+1,j ,k )-pxz(i,j ,k))) + - 0.5*dtdz * ( 0.5*(vz(i, j ,k+1) + vz(i,j ,k)) * (pzx(i, j ,k+1)-pzx(i,j ,k))) : + py(i,j,k) - 0.5*dtdx * ( 0.5*(vx(i+1,j-1,k ) + vx(i,j-1,k)) * (pxz(i+1,j-1,k )-pxz(i,j-1,k))) + - 0.5*dtdz * ( 0.5*(vz(i ,j-1,k+1) + vz(i,j-1,k)) * (pzx(i ,j-1,k+1)-pzx(i,j-1,k))) ); + + fy(i,j,k) = vy(i,j,k)*f; +} + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void create_flux_z(int i, int j, int k, + amrex::Array4 const& fz, + amrex::Array4 const& vx, + amrex::Array4 const& vy, + amrex::Array4 const& vz, + amrex::Array4 const& pz, + amrex::Array4 const& pxy, + amrex::Array4 const& pyx, + amrex::Real dtdx, amrex::Real dtdy) +{ + amrex::Real f = ( (vz(i,j,k) < 0) ? + pz(i,j,k) - 0.5*dtdx * ( 0.5*(vx(i+1,j ,k ) + vx(i,j,k )) * (pxy(i+1,j ,k )-pxy(i,j,k ))) + - 0.5*dtdy * ( 0.5*(vy(i, j+1,k ) + vy(i,j,k )) * (pyx(i, j+1,k )-pyx(i,j,k ))) : + pz(i,j,k) - 0.5*dtdx * ( 0.5*(vx(i+1,j ,k-1) + vx(i,j,k-1)) * (pxy(i+1,j ,k-1)-pxy(i,j,k-1))) + - 0.5*dtdy * ( 0.5*(vy(i ,j+1,k-1) + vy(i,j,k-1)) * (pyx(i ,j+1,k-1)-pyx(i,j,k-1))) ); + + fz(i,j,k) = vz(i,j,k)*f; +} + +#endif diff --git a/src_deankow/multilevel/Src_K/slope_K.H b/src_deankow/multilevel/Src_K/slope_K.H new file mode 100644 index 000000000..5637ea022 --- /dev/null +++ b/src_deankow/multilevel/Src_K/slope_K.H @@ -0,0 +1,180 @@ +#ifndef slope_K_H_ +#define slope_K_H_ + +#include +#include +#include + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void slopex2(amrex::Box const& bx, + amrex::Array4 const& dq, + amrex::Array4 const& q) +{ + using namespace amrex; + + const auto lo = amrex::lbound(bx); + const auto hi = amrex::ubound(bx); + + for (int k = lo.z; k <= hi.z; ++k) { + for (int j = lo.y; j <= hi.y; ++j) { + for (int i = lo.x; i <= hi.x; ++i) { + Real dlft = q(i,j,k) - q(i-1,j,k); + Real drgt = q(i+1,j,k) - q(i,j,k); + Real dcen = Real(0.5)*(dlft+drgt); + Real dsgn = std::copysign(Real(1.0), dcen); + Real dslop = Real(2.0) * ((std::abs(dlft) < std::abs(drgt)) ? + std::abs(dlft) : std::abs(drgt)); + Real dlim = (dlft*drgt >= Real(0.0)) ? dslop : Real(0.0); + dq(i,j,k) = dsgn*amrex::min(dlim, std::abs(dcen)); + } + } + } +} + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void slopex4(amrex::Box const& bx, + amrex::Array4 const& dq4, + amrex::Array4 const& q, + amrex::Array4 const& dq) +{ + using namespace amrex; + + const auto lo = amrex::lbound(bx); + const auto hi = amrex::ubound(bx); + + for (int k = lo.z; k <= hi.z; ++k) { + for (int j = lo.y; j <= hi.y; ++j) { + for (int i = lo.x; i <= hi.x; ++i) { + Real dlft = q(i,j,k) - q(i-1,j,k); + Real drgt = q(i+1,j,k) - q(i,j,k); + Real dcen = Real(0.5)*(dlft+drgt); + Real dsgn = std::copysign(Real(1.0), dcen); + Real dslop = Real(2.0) * ((std::abs(dlft) < std::abs(drgt)) ? 
+ std::abs(dlft) : std::abs(drgt)); + Real dlim = (dlft*drgt >= Real(0.0)) ? dslop : Real(0.0); + Real dq1 = Real(4.0/3.0)*dcen - Real(1.0/6.0)*(dq(i+1,j,k) + dq(i-1,j,k)); + dq4(i,j,k) = dsgn*amrex::min(dlim, std::abs(dq1)); + } + } + } +} + +// *********************************************************** + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void slopey2(amrex::Box const& bx, + amrex::Array4 const& dq, + amrex::Array4 const& q) +{ + using namespace amrex; + + const auto lo = amrex::lbound(bx); + const auto hi = amrex::ubound(bx); + + for (int k = lo.z; k <= hi.z; ++k) { + for (int j = lo.y; j <= hi.y; ++j) { + for (int i = lo.x; i <= hi.x; ++i) { + Real dlft = q(i,j,k) - q(i,j-1,k); + Real drgt = q(i,j+1,k) - q(i,j,k); + Real dcen = Real(0.5)*(dlft+drgt); + Real dsgn = std::copysign(Real(1.0), dcen); + Real dslop = Real(2.0) * ((std::abs(dlft) < std::abs(drgt)) ? + std::abs(dlft) : std::abs(drgt)); + Real dlim = (dlft*drgt >= Real(0.0)) ? dslop : Real(0.0); + dq(i,j,k) = dsgn*amrex::min(dlim, std::abs(dcen)); + } + } + } +} + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void slopey4(amrex::Box const& bx, + amrex::Array4 const& dq4, + amrex::Array4 const& q, + amrex::Array4 const& dq) +{ + using namespace amrex; + + const auto lo = amrex::lbound(bx); + const auto hi = amrex::ubound(bx); + + for (int k = lo.z; k <= hi.z; ++k) { + for (int j = lo.y; j <= hi.y; ++j) { + for (int i = lo.x; i <= hi.x; ++i) { + Real dlft = q(i,j,k) - q(i,j-1,k); + Real drgt = q(i,j+1,k) - q(i,j,k); + Real dcen = Real(0.5)*(dlft+drgt); + Real dsgn = std::copysign(Real(1.0), dcen); + Real dslop = Real(2.0) * ((std::abs(dlft) < std::abs(drgt)) ? + std::abs(dlft) : std::abs(drgt)); + Real dlim = (dlft*drgt >= Real(0.0)) ? dslop : Real(0.0); + Real dq1 = Real(4.0/3.0)*dcen - Real(1.0/6.0)*(dq(i,j+1,k) + dq(i,j-1,k)); + dq4(i,j,k) = dsgn*amrex::min(dlim, std::abs(dq1)); + } + } + } +} + +// *********************************************************** + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void slopez2(amrex::Box const& bx, + amrex::Array4 const& dq, + amrex::Array4 const& q) +{ + using namespace amrex; + + const auto lo = amrex::lbound(bx); + const auto hi = amrex::ubound(bx); + + for (int k = lo.z; k <= hi.z; ++k) { + for (int j = lo.y; j <= hi.y; ++j) { + for (int i = lo.x; i <= hi.x; ++i) { + Real dlft = q(i,j,k) - q(i,j,k-1); + Real drgt = q(i,j,k+1) - q(i,j,k); + Real dcen = Real(0.5)*(dlft+drgt); + Real dsgn = std::copysign(Real(1.0), dcen); + Real dslop = Real(2.0) * ((std::abs(dlft) < std::abs(drgt)) ? + std::abs(dlft) : std::abs(drgt)); + Real dlim = (dlft*drgt >= Real(0.0)) ? dslop : Real(0.0); + dq(i,j,k) = dsgn*amrex::min(dlim, std::abs(dcen)); + } + } + } +} + +AMREX_GPU_DEVICE +AMREX_FORCE_INLINE +void slopez4(amrex::Box const& bx, + amrex::Array4 const& dq4, + amrex::Array4 const& q, + amrex::Array4 const& dq) +{ + using namespace amrex; + + const auto lo = amrex::lbound(bx); + const auto hi = amrex::ubound(bx); + + for (int k = lo.z; k <= hi.z; ++k) { + for (int j = lo.y; j <= hi.y; ++j) { + for (int i = lo.x; i <= hi.x; ++i) { + Real dlft = q(i,j,k) - q(i,j,k-1); + Real drgt = q(i,j,k+1) - q(i,j,k); + Real dcen = Real(0.5)*(dlft+drgt); + Real dsgn = std::copysign(Real(1.0), dcen); + Real dslop = Real(2.0) * ((std::abs(dlft) < std::abs(drgt)) ? + std::abs(dlft) : std::abs(drgt)); + Real dlim = (dlft*drgt >= Real(0.0)) ? 
dslop : Real(0.0); + Real dq1 = Real(4.0/3.0)*dcen - Real(1.0/6.0)*(dq(i,j,k+1) + dq(i,j,k-1)); + dq4(i,j,k) = dsgn*amrex::min(dlim, std::abs(dq1)); + } + } + } +} + +#endif diff --git a/src_deankow/multilevel/StochasticPC.H b/src_deankow/multilevel/StochasticPC.H new file mode 100644 index 000000000..da33a5185 --- /dev/null +++ b/src_deankow/multilevel/StochasticPC.H @@ -0,0 +1,97 @@ +#ifndef STOCHASTIC_PC_H_ +#define STOCHASTIC_PC_H_ + +#include + +struct RealIdx +{ + enum { + xold = 0, + yold, zold, + ncomps + }; +}; + +struct IntIdx +{ + enum { + ncomps = 0 + }; +}; + +/* + + Helper function that converts global 3D index to *local* id index + in a given box. + + */ +struct FlatIndex { + amrex::Dim3 lo; + amrex::Dim3 hi; + FlatIndex (amrex::Box bx) + : lo(amrex::lbound(bx)), hi(amrex::ubound(bx)) {} + + AMREX_GPU_HOST_DEVICE + unsigned int operator() (int i, int j, int k) const { + int ix = i - lo.x; + int iy = j - lo.y; + int iz = k - lo.z; + int nx = hi.x-lo.x+1; + int ny = hi.y-lo.y+1; + int nz = hi.z-lo.z+1; + unsigned int uix = amrex::min(nx-1,amrex::max(0,ix)); + unsigned int uiy = amrex::min(ny-1,amrex::max(0,iy)); + unsigned int uiz = amrex::min(nz-1,amrex::max(0,iz)); + unsigned int cellid = (uix * ny + uiy) * nz + uiz; + return cellid; + } +}; + +class StochasticPC + : public amrex::ParticleContainer +{ + +public: + + StochasticPC (amrex::ParGDBBase* gdb) + : amrex::ParticleContainer(gdb) + {} + + StochasticPC (const amrex::Geometry & geom, + const amrex::DistributionMapping & dmap, + const amrex::BoxArray & ba) + : amrex::ParticleContainer(geom, dmap, ba) + {} + + void InitParticles (amrex::MultiFab& phi_fine); + void AddParticles (amrex::MultiFab& phi_fine, const amrex::BoxArray& ba_to_exclude); + void AdvectWithRandomWalk (int lev, amrex::Real dt); + void RemoveParticlesNotInBA (const amrex::BoxArray& ba_to_keep); + void RefluxCrseToFine (const amrex::BoxArray& ba_to_keep, amrex::MultiFab& phi_for_reflux); + void RefluxFineToCrse (const amrex::BoxArray& ba_to_keep, amrex::MultiFab& phi_for_reflux); + +protected: + amrex::ParticleLocator > m_reflux_particle_locator; +}; + +AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE +amrex::IntVect getNewCell (StochasticPC::ParticleType const& p, + amrex::GpuArray const& plo, + amrex::GpuArray const& dxi, + const amrex::Box& domain) noexcept; + +AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE +amrex::IntVect getOldCell (StochasticPC::ParticleType const& p, + amrex::GpuArray const& plo, + amrex::GpuArray const& dxi, + const amrex::Box& domain) noexcept; + +AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE +amrex::IntVect periodicCorrectOldCell (const amrex::IntVect& old_pos, + const amrex::IntVect& new_pos, + const amrex::GpuArray& is_periodic, + const amrex::Box& domain) noexcept; +#endif diff --git a/src_deankow/multilevel/StochasticPC.cpp b/src_deankow/multilevel/StochasticPC.cpp new file mode 100644 index 000000000..9677778b0 --- /dev/null +++ b/src_deankow/multilevel/StochasticPC.cpp @@ -0,0 +1,316 @@ +#include "StochasticPC.H" + +#include +#include +#include + +using namespace amrex; + +void +StochasticPC::InitParticles (MultiFab& phi_fine) +{ + AddParticles(phi_fine, BoxArray{}); +} + +void +StochasticPC:: AddParticles (MultiFab& phi_fine, const BoxArray& ba_to_exclude) +{ + BL_PROFILE("StochasticPC::AddParticles"); + + const int lev = 1; + const auto dx = Geom(lev).CellSizeArray(); + const auto plo = Geom(lev).ProbLoArray(); + +#if (AMREX_SPACEDIM == 2) + const Real cell_vol = dx[0]*dx[1]; +#else + const Real cell_vol = dx[0]*dx[1]*dx[2]; +#endif + 
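A minimal, self-contained sketch (plain C++ with hypothetical names, not the AMReX API and not part of this diff) of the count / exclusive-scan / fill idiom that AddParticles uses just below: a first ParallelForRNG pass records int(phi*cell_vol + Random()) particles-to-create per cell, the exclusive scan turns those counts into write offsets, and a second pass writes each cell's new particles into its contiguous slice of the resized particle tile.

#include <cstdio>
#include <numeric>
#include <vector>

int main () {
    // per-cell particle counts (stand-in for int(phi*cell_vol + random) computed on the GPU)
    std::vector<int> counts = {2, 0, 3, 1};

    // exclusive scan: offsets[c] is the first write slot for cell c, offsets.back() the total
    std::vector<int> offsets(counts.size() + 1, 0);
    std::partial_sum(counts.begin(), counts.end(), offsets.begin() + 1);

    std::vector<int> particles(offsets.back());               // "resize the particle tile"
    for (std::size_t c = 0; c < counts.size(); ++c) {         // second pass: each cell fills
        for (int ip = offsets[c]; ip < offsets[c+1]; ++ip) {  // its own slice independently
            particles[ip] = static_cast<int>(c);              // here: just tag with the cell id
        }
    }
    std::printf("particles created: %d\n", offsets.back());   // prints 6
    return 0;
}

Splitting the work into a count pass and a fill pass lets every cell write its particles without contending for a shared insertion index, which is why the GPU version below needs only two ParallelForRNG launches plus one scan rather than atomics on the tile size.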
+#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + for (MFIter mfi(phi_fine); mfi.isValid(); ++mfi) + { + Box tile_box = mfi.tilebox() & Geom(lev).Domain(); + + if (ba_to_exclude.contains(tile_box)) {continue;} + + const Array4& phi_arr = phi_fine.const_array(mfi); + + // count the number of particles to create in each cell + auto flat_index = FlatIndex(tile_box); + Gpu::DeviceVector counts(tile_box.numPts()+1, 0); + unsigned int* pcount = counts.dataPtr(); + amrex::ParallelForRNG(tile_box, + [=] AMREX_GPU_DEVICE (int i, int j, int k, amrex::RandomEngine const& engine) noexcept + { + Real rannum = amrex::Random(engine); + int npart_in_cell = int(phi_arr(i,j,k,0)*cell_vol+rannum); + pcount[flat_index(i, j, k)] += npart_in_cell; + }); + + // fill offsets + Gpu::DeviceVector offsets(tile_box.numPts()+1, 0); + Gpu::exclusive_scan(counts.begin(), counts.end(), offsets.begin()); + + // the last offset is the total number of particles to add + unsigned int num_to_add; + Gpu::copy(Gpu::deviceToHost, offsets.begin() + tile_box.numPts(), offsets.end(), &num_to_add); + if (num_to_add == 0) continue; + + // Get the ids and cpu numbers to assign + int my_cpu = ParallelDescriptor::MyProc(); + Long id_start; +#ifdef AMREX_USE_OMP +#pragma omp critical (init_particles_next_id) +#endif + { + id_start = ParticleType::NextID(); + ParticleType::NextID(id_start+num_to_add); + } + + // resize particle storage + auto& particles = GetParticles(lev); + auto& particle_tile = particles[std::make_pair(mfi.index(), mfi.LocalTileIndex())]; + auto old_size = particle_tile.GetArrayOfStructs().size(); + auto new_size = old_size + num_to_add; + particle_tile.resize(new_size); + amrex::Print() << "INIT: NEW SIZE OF PARTICLES " << new_size << std::endl; + + // now fill in the data + ParticleType* pstruct = particle_tile.GetArrayOfStructs()().data() + old_size; + unsigned int* poffset = offsets.dataPtr(); + amrex::ParallelForRNG(tile_box, + [=] AMREX_GPU_DEVICE (int i, int j, int k, amrex::RandomEngine const& engine) noexcept + { + auto cellid = flat_index(i, j, k); + + auto start = poffset[cellid]; + auto stop = poffset[cellid+1]; + + for (unsigned int ip = start; ip < stop; ++ip) { + ParticleType& p = pstruct[ip]; + +#if (AMREX_SPACEDIM == 2) + Real r[2] = {amrex::Random(engine), amrex::Random(engine)}; +#elif (AMREX_SPACEDIM == 3) + Real r[3] = {amrex::Random(engine), amrex::Random(engine), amrex::Random(engine)}; +#endif + AMREX_D_TERM( Real x = plo[0] + (i + r[0])*dx[0];, + Real y = plo[1] + (j + r[1])*dx[1];, + Real z = plo[2] + (k + r[2])*dx[2];); + + p.id() = ip + id_start; + p.cpu() = my_cpu; + + AMREX_D_TERM( p.pos(0) = x;, + p.pos(1) = y;, + p.pos(2) = z;); + + AMREX_D_TERM( p.rdata(RealIdx::xold) = x;, + p.rdata(RealIdx::yold) = y;, + p.rdata(RealIdx::zold) = z;); + } + }); + } +} + +void +StochasticPC::RemoveParticlesNotInBA (const BoxArray& ba_to_keep) +{ + BL_PROFILE("StochasticPC::RemoveParticles"); + const int lev = 1; + + for(ParIterType pti(*this, lev); pti.isValid(); ++pti) + { + auto& ptile = ParticlesAt(lev, pti); + auto& aos = ptile.GetArrayOfStructs(); + const int np = aos.numParticles(); + auto *pstruct = aos().data(); + + if (!ba_to_keep.contains(pti.tilebox())) { + amrex::ParallelFor(np, [=] AMREX_GPU_DEVICE (int i) + { + ParticleType& p = pstruct[i]; + p.id() = -1; + }); +} + } + Redistribute(); +} + +void +StochasticPC::RefluxFineToCrse (const BoxArray& ba_to_keep, MultiFab& phi_for_reflux) +{ + BL_PROFILE("StochasticPC::RefluxFineToCrse"); + const int lev = 
1; + const auto geom_lev = Geom(lev); + const auto dxi_lev = Geom(lev).InvCellSizeArray(); + const auto plo_lev = Geom(lev).ProbLoArray(); + const auto domain_lev = Geom(lev).Domain(); + + if (!m_reflux_particle_locator.isValid(ba_to_keep)) { + m_reflux_particle_locator.build(ba_to_keep, Geom(lev)); + } + m_reflux_particle_locator.setGeometry(Geom(lev)); + auto assign_grid = m_reflux_particle_locator.getGridAssignor(); + + for(ParIterType pti(*this, lev); pti.isValid(); ++pti) + { + auto& ptile = ParticlesAt(lev, pti); + auto& aos = ptile.GetArrayOfStructs(); + const int np = aos.numParticles(); + auto *pstruct = aos().data(); + + if (!ba_to_keep.contains(pti.tilebox())) { + + Array4 phi_arr = phi_for_reflux.array(pti.index()); + + amrex::ParallelFor(np, [=] AMREX_GPU_DEVICE (int i) + { + ParticleType& p = pstruct[i]; + auto old_pos = getOldCell(p, plo_lev, dxi_lev, domain_lev); + auto new_pos = getNewCell(p, plo_lev, dxi_lev, domain_lev); + if ( (assign_grid(old_pos) >= 0) && (assign_grid(new_pos) < 0)) { + Gpu::Atomic::AddNoRet(&phi_arr(new_pos,0), 1.0); + } + }); + } // if not in ba_to_keep + } // pti +} + +AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE +IntVect getNewCell (StochasticPC::ParticleType const& p, + GpuArray const& plo, + GpuArray const& dxi, + const Box& domain) noexcept +{ + return getParticleCell(p, plo, dxi, domain);; +} + +AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE +IntVect getOldCell (StochasticPC::ParticleType const& p, + GpuArray const& plo, + GpuArray const& dxi, + const Box& domain) noexcept +{ + IntVect iv( + AMREX_D_DECL(int(Math::floor((p.rdata(RealIdx::xold)-plo[0])*dxi[0])), + int(Math::floor((p.rdata(RealIdx::yold)-plo[1])*dxi[1])), + int(Math::floor((p.rdata(RealIdx::zold)-plo[2])*dxi[2])))); + iv += domain.smallEnd(); + return iv; +} + +AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE +IntVect periodicCorrectOldCell (const IntVect& old_pos, const IntVect& new_pos, + const GpuArray& is_per, + const Box& domain) noexcept +{ + IntVect shifted = old_pos; + for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) + { + if (!is_per[idim]) { continue; } + if (Real(new_pos[idim] - old_pos[idim]) > 0.5*Real(domain.length(idim))) { + shifted[idim] += domain.length(idim); + continue; + } + if (Real(new_pos[idim] - old_pos[idim]) < -0.5*Real(domain.length(idim))) { + shifted[idim] -= domain.length(idim); + continue; + } + } + return shifted; +} + +void +StochasticPC::RefluxCrseToFine (const BoxArray& ba_to_keep, MultiFab& phi_for_reflux) +{ + BL_PROFILE("StochasticPC::RefluxCrseToFine"); + const int lev = 1; + const auto geom_lev = Geom(lev); + const auto dxi_lev = Geom(lev).InvCellSizeArray(); + const auto plo_lev = Geom(lev).ProbLoArray(); + const auto domain_lev = Geom(lev).Domain(); + const auto is_per = Geom(lev).isPeriodicArray(); + + if (!m_reflux_particle_locator.isValid(ba_to_keep)) { + m_reflux_particle_locator.build(ba_to_keep, Geom(lev)); + } + m_reflux_particle_locator.setGeometry(Geom(lev)); + auto assign_grid = m_reflux_particle_locator.getGridAssignor(); + + for(ParIterType pti(*this, lev); pti.isValid(); ++pti) + { + auto const& gid = pti.index(); + auto& ptile = ParticlesAt(lev, pti); + auto& aos = ptile.GetArrayOfStructs(); + const int np = aos.numParticles(); + auto *pstruct = aos().data(); + + if (ba_to_keep.contains(pti.tilebox())) + { + Array4 phi_arr = phi_for_reflux.array(gid); + + amrex::ParallelFor(np, [=] AMREX_GPU_DEVICE (int i) + { + ParticleType& p = pstruct[i]; + + auto old_pos = getOldCell(p, plo_lev, dxi_lev, domain_lev); + auto new_pos = getNewCell(p, 
plo_lev, dxi_lev, domain_lev); + + // Make a box of the cell holding the particle in its previous position + Box bx(old_pos,old_pos); + + if ( (assign_grid(old_pos) < 0) && (assign_grid(new_pos) >= 0)) + { + if (Box(phi_arr).contains(old_pos)) { + Gpu::Atomic::AddNoRet(&phi_arr(old_pos,0), -1.0); + } else { + auto shifted_pos = periodicCorrectOldCell(old_pos, new_pos, + is_per, domain_lev); + Gpu::Atomic::AddNoRet(&phi_arr(shifted_pos,0), -1.0); + } // else + } // if crossed the coarse-fine boundary + }); // i + } // if in ba_to_keep + } // pti +} + +void +StochasticPC::AdvectWithRandomWalk (int lev, Real dt) +{ + BL_PROFILE("StochasticPC::AdvectWithRandomWalk"); + const auto dx = Geom(lev).CellSizeArray(); + + Real stddev = std::sqrt(dt); + + for(ParIterType pti(*this, lev); pti.isValid(); ++pti) + { + auto& ptile = ParticlesAt(lev, pti); + auto& aos = ptile.GetArrayOfStructs(); + const int np = aos.numParticles(); + auto *pstruct = aos().data(); + + amrex::ParallelForRNG( np, [=] AMREX_GPU_DEVICE (int i, RandomEngine const& engine) noexcept + { + ParticleType& p = pstruct[i]; + AMREX_D_TERM( p.rdata(RealIdx::xold) = p.pos(0);, + p.rdata(RealIdx::yold) = p.pos(1);, + p.rdata(RealIdx::zold) = p.pos(2);); + + AMREX_D_TERM( Real incx = amrex::RandomNormal(0.,stddev,engine);, + Real incy = amrex::RandomNormal(0.,stddev,engine);, + Real incz = amrex::RandomNormal(0.,stddev,engine);); + + AMREX_D_TERM( incx = std::max(-dx[0], std::min( dx[0], incx));, + incy = std::max(-dx[1], std::min( dx[1], incy));, + incz = std::max(-dx[2], std::min( dx[2], incz));); + + AMREX_D_TERM( p.pos(0) += static_cast (incx);, + p.pos(1) += static_cast (incy);, + p.pos(2) += static_cast (incz);); + }); // np + } // pti +} diff --git a/src_deankow/multilevel/Tagging.H b/src_deankow/multilevel/Tagging.H new file mode 100644 index 000000000..007aaeae8 --- /dev/null +++ b/src_deankow/multilevel/Tagging.H @@ -0,0 +1,19 @@ +#ifndef TAGGING_H +#define TAGGING_H + +#include + +AMREX_GPU_HOST_DEVICE +AMREX_FORCE_INLINE +void +state_error (int i, int j, int k, + amrex::Array4 const& tag, + amrex::Array4 const& state, + amrex::Real phierr, char tagval) +{ + if (state(i,j,k) < phierr) { + tag(i,j,k) = tagval; + } +} + +#endif diff --git a/src_deankow/multilevel/bc_fill.H b/src_deankow/multilevel/bc_fill.H new file mode 100644 index 000000000..ad890b7e3 --- /dev/null +++ b/src_deankow/multilevel/bc_fill.H @@ -0,0 +1,21 @@ +#ifndef BCFILL_H +#define BCFILL_H + +#include +#include +#include + +struct AmrCoreFill +{ + AMREX_GPU_DEVICE + void operator() (const amrex::IntVect& /*iv*/, amrex::Array4 const& /*data*/, + const int /*dcomp*/, const int /*numcomp*/, + amrex::GeometryData const& /*geom*/, const amrex::Real /*time*/, + const amrex::BCRec* /*bcr*/, const int /*bcomp*/, + const int /*orig_comp*/) const + { + // do something for external Dirichlet (BCType::ext_dir) + } +}; + +#endif diff --git a/src_deankow/multilevel/main.cpp b/src_deankow/multilevel/main.cpp new file mode 100644 index 000000000..cd9f4bebf --- /dev/null +++ b/src_deankow/multilevel/main.cpp @@ -0,0 +1,44 @@ + +#include + +#include +#include +#include + +#include + +using namespace amrex; + +int main(int argc, char* argv[]) +{ + amrex::Initialize(argc,argv); + + { + // timer for profiling + BL_PROFILE("main()"); + + // wallclock time + const auto strt_total = amrex::second(); + + // constructor - reads in parameters from inputs file + // - sizes multilevel arrays and data structures + AmrCoreAdv amr_core_adv; + + // initialize AMR data + amr_core_adv.InitData(); 
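As an aside on the particle update defined just above in StochasticPC.cpp: AdvectWithRandomWalk first copies the current position into (xold, yold, zold) — the reflux routines read it back through getOldCell — and then adds an independent Gaussian increment with standard deviation sqrt(dt) to each coordinate, clamped to one cell width. A minimal single-particle sketch of that update rule (plain C++ with illustrative values; not part of this diff):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <random>

int main () {
    const double dt = 1.0e-5, dx = 1.0/64;      // time step and cell size (illustrative only)
    const double stddev = std::sqrt(dt);        // Brownian increment ~ N(0, dt)
    std::mt19937 gen(1024);
    std::normal_distribution<double> normal(0.0, stddev);

    double x = 0.5, xold = x;                   // one particle, one coordinate
    for (int step = 0; step < 1000; ++step) {
        xold = x;                                        // save old position (used for refluxing)
        double inc = std::clamp(normal(gen), -dx, dx);   // limit the hop to one cell per step
        x += inc;
    }
    std::printf("x = %f (last old position %f)\n", x, xold);
    return 0;
}

The sqrt(dt) scaling is what makes the walk consistent with a diffusion process: over n steps the displacement variance grows like n*dt, i.e. with elapsed time, independent of how the interval is subdivided (ignoring the rarely-triggered clamp).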
+ + // advance solution to final time + amr_core_adv.Evolve(); + + // wallclock time + auto end_total = amrex::second() - strt_total; + + if (amr_core_adv.Verbose()) { + // print wallclock time + ParallelDescriptor::ReduceRealMax(end_total ,ParallelDescriptor::IOProcessorNumber()); + amrex::Print() << "\nTotal Time: " << end_total << '\n'; + } + } + + amrex::Finalize(); +} diff --git a/src_deankow/multilevel/myfunc.H b/src_deankow/multilevel/myfunc.H new file mode 100644 index 000000000..cfa0d0b14 --- /dev/null +++ b/src_deankow/multilevel/myfunc.H @@ -0,0 +1,17 @@ +#ifndef MYFUNC_H_ +#define MYFUNC_H_ + +#include +#include +#include + +using namespace amrex; + +void advance_phi (amrex::MultiFab& phi_old, + amrex::MultiFab& phi_new, + amrex::Array& flux, + amrex::Array& stochFlux, + amrex::Real dt, amrex::Real npts_scale, + amrex::Geometry const& geom, + Vector const& BoundaryCondition); +#endif diff --git a/src_deankow/multilevel/myfunc.cpp b/src_deankow/multilevel/myfunc.cpp new file mode 100644 index 000000000..400de8795 --- /dev/null +++ b/src_deankow/multilevel/myfunc.cpp @@ -0,0 +1,145 @@ +#include "myfunc.H" +#include "mykernel.H" +#include +#include +#include "common_functions.H" +#include "rng_functions.H" + +void advance_phi (MultiFab& phi_old, + MultiFab& phi_new, + Array& flux, + Array& stochFlux, + Real dt, + Real /*npts_scale*/, + Geometry const& geom, + Vector const& BoundaryCondition) +{ + int Ncomp = phi_old.nComp(); + + AMREX_D_TERM(const Real dxinv = geom.InvCellSize(0);, + const Real dyinv = geom.InvCellSize(1);, + const Real dzinv = geom.InvCellSize(2);); + + const Box& domain_bx = geom.Domain(); + const auto dom_lo = lbound(domain_bx); + const auto dom_hi = ubound(domain_bx); + + //Real variance = dxinv*dyinv/(npts_scale*dt); + Real variance = dxinv*dyinv/dt; + +#if(AMREX_SPACEDIM > 2) + variance *=dzinv; +#endif + + // fill random numbers (can skip density component 0) + for(int d=0;d 2) + const Box& zbx = mfi.nodaltilebox(2); + auto const& fluxz = flux[2].array(mfi); +#endif + auto const& stochfluxx = stochFlux[0].array(mfi); + auto const& stochfluxy = stochFlux[1].array(mfi); +#if (AMREX_SPACEDIM > 2) + auto const& stochfluxz = stochFlux[2].array(mfi); +#endif + const Box& bx = mfi.validbox(); + const auto lo = lbound(bx); + const auto hi = ubound(bx); + + auto const& phi = phi_old.array(mfi); + + amrex::ParallelFor(xbx, + [=] AMREX_GPU_DEVICE (int i, int j, int k) + { + compute_flux_x(i,j,k,fluxx,stochfluxx,phi,dxinv, + lo.x, hi.x, dom_lo.x, dom_hi.x, bc.lo(0), bc.hi(0),Ncomp); + }); + + amrex::ParallelFor(ybx, + [=] AMREX_GPU_DEVICE (int i, int j, int k) + { + compute_flux_y(i,j,k,fluxy,stochfluxy,phi,dyinv, + lo.y, hi.y, dom_lo.y, dom_hi.y, bc.lo(1), bc.hi(1),Ncomp); + }); +#if (AMREX_SPACEDIM > 2) + amrex::ParallelFor(zbx, + [=] AMREX_GPU_DEVICE (int i, int j, int k) + { + compute_flux_z(i,j,k,fluxz,stochfluxz,phi,dzinv, + lo.z, hi.z, dom_lo.z, dom_hi.z, bc.lo(2), bc.hi(2),Ncomp); + }); +#endif + } + + // Advance the solution one grid at a time + for ( MFIter mfi(phi_old); mfi.isValid(); ++mfi ) + { + const Box& vbx = mfi.validbox(); + auto const& fluxx = flux[0].array(mfi); + auto const& fluxy = flux[1].array(mfi); +#if (AMREX_SPACEDIM > 2) + auto const& fluxz = flux[2].array(mfi); +#endif + auto const& phiOld = phi_old.array(mfi); + auto const& phiNew = phi_new.array(mfi); + + amrex::ParallelFor(vbx, + [=] AMREX_GPU_DEVICE (int i, int j, int k) + { + update_phi(i,j,k,phiOld,phiNew, + AMREX_D_DECL(fluxx,fluxy,fluxz), + dt, + AMREX_D_DECL(dxinv,dyinv,dzinv), + 
Ncomp); + }); + } + + Real dx, dy, dz = 1.; + AMREX_D_TERM(dx = geom.CellSize(0);, + dy = geom.CellSize(1);, + dz = geom.CellSize(2);); + + // Compute fluxes one grid at a time + for ( MFIter mfi(phi_old); mfi.isValid(); ++mfi ) + { + const Box& xbx = mfi.nodaltilebox(0); + const Box& ybx = mfi.nodaltilebox(1); + + auto const& fluxx = flux[0].array(mfi); + auto const& fluxy = flux[1].array(mfi); + + amrex::ParallelFor(xbx, Ncomp, + [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) + { + fluxx(i,j,k,n) *= dt * dy * dz; + }); + + amrex::ParallelFor(ybx, Ncomp, + [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) + { + fluxy(i,j,k,n) *= dt * dx * dz; + }); + +#if (AMREX_SPACEDIM > 2) + const Box& zbx = mfi.nodaltilebox(2); + auto const& fluxz = flux[2].array(mfi); + amrex::ParallelFor(zbx, Ncomp, + [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) + { + fluxz(i,j,k,n) *= dt * dx * dy; + }); +#endif + } +} diff --git a/src_deankow/multilevel/mykernel.H b/src_deankow/multilevel/mykernel.H new file mode 100644 index 000000000..7c131cc5c --- /dev/null +++ b/src_deankow/multilevel/mykernel.H @@ -0,0 +1,265 @@ +#ifndef MY_KERNEL_H_ +#define MY_KERNEL_H_ + +#include + +AMREX_GPU_DEVICE AMREX_FORCE_INLINE +void init_phi (int i, int j, int k, + amrex::Array4 const& phi, + amrex::GpuArray const& dx, + amrex::GpuArray const& prob_lo, + amrex::Real /*npts_scale*/, int Ncomp) +{ + using amrex::Real;; + Real cellvol = dx[0]*dx[1]; + +#if(AMREX_SPACEDIM > 2) + cellvol *=dx[2]; +#endif + + +#if 0 + Real rad = Real(0.1); + Real xcent = Real(0.5); + Real ycent = Real(0.5); + Real x = prob_lo[0] + (i+Real(0.5)) * dx[0] - xcent; + Real y = prob_lo[1] + (j+Real(0.5)) * dx[1] - ycent; + +#if (AMREX_SPACEDIM > 2) + Real zcent = Real(0.5); + Real z = prob_lo[2] + (k+Real(0.5)) * dx[2] - zcent; + Real r2 = (x*x + y*y + z*z) / (rad*rad); +#else + Real z = Real(0.); + Real r2 = (x*x + y*y) / (rad*rad); +#endif + + // Gaussian + phi(i,j,k,0) = Real(1.) 
+ std::exp(-r2); +#endif + +#if 0 + // Jump in x-direction + Real x = prob_lo[0] + (i+Real(0.5)) * dx[0]; + Real y = prob_lo[1] + (j+Real(0.5)) * dx[1]; + if ( x > 0.5) { + phi(i,j,k,0) = 1.5*npts_scale; + } else { + phi(i,j,k,0) = 0.5*npts_scale; + } +#endif + +#if 0 + // Delta function + if (i==4 && j ==4) + { + phi(i,j,k,0) = 1./(dx[0]*dx[1]); + } + else + { + phi(i,j,k,0) = 0;; + } +#endif + +#if 1 + // 2D Test for multilevel + Real rad = Real(0.2); + Real xcent = Real(0.5); + Real ycent = Real(0.5); + Real x = prob_lo[0] + (i+Real(0.5)) * dx[0] - xcent; + Real y = prob_lo[1] + (j+Real(0.5)) * dx[1] - ycent; + Real r2 = (x*x + y*y); +#if (AMREX_SPACEDIM == 3) + Real zcent = Real(0.75); + Real z = prob_lo[2] + (k+Real(0.5)) * dx[2] - zcent; + r2 = (x*x + y*y+z*z); +#endif + + if ( r2 > rad*rad) { + phi(i,j,k,0) = 15./cellvol; + } else { + phi(i,j,k,0) = 0./cellvol; + } +#endif + + if (Ncomp == 2){ + phi(i,j,k,1) = phi(i,j,k,0); + } +} + + +AMREX_GPU_DEVICE AMREX_FORCE_INLINE +void compute_flux_x (int i, int j, int k, + amrex::Array4 const& fluxx, + amrex::Array4 const& stochfluxx, + amrex::Array4 const& phi, amrex::Real dxinv, + int lo, int hi, int dom_lo, int dom_hi, int bc_lo, int bc_hi,int Ncomp) +{ + int coeff_index = Ncomp-1; + if (lo == dom_lo && + (bc_lo == amrex::BCType::foextrap || + bc_lo == amrex::BCType::ext_dir)) + { + if(i == lo) + { + fluxx(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i-1,j,k,0)) * dxinv / 0.5; + } + else + { + fluxx(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i-1,j,k,0)) * dxinv; + } + } + else if (hi == dom_hi && + (bc_hi == amrex::BCType::foextrap || + bc_hi == amrex::BCType::ext_dir)) + { + if(i == hi+1) + { + fluxx(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i-1,j,k,0)) * dxinv / 0.5; + } + else + { + fluxx(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i-1,j,k,0)) * dxinv; + } + } + else + { +// amrex::Real phiavg = 2.*(phi(i,j,k)*phi(i-1,j,k))/(phi(i,j,k)+phi(i-1,j,k)+1.e-16); + amrex::Real phip = std::max(phi(i,j,k,coeff_index),0.); + amrex::Real phim = std::max(phi(i-1,j,k,coeff_index),0.); + //amrex::Real phiavg = 2.*(phip*phim)/(phip+phim+1.e-16); + amrex::Real phiavg = 0.5*(phip+phim); + fluxx(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i-1,j,k,0)) * dxinv + + stochfluxx(i,j,k) * std::sqrt(phiavg); + if(Ncomp == 2){ + fluxx(i,j,k,1) = 0.5*(phi(i,j,k,1)-phi(i-1,j,k,1)) * dxinv; + } + } +} + +AMREX_GPU_DEVICE AMREX_FORCE_INLINE +void compute_flux_y (int i, int j, int k, + amrex::Array4 const& fluxy, + amrex::Array4 const& stochfluxy, + amrex::Array4 const& phi, amrex::Real dyinv, + int lo, int hi, int dom_lo, int dom_hi, int bc_lo, int bc_hi, int Ncomp) +{ + int coeff_index = Ncomp-1; + if (lo == dom_lo && + (bc_lo == amrex::BCType::foextrap || + bc_lo == amrex::BCType::ext_dir)) + { + if(j == lo) + { + fluxy(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j-1,k,0)) * dyinv / 0.5; + } + else + { + fluxy(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j-1,k,0)) * dyinv; + } + } + else if (hi == dom_hi && + (bc_hi == amrex::BCType::foextrap || + bc_hi == amrex::BCType::ext_dir)) + { + if(j == hi+1) + { + fluxy(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j-1,k,0)) * dyinv / 0.5; + } + else + { + fluxy(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j-1,k,0)) * dyinv; + } + } + else + { +// amrex::Real phiavg = 2.*(phi(i,j,k)*phi(i,j-1,k))/(phi(i,j,k)+phi(i,j-1,k)+1.e-16); + amrex::Real phip = std::max(phi(i,j,k,coeff_index),0.); + amrex::Real phim = std::max(phi(i,j-1,k,coeff_index),0.); + //amrex::Real phiavg = 2.*(phip*phim)/(phip+phim+1.e-16); + amrex::Real phiavg = 0.5*(phip+phim); + fluxy(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j-1,k,0)) * dyinv + + 
stochfluxy(i,j,k)* std::sqrt(phiavg); + if(Ncomp == 2){ + fluxy(i,j,k,1) = 0.5*(phi(i,j,k,1)-phi(i,j-1,k,1)) * dyinv; + } + } +} + + +#if (AMREX_SPACEDIM > 2) +AMREX_GPU_DEVICE AMREX_FORCE_INLINE +void compute_flux_z (int i, int j, int k, + amrex::Array4 const& fluxz, + amrex::Array4 const& stochfluxz, + amrex::Array4 const& phi, amrex::Real dzinv, + int lo, int hi, int dom_lo, int dom_hi, int bc_lo, int bc_hi, int Ncomp) +{ + int coeff_index = Ncomp-1; + if (lo == dom_lo && + (bc_lo == amrex::BCType::foextrap || + bc_lo == amrex::BCType::ext_dir)) + { + if(k == lo) + { + fluxz(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j,k-1,0)) * dzinv / 0.5; + } + else + { + fluxz(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j,k-1,0)) * dzinv; + } + } + else if (hi == dom_hi && + (bc_hi == amrex::BCType::foextrap || + bc_hi == amrex::BCType::ext_dir)) + { + if(k == hi+1) + { + fluxz(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j,k-1,0)) * dzinv / 0.5; + } + else + { + fluxz(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j,k-1,0)) * dzinv; + } + } + else + { +// amrex::Real phiavg = 2.*(phi(i,j,k)*phi(i,j,k-1))/(phi(i,j,k)+phi(i,j,k-1)+1.e-16); + amrex::Real phip = std::max(phi(i,j,k,coeff_index),0.); + amrex::Real phim = std::max(phi(i,j,k-1,coeff_index),0.); + amrex::Real phiavg = 2.*(phip*phim)/(phip+phim+1.e-16); + fluxz(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j,k-1,0)) * dzinv + +stochfluxz(i,j,k) * std::sqrt(phiavg); + if(Ncomp == 2){ + fluxz(i,j,k,1) = 0.5*(phi(i,j,k,1)-phi(i,j,k-1,1)) * dzinv; + } + } +} +#endif + +AMREX_GPU_DEVICE AMREX_FORCE_INLINE +void update_phi (int i, int j, int k, + amrex::Array4 const& phiold, + amrex::Array4 const& phinew, + AMREX_D_DECL(amrex::Array4 const& fluxx, + amrex::Array4 const& fluxy, + amrex::Array4 const& fluxz), + amrex::Real dt, + AMREX_D_DECL(amrex::Real dxinv, + amrex::Real dyinv, + amrex::Real dzinv), + int Ncomp) +{ + for (int n = 0; n < Ncomp; n++){ + phinew(i,j,k,n) = phiold(i,j,k,n) + + dt * dxinv * (fluxx(i+1,j ,k,n ) - fluxx(i,j,k,n)) + + dt * dyinv * (fluxy(i ,j+1,k,n ) - fluxy(i,j,k,n)) +#if (AMREX_SPACEDIM > 2) + + dt * dzinv * (fluxz(i ,j ,k+1,n) - fluxz(i,j,k,n)); +#else + ; +#endif + } +} + +#endif diff --git a/src_deankow/singlelevel/Make.package b/src_deankow/singlelevel/Make.package new file mode 100644 index 000000000..9d738c91b --- /dev/null +++ b/src_deankow/singlelevel/Make.package @@ -0,0 +1,2 @@ +CEXE_sources += main.cpp myfunc.cpp +CEXE_heaeders += myfunc.H mykernel.H diff --git a/src_deankow/main.cpp b/src_deankow/singlelevel/main.cpp similarity index 93% rename from src_deankow/main.cpp rename to src_deankow/singlelevel/main.cpp index bec71e3da..1323564a4 100644 --- a/src_deankow/main.cpp +++ b/src_deankow/singlelevel/main.cpp @@ -32,6 +32,7 @@ void main_main () // AMREX_SPACEDIM: number of dimensions int n_cell, max_grid_size, nsteps, plot_int; + int alg_type; amrex::Real npts_scale; amrex::Real cfl; Vector bc_lo(AMREX_SPACEDIM,0); @@ -61,6 +62,9 @@ void main_main () npts_scale = 1.; pp.query ("npts_scale",npts_scale); + alg_type = 0; + pp.query ("alg_type",alg_type); + cfl=.9; pp.query ("cfl",cfl); @@ -104,7 +108,12 @@ void main_main () int Nghost = 1; // Ncomp = number of components for each array - int Ncomp = 1; + int Ncomp; + if(alg_type == 0){ + Ncomp = 1; + } else { + Ncomp = 2; + } // How Boxes are distrubuted among MPI processes DistributionMapping dm(ba); @@ -143,7 +152,7 @@ void main_main () GpuArray dx = geom.CellSizeArray(); - init_phi(phi_new, geom,npts_scale); + init_phi(phi_new, geom,npts_scale, Ncomp); //Boundary conditions are assigned to phi_old such 
that the ghost cells at the boundary will //be filled to satisfy those conditions. @@ -200,7 +209,7 @@ void main_main () if (plot_int > 0) { int n = 0; - const std::string& pltfile = amrex::Concatenate("plt",n,5); + const std::string& pltfile = amrex::Concatenate("plt",n,6); WriteSingleLevelPlotfile(pltfile, phi_new, {"phi"}, geom, time, 0); } @@ -212,8 +221,8 @@ void main_main () // flux(dir) has one component, zero ghost cells, and is nodal in direction dir BoxArray edge_ba = ba; edge_ba.surroundingNodes(dir); - flux[dir].define(edge_ba, dm, 1, 0); - stochFlux[dir].define(edge_ba, dm, 1, 0); + flux[dir].define(edge_ba, dm, Ncomp, 0); + stochFlux[dir].define(edge_ba, dm, Ncomp, 0); } AMREX_D_TERM(stochFlux[0].setVal(0.0);, @@ -223,10 +232,10 @@ void main_main () for (int n = 1; n <= nsteps; ++n) { - MultiFab::Copy(phi_old, phi_new, 0, 0, 1, 0); + MultiFab::Copy(phi_old, phi_new, 0, 0, Ncomp, 0); // new_phi = old_phi + dt * (something) - advance(phi_old, phi_new, flux, stochFlux, dt, npts_scale, geom, bc); + advance(phi_old, phi_new, flux, stochFlux, dt, npts_scale, geom, bc, Ncomp); time = time + dt; // Tell the I/O Processor to write out which step we're doing @@ -235,13 +244,12 @@ void main_main () // Write a plotfile of the current data (plot_int was defined in the inputs file) if (plot_int > 0 && n%plot_int == 0) { - const std::string& pltfile = amrex::Concatenate("plt",n,5); + const std::string& pltfile = amrex::Concatenate("plt",n,6); WriteSingleLevelPlotfile(pltfile, phi_new, {"phi"}, geom, time, n); - } // amrex::Real Ephi=0.; // amrex::Real Ephi2=0.; - Vector Ephi(2,0.); + Vector Ephi(3,0.); Real Ephimin = npts_scale; for ( MFIter mfi(phi_old); mfi.isValid(); ++mfi ) { @@ -257,6 +265,7 @@ void main_main () Ephi[0] += phiNew(i,j,k); Ephi[1] += phiNew(i,j,k)*phiNew(i,j,k); Ephimin = std::min(Ephimin,phiNew(i,j,k)); + Ephi[2] += (phiNew(i,j,k) < 0) ? 1. : 0. 
; } } } @@ -265,7 +274,7 @@ void main_main () } const int IOProc = ParallelDescriptor::IOProcessorNumber(); - ParallelDescriptor::ReduceRealSum(Ephi.dataPtr(),2); + ParallelDescriptor::ReduceRealSum(Ephi.dataPtr(),3); ParallelDescriptor::ReduceRealMin(Ephimin); amrex::Real scale = n_cell*n_cell; amrex::Real scale2 = AMREX_D_TERM( dx[0], @@ -275,7 +284,9 @@ void main_main () amrex::Print() << "phi variance = " << Ephi[1]/scale - (Ephi[0]*Ephi[0] /(scale*scale)) << std::endl; amrex::Print() << "phi integral = " << Ephi[0]*scale2 << std::endl; - amrex::Print() << "phi min = " << Ephimin << std::endl; + amrex::Print() << "phi min = " << Ephimin << " Number of negative points " << Ephi[2] << std::endl; + + } } diff --git a/src_deankow/myfunc.H b/src_deankow/singlelevel/myfunc.H similarity index 82% rename from src_deankow/myfunc.H rename to src_deankow/singlelevel/myfunc.H index 0f3bc0ff6..2b71fed3d 100644 --- a/src_deankow/myfunc.H +++ b/src_deankow/singlelevel/myfunc.H @@ -16,7 +16,8 @@ void advance (amrex::MultiFab& phi_old, amrex::Real dt, amrex::Real npts_scale, amrex::Geometry const& geom, - Vector const& BoundaryCondition); + Vector const& BoundaryCondition, + int Ncomp); -void init_phi (amrex::MultiFab& phi_new, amrex::Geometry const& geom, amrex::Real npts_scale); +void init_phi (amrex::MultiFab& phi_new, amrex::Geometry const& geom, amrex::Real npts_scale, int Ncomp); #endif diff --git a/src_deankow/myfunc.cpp b/src_deankow/singlelevel/myfunc.cpp similarity index 93% rename from src_deankow/myfunc.cpp rename to src_deankow/singlelevel/myfunc.cpp index 9eb5d573d..03adfcb6e 100644 --- a/src_deankow/myfunc.cpp +++ b/src_deankow/singlelevel/myfunc.cpp @@ -12,7 +12,8 @@ void advance (MultiFab& phi_old, Real dt, Real npts_scale, Geometry const& geom, - Vector const& BoundaryCondition) + Vector const& BoundaryCondition, + int Ncomp) { // Fill the ghost cells of each grid from the other grids @@ -85,21 +86,21 @@ void advance (MultiFab& phi_old, [=] AMREX_GPU_DEVICE (int i, int j, int k) { compute_flux_x(i,j,k,fluxx,stochfluxx,phi,dxinv, - lo.x, hi.x, dom_lo.x, dom_hi.x, bc.lo(0), bc.hi(0)); + lo.x, hi.x, dom_lo.x, dom_hi.x, bc.lo(0), bc.hi(0),Ncomp); }); amrex::ParallelFor(ybx, [=] AMREX_GPU_DEVICE (int i, int j, int k) { compute_flux_y(i,j,k,fluxy,stochfluxy,phi,dyinv, - lo.y, hi.y, dom_lo.y, dom_hi.y, bc.lo(1), bc.hi(1)); + lo.y, hi.y, dom_lo.y, dom_hi.y, bc.lo(1), bc.hi(1),Ncomp); }); #if (AMREX_SPACEDIM > 2) amrex::ParallelFor(zbx, [=] AMREX_GPU_DEVICE (int i, int j, int k) { compute_flux_z(i,j,k,fluxz,stochfluxz,phi,dzinv, - lo.z, hi.z, dom_lo.z, dom_hi.z, bc.lo(2), bc.hi(2)); + lo.z, hi.z, dom_lo.z, dom_hi.z, bc.lo(2), bc.hi(2),Ncomp); }); #endif } @@ -122,12 +123,13 @@ void advance (MultiFab& phi_old, update_phi(i,j,k,phiOld,phiNew, AMREX_D_DECL(fluxx,fluxy,fluxz), dt, - AMREX_D_DECL(dxinv,dyinv,dzinv)); + AMREX_D_DECL(dxinv,dyinv,dzinv), + Ncomp); }); } } -void init_phi(amrex::MultiFab& phi_new, amrex::Geometry const& geom, amrex::Real npts_scale){ +void init_phi(amrex::MultiFab& phi_new, amrex::Geometry const& geom, amrex::Real npts_scale,int Ncomp){ GpuArray dx = geom.CellSizeArray(); GpuArray prob_lo = geom.ProbLoArray(); @@ -141,7 +143,7 @@ void init_phi(amrex::MultiFab& phi_new, amrex::Geometry const& geom, amrex::Real amrex::ParallelFor(vbx, [=] AMREX_GPU_DEVICE(int i, int j, int k) { - init_phi(i,j,k,phiNew,dx,prob_lo, npts_scale); + init_phi(i,j,k,phiNew,dx,prob_lo, npts_scale,Ncomp); }); } } diff --git a/src_deankow/mykernel.H b/src_deankow/singlelevel/mykernel.H similarity 
index 55% rename from src_deankow/mykernel.H rename to src_deankow/singlelevel/mykernel.H index 7a72c2241..ff9c516be 100644 --- a/src_deankow/mykernel.H +++ b/src_deankow/singlelevel/mykernel.H @@ -8,10 +8,16 @@ void init_phi (int i, int j, int k, amrex::Array4 const& phi, GpuArray const& dx, GpuArray const& prob_lo, - amrex::Real npts_scale) + amrex::Real npts_scale,int Ncomp) { using amrex::Real;; + Real cellvol = dx[0]*dx[1]; +#if(AMREX_SPACEDIM > 2) + cellvol *=dx[2]; +#endif + +#if 0 Real x = prob_lo[0] + (i+Real(0.5)) * dx[0]; Real y = prob_lo[1] + (j+Real(0.5)) * dx[1]; #if (AMREX_SPACEDIM > 2) @@ -21,19 +27,47 @@ void init_phi (int i, int j, int k, Real z = Real(0.); Real r2 = ((x-Real(0.25))*(x-Real(0.25))+(y-Real(0.25))*(y-Real(0.25)))/Real(0.01); #endif - phi(i,j,k) = Real(1.) + std::exp(-r2); + phi(i,j,k,0) = Real(1.) + std::exp(-r2); if( x > 0.5){ - phi(i,j,k) = 1.5*npts_scale; + phi(i,j,k,0) = 1.5*npts_scale; } else { - phi(i,j,k) = 0.5*npts_scale; + phi(i,j,k,0) = 0.5*npts_scale; } if(i==4 && j ==4) { - phi(i,j,k) = 1./(dx[0]*dx[1]); + phi(i,j,k,0) = 1./(dx[0]*dx[1]); } else { - phi(i,j,k) = 0;; + phi(i,j,k,0) = 0;; + } + if(Ncomp == 2){ + phi(i,j,k,1) = phi(i,j,k,0); + } +#endif +#if 1 + // 2D Test for multilevel + Real rad = Real(0.2); + Real xcent = Real(0.5); + Real ycent = Real(0.5); + Real x = prob_lo[0] + (i+Real(0.5)) * dx[0] - xcent; + Real y = prob_lo[1] + (j+Real(0.5)) * dx[1] - ycent; + Real r2 = (x*x + y*y); +#if (AMREX_SPACEDIM == 3) + Real zcent = Real(0.75); + Real z = prob_lo[2] + (k+Real(0.5)) * dx[2] - zcent; + r2 = (x*x + y*y+z*z); +#endif + + if ( r2 > rad*rad) { + phi(i,j,k,0) = 15./cellvol; + } else { + phi(i,j,k,0) = 0./cellvol; + } +#endif + + if (Ncomp == 2){ + phi(i,j,k,1) = phi(i,j,k,0); } } @@ -43,19 +77,20 @@ void compute_flux_x (int i, int j, int k, amrex::Array4 const& fluxx, amrex::Array4 const& stochfluxx, amrex::Array4 const& phi, amrex::Real dxinv, - int lo, int hi, int dom_lo, int dom_hi, int bc_lo, int bc_hi) + int lo, int hi, int dom_lo, int dom_hi, int bc_lo, int bc_hi,int Ncomp) { + int coeff_index = Ncomp-1; if (lo == dom_lo && (bc_lo == BCType::foextrap || bc_lo == BCType::ext_dir)) { if(i == lo) { - fluxx(i,j,k) = 0.5*(phi(i,j,k)-phi(i-1,j,k)) * dxinv / 0.5; + fluxx(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i-1,j,k,0)) * dxinv / 0.5; } else { - fluxx(i,j,k) = 0.5*(phi(i,j,k)-phi(i-1,j,k)) * dxinv; + fluxx(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i-1,j,k,0)) * dxinv; } } else if (hi == dom_hi && @@ -64,21 +99,25 @@ void compute_flux_x (int i, int j, int k, { if(i == hi+1) { - fluxx(i,j,k) = 0.5*(phi(i,j,k)-phi(i-1,j,k)) * dxinv / 0.5; + fluxx(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i-1,j,k,0)) * dxinv / 0.5; } else { - fluxx(i,j,k) = 0.5*(phi(i,j,k)-phi(i-1,j,k)) * dxinv; + fluxx(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i-1,j,k,0)) * dxinv; } } else { // amrex::Real phiavg = 2.*(phi(i,j,k)*phi(i-1,j,k))/(phi(i,j,k)+phi(i-1,j,k)+1.e-16); - Real phip = std::max(phi(i,j,k),0.); - Real phim = std::max(phi(i-1,j,k),0.); - amrex::Real phiavg = 2.*(phip*phim)/(phip+phim+1.e-16); - fluxx(i,j,k) = 0.5*(phi(i,j,k)-phi(i-1,j,k)) * dxinv + Real phip = std::max(phi(i,j,k,coeff_index),0.); + Real phim = std::max(phi(i-1,j,k,coeff_index),0.); +// amrex::Real phiavg = 2.*(phip*phim)/(phip+phim+1.e-16); + amrex::Real phiavg = 0.5*(phip+phim); + fluxx(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i-1,j,k,0)) * dxinv + stochfluxx(i,j,k) * std::sqrt(phiavg); + if(Ncomp == 2){ + fluxx(i,j,k,1) = 0.5*(phi(i,j,k,1)-phi(i-1,j,k,1)) * dxinv; + } } } @@ -87,19 +126,20 @@ void compute_flux_y (int i, int 
j, int k, amrex::Array4 const& fluxy, amrex::Array4 const& stochfluxy, amrex::Array4 const& phi, amrex::Real dyinv, - int lo, int hi, int dom_lo, int dom_hi, int bc_lo, int bc_hi) + int lo, int hi, int dom_lo, int dom_hi, int bc_lo, int bc_hi, int Ncomp) { + int coeff_index = Ncomp-1; if (lo == dom_lo && (bc_lo == BCType::foextrap || bc_lo == BCType::ext_dir)) { if(j == lo) { - fluxy(i,j,k) = 0.5*(phi(i,j,k)-phi(i,j-1,k)) * dyinv / 0.5; + fluxy(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j-1,k,0)) * dyinv / 0.5; } else { - fluxy(i,j,k) = 0.5*(phi(i,j,k)-phi(i,j-1,k)) * dyinv; + fluxy(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j-1,k,0)) * dyinv; } } else if (hi == dom_hi && @@ -108,21 +148,25 @@ void compute_flux_y (int i, int j, int k, { if(j == hi+1) { - fluxy(i,j,k) = 0.5*(phi(i,j,k)-phi(i,j-1,k)) * dyinv / 0.5; + fluxy(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j-1,k,0)) * dyinv / 0.5; } else { - fluxy(i,j,k) = 0.5*(phi(i,j,k)-phi(i,j-1,k)) * dyinv; + fluxy(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j-1,k,0)) * dyinv; } } else { // amrex::Real phiavg = 2.*(phi(i,j,k)*phi(i,j-1,k))/(phi(i,j,k)+phi(i,j-1,k)+1.e-16); - Real phip = std::max(phi(i,j,k),0.); - Real phim = std::max(phi(i,j-1,k),0.); - amrex::Real phiavg = 2.*(phip*phim)/(phip+phim+1.e-16); - fluxy(i,j,k) = 0.5*(phi(i,j,k)-phi(i,j-1,k)) * dyinv + Real phip = std::max(phi(i,j,k,coeff_index),0.); + Real phim = std::max(phi(i,j-1,k,coeff_index),0.); + //amrex::Real phiavg = 2.*(phip*phim)/(phip+phim+1.e-16); + amrex::Real phiavg = 0.5*(phip+phim); + fluxy(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j-1,k,0)) * dyinv + stochfluxy(i,j,k)* std::sqrt(phiavg); + if(Ncomp == 2){ + fluxy(i,j,k,1) = 0.5*(phi(i,j,k,1)-phi(i,j-1,k,1)) * dyinv; + } } } @@ -135,17 +179,18 @@ void compute_flux_z (int i, int j, int k, amrex::Array4 const& phi, amrex::Real dzinv, int lo, int hi, int dom_lo, int dom_hi, int bc_lo, int bc_hi) { + int coeff_index = Ncomp-1; if (lo == dom_lo && (bc_lo == BCType::foextrap || bc_lo == BCType::ext_dir)) { if(k == lo) { - fluxz(i,j,k) = 0.5*(phi(i,j,k)-phi(i,j,k-1)) * dzinv / 0.5; + fluxz(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j,k-1,0)) * dzinv / 0.5; } else { - fluxz(i,j,k) = 0.5*(phi(i,j,k)-phi(i,j,k-1)) * dzinv; + fluxz(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j,k-1,0)) * dzinv; } } else if (hi == dom_hi && @@ -154,21 +199,24 @@ void compute_flux_z (int i, int j, int k, { if(k == hi+1) { - fluxz(i,j,k) = 0.5*(phi(i,j,k)-phi(i,j,k-1)) * dzinv / 0.5; + fluxz(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j,k-1,0)) * dzinv / 0.5; } else { - fluxz(i,j,k) = 0.5*(phi(i,j,k)-phi(i,j,k-1)) * dzinv; + fluxz(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j,k-1,0)) * dzinv; } } else { // amrex::Real phiavg = 2.*(phi(i,j,k)*phi(i,j,k-1))/(phi(i,j,k)+phi(i,j,k-1)+1.e-16); - Real phip = std::max(phi(i,j,k),0.); - Real phim = std::max(phi(i,j,k-1),0.); + Real phip = std::max(phi(i,j,k,coeff_index),0.); + Real phim = std::max(phi(i,j,k-1,coeff_index),0.); amrex::Real phiavg = 2.*(phip*phim)/(phip+phim+1.e-16); - fluxz(i,j,k) = 0.5*(phi(i,j,k)-phi(i,j,k-1)) * dzinv + fluxz(i,j,k,0) = 0.5*(phi(i,j,k,0)-phi(i,j,k-1,0)) * dzinv +stochfluxz(i,j,k) * std::sqrt(phiavg); + if(Ncomp == 2){ + fluxz(i,j,k,1) = 0.5*(phi(i,j,k,1)-phi(i,j,k-1,1)) * dzinv; + } } } #endif @@ -183,16 +231,19 @@ void update_phi (int i, int j, int k, amrex::Real dt, AMREX_D_DECL(amrex::Real dxinv, amrex::Real dyinv, - amrex::Real dzinv)) + amrex::Real dzinv), + int Ncomp) { - phinew(i,j,k) = phiold(i,j,k) - + dt * dxinv * (fluxx(i+1,j ,k ) - fluxx(i,j,k)) - + dt * dyinv * (fluxy(i ,j+1,k ) - fluxy(i,j,k)) + for (int n = 0; n < Ncomp; n++){ + 
phinew(i,j,k,n) = phiold(i,j,k,n) + + dt * dxinv * (fluxx(i+1,j ,k,n ) - fluxx(i,j,k,n)) + + dt * dyinv * (fluxy(i ,j+1,k,n ) - fluxy(i,j,k,n)) #if (AMREX_SPACEDIM > 2) - + dt * dzinv * (fluxz(i ,j ,k+1) - fluxz(i,j,k)); + + dt * dzinv * (fluxz(i ,j ,k+1,n) - fluxz(i,j,k,n)); #else ; #endif + } } #endif diff --git a/src_hydro/BDS.cpp b/src_hydro/BDS.cpp index b82f754e1..8701f9fa3 100644 --- a/src_hydro/BDS.cpp +++ b/src_hydro/BDS.cpp @@ -179,10 +179,10 @@ void BDS_ComputeSlopes(Box const& bx, Vector bc_hi(AMREX_SPACEDIM); BCPhysToMath(bccomp,bc_lo,bc_hi); - bool lo_x_physbc = (bc_lo[0] == FOEXTRAP || bc_lo[0] == EXT_DIR) ? true : false; - bool hi_x_physbc = (bc_hi[0] == FOEXTRAP || bc_hi[0] == EXT_DIR) ? true : false; - bool lo_y_physbc = (bc_lo[1] == FOEXTRAP || bc_lo[1] == EXT_DIR) ? true : false; - bool hi_y_physbc = (bc_hi[1] == FOEXTRAP || bc_hi[1] == EXT_DIR) ? true : false; + bool lo_x_physbc = (bc_lo[0] == amrex::BCType::foextrap || bc_lo[0] == amrex::BCType::ext_dir) ? true : false; + bool hi_x_physbc = (bc_hi[0] == amrex::BCType::foextrap || bc_hi[0] == amrex::BCType::ext_dir) ? true : false; + bool lo_y_physbc = (bc_lo[1] == amrex::BCType::foextrap || bc_lo[1] == amrex::BCType::ext_dir) ? true : false; + bool hi_y_physbc = (bc_hi[1] == amrex::BCType::foextrap || bc_hi[1] == amrex::BCType::ext_dir) ? true : false; // bicubic interpolation to corner points // (i,j,k) refers to lower corner of cell @@ -441,10 +441,10 @@ void BDS_ComputeConc(Box const& bx, Vector bc_hi(AMREX_SPACEDIM); BCPhysToMath(bccomp,bc_lo,bc_hi); - bool lo_x_physbc = (bc_lo[0] == FOEXTRAP || bc_lo[0] == EXT_DIR) ? true : false; - bool hi_x_physbc = (bc_hi[0] == FOEXTRAP || bc_hi[0] == EXT_DIR) ? true : false; - bool lo_y_physbc = (bc_lo[1] == FOEXTRAP || bc_lo[1] == EXT_DIR) ? true : false; - bool hi_y_physbc = (bc_hi[1] == FOEXTRAP || bc_hi[1] == EXT_DIR) ? true : false; + bool lo_x_physbc = (bc_lo[0] == amrex::BCType::foextrap || bc_lo[0] == amrex::BCType::ext_dir) ? true : false; + bool hi_x_physbc = (bc_hi[0] == amrex::BCType::foextrap || bc_hi[0] == amrex::BCType::ext_dir) ? true : false; + bool lo_y_physbc = (bc_lo[1] == amrex::BCType::foextrap || bc_lo[1] == amrex::BCType::ext_dir) ? true : false; + bool hi_y_physbc = (bc_hi[1] == amrex::BCType::foextrap || bc_hi[1] == amrex::BCType::ext_dir) ? true : false; // compute cell-centered ux, vy ParallelFor(gbx, [=] AMREX_GPU_DEVICE (int i, int j, int k){ @@ -913,12 +913,12 @@ void BDS_ComputeSlopes(Box const& bx, Vector bc_hi(AMREX_SPACEDIM); BCPhysToMath(bccomp,bc_lo,bc_hi); - bool lo_x_physbc = (bc_lo[0] == FOEXTRAP || bc_lo[0] == EXT_DIR) ? true : false; - bool hi_x_physbc = (bc_hi[0] == FOEXTRAP || bc_hi[0] == EXT_DIR) ? true : false; - bool lo_y_physbc = (bc_lo[1] == FOEXTRAP || bc_lo[1] == EXT_DIR) ? true : false; - bool hi_y_physbc = (bc_hi[1] == FOEXTRAP || bc_hi[1] == EXT_DIR) ? true : false; - bool lo_z_physbc = (bc_lo[2] == FOEXTRAP || bc_lo[2] == EXT_DIR) ? true : false; - bool hi_z_physbc = (bc_hi[2] == FOEXTRAP || bc_hi[2] == EXT_DIR) ? true : false; + bool lo_x_physbc = (bc_lo[0] == amrex::BCType::foextrap || bc_lo[0] == amrex::BCType::ext_dir) ? true : false; + bool hi_x_physbc = (bc_hi[0] == amrex::BCType::foextrap || bc_hi[0] == amrex::BCType::ext_dir) ? true : false; + bool lo_y_physbc = (bc_lo[1] == amrex::BCType::foextrap || bc_lo[1] == amrex::BCType::ext_dir) ? true : false; + bool hi_y_physbc = (bc_hi[1] == amrex::BCType::foextrap || bc_hi[1] == amrex::BCType::ext_dir) ? 
true : false; + bool lo_z_physbc = (bc_lo[2] == amrex::BCType::foextrap || bc_lo[2] == amrex::BCType::ext_dir) ? true : false; + bool hi_z_physbc = (bc_hi[2] == amrex::BCType::foextrap || bc_hi[2] == amrex::BCType::ext_dir) ? true : false; // tricubic interpolation to corner points // (i,j,k) refers to lower corner of cell @@ -1368,12 +1368,12 @@ void BDS_ComputeConc(Box const& bx, Vector bc_hi(AMREX_SPACEDIM); BCPhysToMath(bccomp,bc_lo,bc_hi); - bool lo_x_physbc = (bc_lo[0] == FOEXTRAP || bc_lo[0] == EXT_DIR) ? true : false; - bool hi_x_physbc = (bc_hi[0] == FOEXTRAP || bc_hi[0] == EXT_DIR) ? true : false; - bool lo_y_physbc = (bc_lo[1] == FOEXTRAP || bc_lo[1] == EXT_DIR) ? true : false; - bool hi_y_physbc = (bc_hi[1] == FOEXTRAP || bc_hi[1] == EXT_DIR) ? true : false; - bool lo_z_physbc = (bc_lo[2] == FOEXTRAP || bc_lo[2] == EXT_DIR) ? true : false; - bool hi_z_physbc = (bc_hi[2] == FOEXTRAP || bc_hi[2] == EXT_DIR) ? true : false; + bool lo_x_physbc = (bc_lo[0] == amrex::BCType::foextrap || bc_lo[0] == amrex::BCType::ext_dir) ? true : false; + bool hi_x_physbc = (bc_hi[0] == amrex::BCType::foextrap || bc_hi[0] == amrex::BCType::ext_dir) ? true : false; + bool lo_y_physbc = (bc_lo[1] == amrex::BCType::foextrap || bc_lo[1] == amrex::BCType::ext_dir) ? true : false; + bool hi_y_physbc = (bc_hi[1] == amrex::BCType::foextrap || bc_hi[1] == amrex::BCType::ext_dir) ? true : false; + bool lo_z_physbc = (bc_lo[2] == amrex::BCType::foextrap || bc_lo[2] == amrex::BCType::ext_dir) ? true : false; + bool hi_z_physbc = (bc_hi[2] == amrex::BCType::foextrap || bc_hi[2] == amrex::BCType::ext_dir) ? true : false; ParallelFor(gbx, [=] AMREX_GPU_DEVICE (int i, int j, int k){ ux(i,j,k) = (umac(i+1,j,k) - umac(i,j,k)) / hx; diff --git a/src_multispec/ComputeMixtureProperties.cpp b/src_multispec/ComputeMixtureProperties.cpp index a553949d1..f37efa387 100644 --- a/src_multispec/ComputeMixtureProperties.cpp +++ b/src_multispec/ComputeMixtureProperties.cpp @@ -84,10 +84,12 @@ void ComputeEta(const MultiFab& rho_in, amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept { - Real c = rho(i,j,k,0) / rhotot(i,j,k); + Real c = rho(i,j,k,1) / rhotot(i,j,k); + c = (c-.034815)/(.9651885-.034815); + eta(i,j,k) = (.1+.9*c)*visc_coef; - eta(i,j,k) = -0.99*visc_coef*c + visc_coef; - eta(i,j,k) = std::max(0.01*visc_coef,eta(i,j,k)); + // eta(i,j,k) = -0.99*visc_coef*c + visc_coef; + eta(i,j,k) = std::max(0.1*visc_coef,eta(i,j,k)); eta(i,j,k) = std::min(visc_coef,eta(i,j,k)); diff --git a/src_multispec/DiffusiveMassFluxdiv.cpp b/src_multispec/DiffusiveMassFluxdiv.cpp index 24a7b2098..081b18b3f 100644 --- a/src_multispec/DiffusiveMassFluxdiv.cpp +++ b/src_multispec/DiffusiveMassFluxdiv.cpp @@ -212,7 +212,7 @@ void ComputeHigherOrderTerm(const MultiFab& molarconc, #endif // boundary conditions - if (bc_lo[0] == FOEXTRAP || bc_lo[0] == EXT_DIR) { + if (bc_lo[0] == amrex::BCType::foextrap || bc_lo[0] == amrex::BCType::ext_dir) { if (bx_x.smallEnd(0) <= dom.smallEnd(0)) { int lo = dom.smallEnd(0); amrex::ParallelFor(bx_x, nspecies, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -224,7 +224,7 @@ void ComputeHigherOrderTerm(const MultiFab& molarconc, } } - if (bc_hi[0] == FOEXTRAP || bc_hi[0] == EXT_DIR) { + if (bc_hi[0] == amrex::BCType::foextrap || bc_hi[0] == amrex::BCType::ext_dir) { if (bx_x.bigEnd(0) >= dom.bigEnd(0)+1) { int hi = dom.bigEnd(0)+1; amrex::ParallelFor(bx_x, nspecies, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -236,7 +236,7 @@ void ComputeHigherOrderTerm(const 
MultiFab& molarconc, } } - if (bc_lo[1] == FOEXTRAP || bc_lo[1] == EXT_DIR) { + if (bc_lo[1] == amrex::BCType::foextrap || bc_lo[1] == amrex::BCType::ext_dir) { if (bx_y.smallEnd(1) <= dom.smallEnd(1)) { int lo = dom.smallEnd(1); amrex::ParallelFor(bx_y, nspecies, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -248,7 +248,7 @@ void ComputeHigherOrderTerm(const MultiFab& molarconc, } } - if (bc_hi[1] == FOEXTRAP || bc_hi[1] == EXT_DIR) { + if (bc_hi[1] == amrex::BCType::foextrap || bc_hi[1] == amrex::BCType::ext_dir) { if (bx_y.bigEnd(1) >= dom.bigEnd(1)+1) { int hi = dom.bigEnd(1)+1; amrex::ParallelFor(bx_y, nspecies, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -261,7 +261,7 @@ void ComputeHigherOrderTerm(const MultiFab& molarconc, } #if (AMREX_SPACEDIM == 3) - if (bc_lo[2] == FOEXTRAP || bc_lo[2] == EXT_DIR) { + if (bc_lo[2] == amrex::BCType::foextrap || bc_lo[2] == amrex::BCType::ext_dir) { if (bx_z.smallEnd(2) <= dom.smallEnd(2)) { int lo = dom.smallEnd(2); amrex::ParallelFor(bx_z, nspecies, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -273,7 +273,7 @@ void ComputeHigherOrderTerm(const MultiFab& molarconc, } } - if (bc_hi[2] == FOEXTRAP || bc_hi[2] == EXT_DIR) { + if (bc_hi[2] == amrex::BCType::foextrap || bc_hi[2] == amrex::BCType::ext_dir) { if (bx_z.bigEnd(2) >= dom.bigEnd(2)+1) { int hi = dom.bigEnd(2)+1; amrex::ParallelFor(bx_z, nspecies, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -449,7 +449,7 @@ void ComputeFHHigherOrderTerm(const MultiFab& molarconc, #endif // boundary conditions - if (bc_lo[0] == FOEXTRAP || bc_lo[0] == EXT_DIR) { + if (bc_lo[0] == amrex::BCType::foextrap || bc_lo[0] == amrex::BCType::ext_dir) { if (bx_x.smallEnd(0) <= dom.smallEnd(0)) { int lo = dom.smallEnd(0); amrex::ParallelFor(bx_x, nspecies, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -461,7 +461,7 @@ void ComputeFHHigherOrderTerm(const MultiFab& molarconc, } } - if (bc_hi[0] == FOEXTRAP || bc_hi[0] == EXT_DIR) { + if (bc_hi[0] == amrex::BCType::foextrap || bc_hi[0] == amrex::BCType::ext_dir) { if (bx_x.bigEnd(0) >= dom.bigEnd(0)+1) { int hi = dom.bigEnd(0)+1; amrex::ParallelFor(bx_x, nspecies, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -473,7 +473,7 @@ void ComputeFHHigherOrderTerm(const MultiFab& molarconc, } } - if (bc_lo[1] == FOEXTRAP || bc_lo[1] == EXT_DIR) { + if (bc_lo[1] == amrex::BCType::foextrap || bc_lo[1] == amrex::BCType::ext_dir) { if (bx_y.smallEnd(1) <= dom.smallEnd(1)) { int lo = dom.smallEnd(1); amrex::ParallelFor(bx_y, nspecies, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -485,7 +485,7 @@ void ComputeFHHigherOrderTerm(const MultiFab& molarconc, } } - if (bc_hi[1] == FOEXTRAP || bc_hi[1] == EXT_DIR) { + if (bc_hi[1] == amrex::BCType::foextrap || bc_hi[1] == amrex::BCType::ext_dir) { if (bx_y.bigEnd(1) >= dom.bigEnd(1)+1) { int hi = dom.bigEnd(1)+1; amrex::ParallelFor(bx_y, nspecies, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -498,7 +498,7 @@ void ComputeFHHigherOrderTerm(const MultiFab& molarconc, } #if (AMREX_SPACEDIM == 3) - if (bc_lo[2] == FOEXTRAP || bc_lo[2] == EXT_DIR) { + if (bc_lo[2] == amrex::BCType::foextrap || bc_lo[2] == amrex::BCType::ext_dir) { if (bx_z.smallEnd(2) <= dom.smallEnd(2)) { int lo = dom.smallEnd(2); amrex::ParallelFor(bx_z, nspecies, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept @@ -510,7 +510,7 @@ void ComputeFHHigherOrderTerm(const MultiFab& molarconc, } } - if (bc_hi[2] == FOEXTRAP || bc_hi[2] == 
diff --git a/src_multispec/RhoUtil.cpp b/src_multispec/RhoUtil.cpp
index 00b9356c4..5f99a8523 100644
--- a/src_multispec/RhoUtil.cpp
+++ b/src_multispec/RhoUtil.cpp
@@ -187,7 +187,7 @@ void FillRhototGhost(MultiFab& rhotot_in, const MultiFab& conc_in, const Geometr
         int hi = dom.bigEnd(0);
         if (bx.smallEnd(0) < lo) {
-            if (bc_lo[0] == FOEXTRAP || bc_lo[0] == EXT_DIR) {
+            if (bc_lo[0] == amrex::BCType::foextrap || bc_lo[0] == amrex::BCType::ext_dir) {
                 amrex::ParallelFor(bx,[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
                 {
                     if (i < lo) {
@@ -202,7 +202,7 @@ void FillRhototGhost(MultiFab& rhotot_in, const MultiFab& conc_in, const Geometr
         }
         if (bx.bigEnd(0) > hi) {
-            if (bc_hi[0] == FOEXTRAP || bc_hi[0] == EXT_DIR) {
+            if (bc_hi[0] == amrex::BCType::foextrap || bc_hi[0] == amrex::BCType::ext_dir) {
                 amrex::ParallelFor(bx,[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
                 {
                     if (i > hi) {
@@ -224,7 +224,7 @@ void FillRhototGhost(MultiFab& rhotot_in, const MultiFab& conc_in, const Geometr
         hi = dom.bigEnd(1);
         if (bx.smallEnd(1) < lo) {
-            if (bc_lo[1] == FOEXTRAP || bc_lo[1] == EXT_DIR) {
+            if (bc_lo[1] == amrex::BCType::foextrap || bc_lo[1] == amrex::BCType::ext_dir) {
                 amrex::ParallelFor(bx,[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
                 {
                     if (j < lo) {
@@ -239,7 +239,7 @@ void FillRhototGhost(MultiFab& rhotot_in, const MultiFab& conc_in, const Geometr
         }
         if (bx.bigEnd(1) > hi) {
-            if (bc_hi[1] == FOEXTRAP || bc_hi[1] == EXT_DIR) {
+            if (bc_hi[1] == amrex::BCType::foextrap || bc_hi[1] == amrex::BCType::ext_dir) {
                 amrex::ParallelFor(bx,[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
                 {
                     if (j > hi) {
@@ -262,7 +262,7 @@ void FillRhototGhost(MultiFab& rhotot_in, const MultiFab& conc_in, const Geometr
         hi = dom.bigEnd(2);
         if (bx.smallEnd(2) < lo) {
-            if (bc_lo[2] == FOEXTRAP || bc_lo[2] == EXT_DIR) {
+            if (bc_lo[2] == amrex::BCType::foextrap || bc_lo[2] == amrex::BCType::ext_dir) {
                 amrex::ParallelFor(bx,[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
                 {
                     if (k < lo) {
@@ -277,7 +277,7 @@ void FillRhototGhost(MultiFab& rhotot_in, const MultiFab& conc_in, const Geometr
         }
         if (bx.bigEnd(2) > hi) {
-            if (bc_hi[2] == FOEXTRAP || bc_hi[2] == EXT_DIR) {
+            if (bc_hi[2] == amrex::BCType::foextrap || bc_hi[2] == amrex::BCType::ext_dir) {
                 amrex::ParallelFor(bx,[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
                 {
                     if (k > hi) {
diff --git a/src_multispec/StochMassFlux.cpp b/src_multispec/StochMassFlux.cpp
index f0512eb12..02df29059 100644
--- a/src_multispec/StochMassFlux.cpp
+++ b/src_multispec/StochMassFlux.cpp
@@ -70,11 +70,11 @@ void StochMassFlux::StochMassFluxBC() {
     BL_PROFILE_VAR("StochMassFluxBC()",StochMassFluxBC);
     // lo-x domain boundary
-    if (bc_mass_lo[0] == 1 || bc_mass_lo[0] == 2) {
+    if (bc_mass_lo[0] == 1 || bc_mass_lo[0] == 2 || bc_mass_lo[0] == 4) {
         // 1 = wall : multiply fluxes on wall by 0
         // 2 = reservoir : multiply fluxes on wall by sqrt(2)
-        Real factor = (bc_mass_lo[0] == 1) ? 0. : sqrt(2.);
+        Real factor = (bc_mass_lo[0] == 1 || bc_mass_lo[0] == 4) ? 0. : sqrt(2.);
         // domain grown nodally based on stoch_W_fc_weighted[0] nodality (x)
         const Box& dom_x = amrex::convert(geom.Domain(), stoch_W_fc_weighted[0].ixType());
@@ -100,11 +100,11 @@ void StochMassFlux::StochMassFluxBC() {
     }
     // hi-x domain boundary
-    if (bc_mass_hi[0] == 1 || bc_mass_hi[0] == 2) {
+    if (bc_mass_hi[0] == 1 || bc_mass_hi[0] == 2 || bc_mass_hi[0] == 4) {
         // 1 = wall : multiply fluxes on wall by 0
         // 2 = reservoir : multiply fluxes on wall by sqrt(2)
-        Real factor = (bc_mass_hi[0] == 1) ? 0. : sqrt(2.);
+        Real factor = (bc_mass_hi[0] == 1 || bc_mass_hi[0] == 4) ? 0. : sqrt(2.);
         // domain grown nodally based on stoch_W_fc_weighted[0] nodality (x)
         const Box& dom_x = amrex::convert(geom.Domain(), stoch_W_fc_weighted[0].ixType());
@@ -131,11 +131,11 @@ void StochMassFlux::StochMassFluxBC() {
     }
     // lo-y domain boundary
-    if (bc_mass_lo[1] == 1 || bc_mass_lo[1] == 2) {
+    if (bc_mass_lo[1] == 1 || bc_mass_lo[1] == 2 || bc_mass_lo[1] == 4) {
         // 1 = wall : multiply fluxes on wall by 0
         // 2 = reservoir : multiply fluxes on wall by sqrt(2)
-        Real factor = (bc_mass_lo[1] == 1) ? 0. : sqrt(2.);
+        Real factor = (bc_mass_lo[1] == 1 || bc_mass_lo[1] == 4) ? 0. : sqrt(2.);
         // domain grown nodally based on stoch_W_fc_weighted[1] nodality (y)
         const Box& dom_y = amrex::convert(geom.Domain(), stoch_W_fc_weighted[1].ixType());
@@ -161,11 +161,11 @@ void StochMassFlux::StochMassFluxBC() {
     }
     // hi-y domain boundary
-    if (bc_mass_hi[1] == 1 || bc_mass_hi[1] == 2) {
+    if (bc_mass_hi[1] == 1 || bc_mass_hi[1] == 2 || bc_mass_hi[1] == 4) {
         // 1 = wall : multiply fluxes on wall by 0
         // 2 = reservoir : multiply fluxes on wall by sqrt(2)
-        Real factor = (bc_mass_hi[1] == 1) ? 0. : sqrt(2.);
+        Real factor = (bc_mass_hi[1] == 1 || bc_mass_hi[1] == 4) ? 0. : sqrt(2.);
         // domain grown nodally based on stoch_W_fc_weighted[1] nodality (y)
         const Box& dom_y = amrex::convert(geom.Domain(), stoch_W_fc_weighted[1].ixType());
@@ -193,11 +193,11 @@ void StochMassFlux::StochMassFluxBC() {
     }
 #if (AMREX_SPACEDIM == 3)
     // lo-z domain boundary
-    if (bc_mass_lo[2] == 1 || bc_mass_lo[2] == 2) {
+    if (bc_mass_lo[2] == 1 || bc_mass_lo[2] == 2 || bc_mass_lo[2] == 4) {
         // 1 = wall : multiply fluxes on wall by 0
         // 2 = reservoir : multiply fluxes on wall by sqrt(2)
-        Real factor = (bc_mass_lo[2] == 1) ? 0. : sqrt(2.);
+        Real factor = (bc_mass_lo[2] == 1 || bc_mass_lo[2] == 4) ? 0. : sqrt(2.);
         // domain grown nodally based on stoch_W_fc_weighted[2] nodality (z)
         const Box& dom_z = amrex::convert(geom.Domain(), stoch_W_fc_weighted[2].ixType());
@@ -223,11 +223,11 @@ void StochMassFlux::StochMassFluxBC() {
     }
     // hi-z domain boundary
-    if (bc_mass_hi[2] == 1 || bc_mass_hi[2] == 2) {
+    if (bc_mass_hi[2] == 1 || bc_mass_hi[2] == 2 || bc_mass_hi[2] == 4) {
         // 1 = wall : multiply fluxes on wall by 0
         // 2 = reservoir : multiply fluxes on wall by sqrt(2)
-        Real factor = (bc_mass_hi[2] == 1) ? 0. : sqrt(2.);
+        Real factor = (bc_mass_hi[2] == 1 || bc_mass_hi[2] == 4) ? 0. : sqrt(2.);
         // domain grown nodally based on stoch_W_fc_weighted[2] nodality (z)
         const Box& dom_z = amrex::convert(geom.Domain(), stoch_W_fc_weighted[2].ixType());
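StochMassFluxBC() scales the stochastic mass fluxes on each physical face: a wall (bc code 1) zeroes them and a reservoir (bc code 2) multiplies them by sqrt(2), as the in-line comments say; the hunks above add bc code 4 and treat it like a wall. A small sketch of the selection logic, with an illustrative function name not present in the source:

#include <cmath>

// Sketch of the per-face noise factor implied by the hunks above.
// bc code 1 = wall, 2 = reservoir, 4 = new wall-like type; anything else is left alone.
inline double stoch_mass_flux_factor(int bc_mass)
{
    if (bc_mass == 1 || bc_mass == 4) return 0.0;            // zero the noise on the wall
    if (bc_mass == 2)                 return std::sqrt(2.0); // reservoir face
    return 1.0;                                              // periodic/interior: unchanged
}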
diff --git a/src_particles/DsmcParticleContainer.cpp b/src_particles/DsmcParticleContainer.cpp
index 2eb1def62..69d850c2c 100644
--- a/src_particles/DsmcParticleContainer.cpp
+++ b/src_particles/DsmcParticleContainer.cpp
@@ -220,7 +220,7 @@ void FhdParticleContainer::MoveParticlesCPP(const Real dt, paramPlane* paramPlan
         while(runtime > 0)
         {
             find_inter_gpu(part, runtime, paramPlaneListPtr, paramPlaneCount,
-                           &intsurf, &inttime, &intside, ZFILL(plo), ZFILL(phi));
+                           &intsurf, &inttime, &intside, AMREX_ZFILL(plo), AMREX_ZFILL(phi));
             for (int d=0; d<(AMREX_SPACEDIM); ++d)
             {
@@ -418,7 +418,7 @@ void FhdParticleContainer::MovePhononsCPP(const Real dt, paramPlane* paramPlaneL
             // printf("DT: %e\n", dt);
             // cout << "DT: " << dt << endl;
             find_inter_gpu(part, runtime, paramPlaneListPtr, paramPlaneCount,
-                           &intsurf, &inttime, &intside, ZFILL(plo), ZFILL(phi));
+                           &intsurf, &inttime, &intside, AMREX_ZFILL(plo), AMREX_ZFILL(phi));
             Real tauImpurityInv = pow(part.rdata(FHD_realData::omega),4)/tau_i_p;
             Real tauTAInv = part.rdata(FHD_realData::omega)*pow(T_init[0],4)/tau_ta_p;
diff --git a/src_particles/DsmcParticleContainer.cpp.old b/src_particles/DsmcParticleContainer.cpp.old
index 43e11627a..98c3627c0 100644
--- a/src_particles/DsmcParticleContainer.cpp.old
+++ b/src_particles/DsmcParticleContainer.cpp.old
@@ -72,7 +72,7 @@ void DsmcParticleContainer::MoveParticlesDSMC(const Real dt, const paramPlane* p
                        m_vector_size[grid_id].dataPtr(),
                        ARLIM_3D(m_vector_ptrs[grid_id].loVect()),
                        ARLIM_3D(m_vector_ptrs[grid_id].hiVect()),
-                       ZFILL(plo),ZFILL(phi),ZFILL(dx), &dt,
+                       AMREX_ZFILL(plo),AMREX_ZFILL(phi),ZFILL(dx), &dt,
                        paramPlaneList, &paramPlaneCount, &time, flux);
         lFlux += flux[0];
         rFlux += flux[1];
diff --git a/src_particles/FhdParticleContainer.cpp b/src_particles/FhdParticleContainer.cpp
index a7d1b9ad8..b8bcc6952 100644
--- a/src_particles/FhdParticleContainer.cpp
+++ b/src_particles/FhdParticleContainer.cpp
@@ -645,8 +645,8 @@ void FhdParticleContainer::MoveIonsCPP(const Real dt, const Real* dxFluid, const
         while(runtime > 0)
         {
-            //find_inter(&part, &runtime, paramPlaneList, &paramPlaneCount, &intsurf, &inttime, &intside, ZFILL(plo), ZFILL(phi));
-            find_inter_gpu(part, runtime, paramPlaneList, paramPlaneCount, &intsurf, &inttime, &intside, ZFILL(plo), ZFILL(phi));
+            //find_inter(&part, &runtime, paramPlaneList, &paramPlaneCount, &intsurf, &inttime, &intside, AMREX_ZFILL(plo), AMREX_ZFILL(phi));
+            find_inter_gpu(part, runtime, paramPlaneList, paramPlaneCount, &intsurf, &inttime, &intside, AMREX_ZFILL(plo), AMREX_ZFILL(phi));
             for (int d=0; d 0) {
-            //find_inter(&part, &runtime, paramPlaneList, &paramPlaneCount, &intsurf, &inttime, &intside, ZFILL(plo), ZFILL(phi));
-            find_inter_gpu(part, runtime, paramPlaneList, paramPlaneCount, &intsurf, &inttime, &intside, ZFILL(plo), ZFILL(phi));
+            //find_inter(&part, &runtime, paramPlaneList, &paramPlaneCount, &intsurf, &inttime, &intside, AMREX_ZFILL(plo), AMREX_ZFILL(phi));
+            find_inter_gpu(part, runtime, paramPlaneList, paramPlaneCount, &intsurf, &inttime, &intside, AMREX_ZFILL(plo), AMREX_ZFILL(phi));
             //Print() << "PART " << part.id() << ", " << intsurf << "\n";
             //cin.get();
@@ -1013,7 +1013,7 @@ void FhdParticleContainer::SpreadIonsGPU(const Real* dxFluid, const Real* dxE, c
             emf_gpu(particles,
                     efield[0][pti], efield[1][pti], efield[2][pti],
-                    ZFILL(plo), ZFILL(dxE));
+                    AMREX_ZFILL(plo), AMREX_ZFILL(dxE));
         }
@@ -1021,8 +1021,8 @@ void FhdParticleContainer::SpreadIonsGPU(const Real* dxFluid, const Real* dxE, c
         {
             //spread_ions_fhd_gpu(particles,
             //                    sourceTemp[0][pti], sourceTemp[1][pti], sourceTemp[2][pti],
-            //                    ZFILL(plo),
-            //                    ZFILL(dxFluid));
+            //                    AMREX_ZFILL(plo),
+            //                    AMREX_ZFILL(dxFluid));
             SpreadMarkersGpu(lev, sourceTemp, coords, dxFluid, 1);
         }
@@ -1089,8 +1089,8 @@ void FhdParticleContainer::SpreadIonsGPU(const Real* dxFluid, const Geometry geo
         {
             //spread_ions_fhd_gpu(particles,
             //                    sourceTemp[0][pti], sourceTemp[1][pti], sourceTemp[2][pti],
-            //                    ZFILL(plo),
-            //                    ZFILL(dxFluid));
+            //                    AMREX_ZFILL(plo),
+            //                    AMREX_ZFILL(dxFluid));
             SpreadMarkersGpu(lev, sourceTemp, coords, dxFluid, 1);
         }
@@ -1814,7 +1814,7 @@ void FhdParticleContainer::collectFieldsGPU(const Real dt, const Real* dxPotenti
         auto& particles = particle_tile.GetArrayOfStructs();
         const int np = particles.numParticles();
-        collect_charge_gpu(particles, chargeTemp[pti], ZFILL(geomP.ProbLo()), ZFILL(dxPotential));
+        collect_charge_gpu(particles, chargeTemp[pti], AMREX_ZFILL(geomP.ProbLo()), AMREX_ZFILL(dxPotential));
     }
     MultiFabPhysBCCharge(chargeTemp, geomP);
diff --git a/src_particles/FhdParticleContainer_WF.cpp b/src_particles/FhdParticleContainer_WF.cpp
index 3d8e37969..6babe959a 100644
--- a/src_particles/FhdParticleContainer_WF.cpp
+++ b/src_particles/FhdParticleContainer_WF.cpp
@@ -343,13 +343,13 @@ void FhdParticleContainer::DoRFD(const Real dt, const Real* dxFluid, const Real*
         //Print() << "FHD\n";
         do_rfd(particles.data(), &np,
-               ARLIM_3D(tile_box.loVect()),
-               ARLIM_3D(tile_box.hiVect()),
+               AMREX_ARLIM_3D(tile_box.loVect()),
+               AMREX_ARLIM_3D(tile_box.hiVect()),
                m_vector_ptrs[grid_id].dataPtr(),
                m_vector_size[grid_id].dataPtr(),
-               ARLIM_3D(m_vector_ptrs[grid_id].loVect()),
-               ARLIM_3D(m_vector_ptrs[grid_id].hiVect()),
-               ZFILL(plo), ZFILL(phi), ZFILL(dx), &dt, ZFILL(geomF.ProbLo()), ZFILL(dxFluid), ZFILL(dxE),
+               AMREX_ARLIM_3D(m_vector_ptrs[grid_id].loVect()),
+               AMREX_ARLIM_3D(m_vector_ptrs[grid_id].hiVect()),
+               AMREX_ZFILL(plo), AMREX_ZFILL(phi), AMREX_ZFILL(dx), &dt, AMREX_ZFILL(geomF.ProbLo()), AMREX_ZFILL(dxFluid), AMREX_ZFILL(dxE),
                BL_TO_FORTRAN_3D(umac[0][pti]),
                BL_TO_FORTRAN_3D(umac[1][pti]),
 #if (AMREX_SPACEDIM == 3)
@@ -421,7 +421,7 @@ void FhdParticleContainer::DoRFD(const Real dt, const Real* dxFluid, const Real*
 //                amrex_compute_p3m_sr_correction_nl(particles.data(), &Np,
 //                                                   neighbors[lev][index].dataPtr(), &Nn,
 //                                                   neighbor_list[lev][index].dataPtr(), &size, &rcount,
-//                                                  BL_TO_FORTRAN_3D(charge[pti]),BL_TO_FORTRAN_3D(coords[pti]), ARLIM_3D(tile_box.loVect()), ARLIM_3D(tile_box.hiVect()), ZFILL(dx)); }
+//                                                  BL_TO_FORTRAN_3D(charge[pti]),BL_TO_FORTRAN_3D(coords[pti]), AMREX_ARLIM_3D(tile_box.loVect()), AMREX_ARLIM_3D(tile_box.hiVect()), AMREX_ZFILL(dx)); }
 //            }
 //            if(sr_tog==1)
@@ -537,14 +537,14 @@ void FhdParticleContainer::MoveIons(const Real dt, const Real* dxFluid, const Re
         move_ions_fhd(particles.data(), &np_tile, &rejected_tile, &moves_tile, &maxspeed_tile, &maxdist_tile, &diffinst_tile,
-                      ARLIM_3D(tile_box.loVect()),
-                      ARLIM_3D(tile_box.hiVect()),
+                      AMREX_ARLIM_3D(tile_box.loVect()),
+                      AMREX_ARLIM_3D(tile_box.hiVect()),
                       m_vector_ptrs[grid_id].dataPtr(),
                       m_vector_size[grid_id].dataPtr(),
-                      ARLIM_3D(m_vector_ptrs[grid_id].loVect()),
-                      ARLIM_3D(m_vector_ptrs[grid_id].hiVect()),
-                      ZFILL(plo), ZFILL(phi), ZFILL(dx), &dt,
-                      ZFILL(geomF.ProbLo()), ZFILL(dxFluid), ZFILL(dxE),
+                      AMREX_ARLIM_3D(m_vector_ptrs[grid_id].loVect()),
+                      AMREX_ARLIM_3D(m_vector_ptrs[grid_id].hiVect()),
+                      AMREX_ZFILL(plo), AMREX_ZFILL(phi), AMREX_ZFILL(dx), &dt,
+                      AMREX_ZFILL(geomF.ProbLo()), AMREX_ZFILL(dxFluid), AMREX_ZFILL(dxE),
                       BL_TO_FORTRAN_3D(umac[0][pti]),
                       BL_TO_FORTRAN_3D(umac[1][pti]),
 #if (AMREX_SPACEDIM == 3)
@@ -707,7 +707,7 @@ void FhdParticleContainer::MoveIonsGPU(const Real dt, const Real* dxFluid, const
         while(runtime > 0)
         {
            printf("delt pre %e\n",inttime);
-            find_inter_gpu(part, runtime, paramPlaneList, paramPlaneCount, &intsurf, &inttime, &intside, ZFILL(plo), ZFILL(phi));
+            find_inter_gpu(part, runtime, paramPlaneList, paramPlaneCount, &intsurf, &inttime, &intside, AMREX_ZFILL(plo), AMREX_ZFILL(phi));
             for (int d=0; d 0) {
-            //find_inter(&part, &runtime, paramPlaneList, &paramPlaneCount, &intsurf, &inttime, &intside, ZFILL(plo), ZFILL(phi));
-            find_inter_gpu(part, runtime, paramPlaneList, paramPlaneCount, &intsurf, &inttime, &intside, ZFILL(plo), ZFILL(phi));
+            //find_inter(&part, &runtime, paramPlaneList, &paramPlaneCount, &intsurf, &inttime, &intside, AMREX_ZFILL(plo), AMREX_ZFILL(phi));
+            find_inter_gpu(part, runtime, paramPlaneList, paramPlaneCount, &intsurf, &inttime, &intside, AMREX_ZFILL(plo), AMREX_ZFILL(phi));
             //Print() << "PART " << part.id() << ", " << intsurf << "\n";
             //cin.get();
@@ -1170,8 +1170,8 @@ void FhdParticleContainer::MoveIonsCPP(const Real dt, const Real* dxFluid, const
         while(runtime > 0)
         {
-            //find_inter(&part, &runtime, paramPlaneList, &paramPlaneCount, &intsurf, &inttime, &intside, ZFILL(plo), ZFILL(phi));
-            find_inter_gpu(part, runtime, paramPlaneList, paramPlaneCount, &intsurf, &inttime, &intside, ZFILL(plo), ZFILL(phi));
+            //find_inter(&part, &runtime, paramPlaneList, &paramPlaneCount, &intsurf, &inttime, &intside, AMREX_ZFILL(plo), AMREX_ZFILL(phi));
+            find_inter_gpu(part, runtime, paramPlaneList, paramPlaneCount, &intsurf, &inttime, &intside, AMREX_ZFILL(plo), AMREX_ZFILL(phi));
             for (int d=0; d 0) {
-            //find_inter(&part, &runtime, paramPlaneList, &paramPlaneCount, &intsurf, &inttime, &intside, ZFILL(plo), ZFILL(phi));
-            find_inter_gpu(part, runtime, paramPlaneList, paramPlaneCount, &intsurf, &inttime, &intside, ZFILL(plo), ZFILL(phi));
+            //find_inter(&part, &runtime, paramPlaneList, &paramPlaneCount, &intsurf, &inttime, &intside, AMREX_ZFILL(plo), AMREX_ZFILL(phi));
+            find_inter_gpu(part, runtime, paramPlaneList, paramPlaneCount, &intsurf, &inttime, &intside, AMREX_ZFILL(plo), AMREX_ZFILL(phi));
             //Print() << "PART " << part.id() << ", " << intsurf << "\n";
             //cin.get();
@@ -1607,13 +1607,13 @@ void FhdParticleContainer::SpreadIons(const Real dt, const Real* dxFluid, const
         //Print() << "FHD\n";
         spread_ions_fhd(particles.data(), &np,
-                        ARLIM_3D(tile_box.loVect()),
-                        ARLIM_3D(tile_box.hiVect()),
+                        AMREX_ARLIM_3D(tile_box.loVect()),
+                        AMREX_ARLIM_3D(tile_box.hiVect()),
                         m_vector_ptrs[grid_id].dataPtr(),
                         m_vector_size[grid_id].dataPtr(),
-                        ARLIM_3D(m_vector_ptrs[grid_id].loVect()),
-                        ARLIM_3D(m_vector_ptrs[grid_id].hiVect()),
-                        ZFILL(plo), ZFILL(phi), ZFILL(dx), &dt, ZFILL(geomF.ProbLo()), ZFILL(dxFluid), ZFILL(dxE),
+                        AMREX_ARLIM_3D(m_vector_ptrs[grid_id].loVect()),
+                        AMREX_ARLIM_3D(m_vector_ptrs[grid_id].hiVect()),
+                        AMREX_ZFILL(plo), AMREX_ZFILL(phi), AMREX_ZFILL(dx), &dt, AMREX_ZFILL(geomF.ProbLo()), AMREX_ZFILL(dxFluid), AMREX_ZFILL(dxE),
                         BL_TO_FORTRAN_3D(umac[0][pti]),
                         BL_TO_FORTRAN_3D(umac[1][pti]),
 #if (AMREX_SPACEDIM == 3)
@@ -1698,13 +1698,13 @@ void FhdParticleContainer::SpreadIonsGPU(const Real* dxFluid, const Real* dxE, c
             emf_gpu(particles,
                     efield[0][pti], efield[1][pti], efield[2][pti],
-                    ZFILL(plo), ZFILL(dxE));
+                    AMREX_ZFILL(plo), AMREX_ZFILL(dxE));
             if(fluid_tog != 0)
             {
                 spread_ions_fhd_gpu(particles,
                                    sourceTemp[0][pti], sourceTemp[1][pti], sourceTemp[2][pti],
-                                   ZFILL(plo),
-                                   ZFILL(dxFluid));
+                                   AMREX_ZFILL(plo),
+                                   AMREX_ZFILL(dxFluid));
             }
         }
@@ -1770,8 +1770,8 @@ void FhdParticleContainer::SpreadIonsGPU(const Real* dxFluid, const Geometry geo
             {
                 spread_ions_fhd_gpu(particles,
                                     sourceTemp[0][pti], sourceTemp[1][pti], sourceTemp[2][pti],
-                                    ZFILL(plo),
-                                    ZFILL(dxFluid));
+                                    AMREX_ZFILL(plo),
+                                    AMREX_ZFILL(dxFluid));
             }
         }
@@ -1839,13 +1839,13 @@ void FhdParticleContainer::SpreadIonsGPU(const Real* dxFluid, const Geometry geo
 //            emf_gpu(particles,
 //                    efield[0][pti], efield[1][pti], efield[2][pti],
-//                    ZFILL(plo), ZFILL(dxE));
+//                    AMREX_ZFILL(plo), AMREX_ZFILL(dxE));
 //            if(fluid_tog != 0)
 //            {
 //                spread_ions_fhd_gpu(particles,
 //                                    sourceTemp[0][pti], sourceTemp[1][pti], sourceTemp[2][pti],
-//                                    ZFILL(plo),
-//                                    ZFILL(dxFluid));
+//                                    AMREX_ZFILL(plo),
+//                                    AMREX_ZFILL(dxFluid));
 //            }
 //        }
@@ -2647,13 +2647,13 @@ void FhdParticleContainer::collectFields(const Real dt, const Real* dxPotential,
         const int np = particles.numParticles();
         collect_charge(particles.data(), &np,
-                       ARLIM_3D(tile_box.loVect()),
-                       ARLIM_3D(tile_box.hiVect()),
+                       AMREX_ARLIM_3D(tile_box.loVect()),
+                       AMREX_ARLIM_3D(tile_box.hiVect()),
                        m_vector_ptrs[grid_id].dataPtr(),
                        m_vector_size[grid_id].dataPtr(),
-                       ARLIM_3D(m_vector_ptrs[grid_id].loVect()),
-                       ARLIM_3D(m_vector_ptrs[grid_id].hiVect()),
-                       ZFILL(plo), ZFILL(phi), ZFILL(dx), &dt, ZFILL(geomP.ProbLo()), ZFILL(dxPotential),
+                       AMREX_ARLIM_3D(m_vector_ptrs[grid_id].loVect()),
+                       AMREX_ARLIM_3D(m_vector_ptrs[grid_id].hiVect()),
+                       AMREX_ZFILL(plo), AMREX_ZFILL(phi), AMREX_ZFILL(dx), &dt, AMREX_ZFILL(geomP.ProbLo()), AMREX_ZFILL(dxPotential),
                        BL_TO_FORTRAN_3D(RealCenterCoords[pti]),
                        BL_TO_FORTRAN_3D(chargeTemp[pti]));
@@ -2701,7 +2701,7 @@ void FhdParticleContainer::collectFieldsGPU(const Real dt, const Real* dxPotenti
         auto& particles = particle_tile.GetArrayOfStructs();
         const int np = particles.numParticles();
-        collect_charge_gpu(particles, chargeTemp[pti], ZFILL(geomP.ProbLo()), ZFILL(dxPotential));
+        collect_charge_gpu(particles, chargeTemp[pti], AMREX_ZFILL(geomP.ProbLo()), AMREX_ZFILL(dxPotential));
     }
     MultiFabPhysBCCharge(chargeTemp, geomP);
@@ -2737,12 +2737,12 @@ void FhdParticleContainer::InitCollisionCells(MultiFab& collisionPairs,
         const int Np = particles.numParticles();
         init_cells(particles.data(),
-                   ARLIM_3D(tile_box.loVect()),
-                   ARLIM_3D(tile_box.hiVect()),
+                   AMREX_ARLIM_3D(tile_box.loVect()),
+                   AMREX_ARLIM_3D(tile_box.hiVect()),
                    m_vector_ptrs[grid_id].dataPtr(),
                    m_vector_size[grid_id].dataPtr(),
-                   ARLIM_3D(m_vector_ptrs[grid_id].loVect()),
-                   ARLIM_3D(m_vector_ptrs[grid_id].hiVect()),
+                   AMREX_ARLIM_3D(m_vector_ptrs[grid_id].loVect()),
+                   AMREX_ARLIM_3D(m_vector_ptrs[grid_id].hiVect()),
                    BL_TO_FORTRAN_3D(collisionPairs[pti]),
                    BL_TO_FORTRAN_3D(collisionFactor[pti]),
                    BL_TO_FORTRAN_3D(cellVols[pti]),&Np,&particleInfo.Neff,&particleInfo.cp,&particleInfo.d,&delt
@@ -2769,12 +2769,12 @@ void FhdParticleContainer::CollideParticles(MultiFab& collisionPairs,
         const int Np = particles.numParticles();
         collide_cells(particles.data(),
-                      ARLIM_3D(tile_box.loVect()),
-                      ARLIM_3D(tile_box.hiVect()),
+                      AMREX_ARLIM_3D(tile_box.loVect()),
+                      AMREX_ARLIM_3D(tile_box.hiVect()),
                       m_vector_ptrs[grid_id].dataPtr(),
                       m_vector_size[grid_id].dataPtr(),
-                      ARLIM_3D(m_vector_ptrs[grid_id].loVect()),
-                      ARLIM_3D(m_vector_ptrs[grid_id].hiVect()),
+                      AMREX_ARLIM_3D(m_vector_ptrs[grid_id].loVect()),
+                      AMREX_ARLIM_3D(m_vector_ptrs[grid_id].hiVect()),
                       BL_TO_FORTRAN_3D(collisionPairs[pti]),
                      BL_TO_FORTRAN_3D(collisionFactor[pti]),
                      BL_TO_FORTRAN_3D(cellVols[pti]),&Np,&particleInfo.Neff,&particleInfo.cp,&particleInfo.d,&delt
@@ -2801,12 +2801,12 @@ void FhdParticleContainer::InitializeFields(MultiFab& particleInstant,
         const int Np = parts.numParticles();
         initialize_fields(parts.data(),
-                          ARLIM_3D(tile_box.loVect()),
-                          ARLIM_3D(tile_box.hiVect()),
+                          AMREX_ARLIM_3D(tile_box.loVect()),
+                          AMREX_ARLIM_3D(tile_box.hiVect()),
                           m_vector_ptrs[grid_id].dataPtr(),
                           m_vector_size[grid_id].dataPtr(),
-                          ARLIM_3D(m_vector_ptrs[grid_id].loVect()),
-                          ARLIM_3D(m_vector_ptrs[grid_id].hiVect()),
+                          AMREX_ARLIM_3D(m_vector_ptrs[grid_id].loVect()),
+                          AMREX_ARLIM_3D(m_vector_ptrs[grid_id].hiVect()),
                           BL_TO_FORTRAN_3D(particleInstant[pti]),
                           BL_TO_FORTRAN_3D(cellVols[pti]),&particleInfo.Neff, &Np, &particleInfo.R, &particleInfo.T );
@@ -2861,12 +2861,12 @@ void FhdParticleContainer::EvaluateStats(MultiFab& particleInstant,
         tp = tp + Np;
         evaluate_fields_pp(parts.data(),
-                           ARLIM_3D(tile_box.loVect()),
-                           ARLIM_3D(tile_box.hiVect()),
+                           AMREX_ARLIM_3D(tile_box.loVect()),
+                           AMREX_ARLIM_3D(tile_box.hiVect()),
                            m_vector_ptrs[grid_id].dataPtr(),
                            m_vector_size[grid_id].dataPtr(),
-                           ARLIM_3D(m_vector_ptrs[grid_id].loVect()),
-                           ARLIM_3D(m_vector_ptrs[grid_id].hiVect()),
+                           AMREX_ARLIM_3D(m_vector_ptrs[grid_id].loVect()),
+                           AMREX_ARLIM_3D(m_vector_ptrs[grid_id].hiVect()),
                            BL_TO_FORTRAN_3D(particleInstant[pti]),
                            BL_TO_FORTRAN_3D(cellVols[pti]),&Neff, &Np, dx );
@@ -2877,8 +2877,8 @@ void FhdParticleContainer::EvaluateStats(MultiFab& particleInstant,
     {
         const Box& tile_box  = mfi.tilebox();
-        evaluate_means(ARLIM_3D(tile_box.loVect()),
-                       ARLIM_3D(tile_box.hiVect()),
+        evaluate_means(AMREX_ARLIM_3D(tile_box.loVect()),
+                       AMREX_ARLIM_3D(tile_box.hiVect()),
                        BL_TO_FORTRAN_3D(particleInstant[mfi]),
                        BL_TO_FORTRAN_3D(particleMeans[mfi]),
                        BL_TO_FORTRAN_3D(particleVars[mfi]),
@@ -2907,12 +2907,12 @@ void FhdParticleContainer::EvaluateStats(MultiFab& particleInstant,
         const int Np = parts.numParticles();
         evaluate_corrs(parts.data(),
-                       ARLIM_3D(tile_box.loVect()),
-                       ARLIM_3D(tile_box.hiVect()),
+                       AMREX_ARLIM_3D(tile_box.loVect()),
+                       AMREX_ARLIM_3D(tile_box.hiVect()),
                        m_vector_ptrs[grid_id].dataPtr(),
                        m_vector_size[grid_id].dataPtr(),
-                       ARLIM_3D(m_vector_ptrs[grid_id].loVect()),
-                       ARLIM_3D(m_vector_ptrs[grid_id].hiVect()),
+                       AMREX_ARLIM_3D(m_vector_ptrs[grid_id].loVect()),
+                       AMREX_ARLIM_3D(m_vector_ptrs[grid_id].hiVect()),
                        BL_TO_FORTRAN_3D(particleInstant[pti]),
                        BL_TO_FORTRAN_3D(particleMeans[pti]),
                        BL_TO_FORTRAN_3D(particleVars[pti]),
diff --git a/src_particles/particle_mobility.cpp b/src_particles/particle_mobility.cpp
index 965285f7c..0f307ee91 100644
--- a/src_particles/particle_mobility.cpp
+++ b/src_particles/particle_mobility.cpp
@@ -12,7 +12,7 @@ void ComputeDryMobility(MultiFab & dryMobility, species* particleInfo, const Geo
     for ( MFIter mfi(dryMobility); mfi.isValid(); ++mfi )
     {
         const Box& bx = mfi.validbox();
-        compute_dry_mobility(ARLIM_3D(bx.loVect()), ARLIM_3D(bx.hiVect()), BL_TO_FORTRAN_3D(dryMobility[mfi]), ZFILL(dx), ZFILL(plo), ZFILL(phi), &ngc, particleInfo);
+        compute_dry_mobility(AMREX_ARLIM_3D(bx.loVect()), AMREX_ARLIM_3D(bx.hiVect()), BL_TO_FORTRAN_3D(dryMobility[mfi]), AMREX_ZFILL(dx), AMREX_ZFILL(plo), AMREX_ZFILL(phi), &ngc, particleInfo);
     }
 }
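The particle-code hunks above are a single mechanical rename: the un-prefixed AMReX interop macros ZFILL and ARLIM_3D are replaced by their AMREX_-prefixed equivalents (AMREX_ZFILL, AMREX_ARLIM_3D), while BL_TO_FORTRAN_3D call sites are untouched; one hunk in DsmcParticleContainer.cpp.old still passes a bare ZFILL(dx), which looks like a leftover. As a hedged illustration, here is the compute_dry_mobility call from the particle_mobility.cpp hunk above, shown before and after (reformatted for readability only; this is an excerpt, not stand-alone code):

// before: legacy macro spellings
// compute_dry_mobility(ARLIM_3D(bx.loVect()), ARLIM_3D(bx.hiVect()),
//                      BL_TO_FORTRAN_3D(dryMobility[mfi]),
//                      ZFILL(dx), ZFILL(plo), ZFILL(phi), &ngc, particleInfo);

// after: AMREX_-prefixed spellings, same arguments and behavior
compute_dry_mobility(AMREX_ARLIM_3D(bx.loVect()), AMREX_ARLIM_3D(bx.hiVect()),
                     BL_TO_FORTRAN_3D(dryMobility[mfi]),
                     AMREX_ZFILL(dx), AMREX_ZFILL(plo), AMREX_ZFILL(phi),
                     &ngc, particleInfo);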
diff --git a/unmaintained/src_chem/AmrCoreAdv.cpp b/unmaintained/src_chem/AmrCoreAdv.cpp
index 5e6d27894..7685e0c4c 100644
--- a/unmaintained/src_chem/AmrCoreAdv.cpp
+++ b/unmaintained/src_chem/AmrCoreAdv.cpp
@@ -57,8 +57,8 @@ void AmrCoreAdv::Initialize( )
 /*
     // walls (Neumann)
-    int bc_con_lo[] = {FOEXTRAP, FOEXTRAP, FOEXTRAP};
-    int bc_con_hi[] = {FOEXTRAP, FOEXTRAP, FOEXTRAP};
+    int bc_con_lo[] = {amrex::BCType::foextrap, amrex::BCType::foextrap, amrex::BCType::foextrap};
+    int bc_con_hi[] = {amrex::BCType::foextrap, amrex::BCType::foextrap, amrex::BCType::foextrap};
 */
     // bcs.resize(1);
     for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)