diff --git a/.github/workflows/clang-tidy.yml b/.github/workflows/clang-tidy.yml
index 664917d4e..f340a9709 100644
--- a/.github/workflows/clang-tidy.yml
+++ b/.github/workflows/clang-tidy.yml
@@ -23,7 +23,7 @@ jobs:
       config_file: src/.clang-tidy
       build_dir: build
       apt_packages: libopenmpi-dev,libhdf5-mpi-dev,python3-dev,python3-numpy,python3-matplotlib
-      cmake_command: cmake . -B build -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DQUOKKA_PYTHON=ON
+      cmake_command: cmake . -B build -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DQUOKKA_PYTHON=ON -DQUOKKA_OPENPMD=ON -DopenPMD_USE_ADIOS2=OFF
       split_workflow: true

      # Uploads an artefact containing clang_fixes.json
diff --git a/.gitmodules b/.gitmodules
index 053a51af8..af07e93a9 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -14,3 +14,6 @@
 [submodule "extern/yaml-cpp"]
     path = extern/yaml-cpp
     url = https://github.com/jbeder/yaml-cpp.git
+[submodule "extern/openPMD-api"]
+    path = extern/openPMD-api
+    url = https://github.com/openPMD/openPMD-api.git
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7bc69218f..9bbb5cebc 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -31,6 +31,7 @@ option(QUOKKA_PYTHON "Compile with Python support (on/off)" ON)
 option(DISABLE_FMAD "Disable fused multiply-add instructions on GPU (on/off)" ON)
 option(ENABLE_ASAN "Enable AddressSanitizer and UndefinedBehaviorSanitizer" OFF)
 option(WARNINGS_AS_ERRORS "Treat compiler warnings as errors" OFF)
+option(QUOKKA_OPENPMD "Enable OpenPMD output (on/off)" OFF)

 if(AMReX_GPU_BACKEND MATCHES "CUDA")
   enable_language(CUDA)
diff --git a/README.md b/README.md
index d022bb12d..c3b3a09fa 100644
--- a/README.md
+++ b/README.md
@@ -34,6 +34,7 @@ Quokka also features advanced Adaptive Quokka Refinement:tm: technology:
 * ROCm 5.2.0+ (optional, for AMD GPUs)
 * Ninja (optional, for faster builds)
 * Python 3.7+ (optional)
+* ADIOS2 2.9+ with GPU-aware support (optional, for writing terabyte-sized or larger outputs)

 ## Quickstart

diff --git a/extern/openPMD-api b/extern/openPMD-api
new file mode 160000
index 000000000..e8e2aeb28
--- /dev/null
+++ b/extern/openPMD-api
@@ -0,0 +1 @@
+Subproject commit e8e2aeb28eda917b6b4e21f8d072d55c651b0915
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index e5932274d..cc787d470 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -11,6 +11,25 @@
 if(Python_FOUND)
     link_libraries(${Python_LIBRARIES})
 endif()

+if(QUOKKA_OPENPMD)
+    message(STATUS "Building Quokka with OpenPMD support")
+    add_compile_definitions(QUOKKA_USE_OPENPMD)
+    set(openPMD_USE_ADIOS2 ON CACHE BOOL "") # ADIOS2 is required
+    set(openPMD_USE_HDF5 OFF CACHE BOOL "")
+    set(openPMD_USE_PYTHON OFF CACHE BOOL "")
+    set(openPMD_BUILD_TESTING OFF CACHE BOOL "")
+    set(openPMD_BUILD_EXAMPLES OFF CACHE BOOL "")
+    set(openPMD_BUILD_CLI_TOOLS OFF CACHE BOOL "")
+    set(openPMD_INSTALL OFF CACHE BOOL "")
+    add_subdirectory(${QuokkaCode_SOURCE_DIR}/extern/openPMD-api ${QuokkaCode_BINARY_DIR}/openPMD-api)
+    include_directories(${OpenPMD_INCLUDE_DIRS_RET})
+    link_libraries(openPMD::openPMD)
+    set(openPMDSources "${CMAKE_CURRENT_SOURCE_DIR}/openPMD.cpp")
+    message(STATUS "WARNING: OpenPMD plotfiles are ENABLED. Face-centered variables will only be available as cell-centered averages in plotfiles!")
+else()
+    set(openPMDSources "")
+endif()
+
 # HDF5
 find_package(HDF5 REQUIRED)
@@ -97,7 +116,7 @@ include(CTest)

 #create an object library for files that should be compiled with all test problems
 set (QuokkaObjSources "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/CloudyCooling.cpp"
-     "${CMAKE_CURRENT_SOURCE_DIR}/GrackleDataReader.cpp"
+     "${CMAKE_CURRENT_SOURCE_DIR}/GrackleDataReader.cpp" "${openPMDSources}"
      "${gamma_law_sources}")

 #we don't use it anymore because it gives issues on Cray systems
 #add_library(QuokkaObj OBJECT ${QuokkaObjSources} ${gamma_law_sources})
diff --git a/src/PrimordialChem/CMakeLists.txt b/src/PrimordialChem/CMakeLists.txt
index 9bb2b1d70..75ce2dae5 100644
--- a/src/PrimordialChem/CMakeLists.txt
+++ b/src/PrimordialChem/CMakeLists.txt
@@ -5,7 +5,7 @@ setup_target_for_microphysics_compilation(${microphysics_network_name} "${CMAKE_
 #this is critical to ensure the correct Microphysics files are linked to primordial chem target
 include_directories(BEFORE ${primordial_chem_dirs} "${CMAKE_CURRENT_BINARY_DIR}/" "includes/extern_parameters.H" "includes/network_properties.H")

-add_executable(test_primordial_chem test_primordial_chem.cpp ../main.cpp ../GrackleDataReader.cpp ../CloudyCooling.cpp ../Chemistry.cpp ${primordial_chem_sources})
+add_executable(test_primordial_chem test_primordial_chem.cpp ../main.cpp "${openPMDSources}" ../GrackleDataReader.cpp ../CloudyCooling.cpp ../Chemistry.cpp ${primordial_chem_sources})

 target_compile_definitions(test_primordial_chem PUBLIC PRIMORDIAL_CHEM) #this will add #define PRIMORDIAL_CHEM
 #de-link QuokkaObj from this target to avoid multiple definitions of same variables
diff --git a/src/openPMD.cpp b/src/openPMD.cpp
new file mode 100644
index 000000000..f4c3c5372
--- /dev/null
+++ b/src/openPMD.cpp
@@ -0,0 +1,144 @@
+//==============================================================================
+// TwoMomentRad - a radiation transport library for patch-based AMR codes
+// Copyright 2020 Benjamin Wibking.
+// Released under the MIT license. See LICENSE file included in the GitHub repo.
+//==============================================================================
+//! \file openPMD.cpp
+/// \brief openPMD I/O for snapshots
+
+#include <algorithm>
+#include <cstdint>
+#include <string>
+#include <vector>
+
+// AMReX headers
+#include "AMReX.H"
+#include "AMReX_Geometry.H"
+#include "AMReX_IntVect.H"
+#include "AMReX_MultiFab.H"
+
+// openPMD headers
+#include "openPMD/openPMD.hpp"
+
+namespace quokka::OpenPMDOutput
+{
+
+namespace detail
+{
+/** \brief
+ * Convert an IntVect to a std::vector<std::uint64_t>
+ * and reverse the order of the elements
+ * (used for compatibility with the openPMD API)
+ */
+auto getReversedVec(const amrex::IntVect &v) -> std::vector<std::uint64_t>
+{
+    // Convert the IntVect v to an std::vector u
+    std::vector<std::uint64_t> u = {AMREX_D_DECL(static_cast<std::uint64_t>(v[0]), static_cast<std::uint64_t>(v[1]), static_cast<std::uint64_t>(v[2]))};
+    // Reverse the order of elements, if v corresponds to the indices of a Fortran-order array (like an AMReX FArrayBox)
+    // but u is intended to be used with a C-order API (like openPMD)
+    std::reverse(u.begin(), u.end());
+    return u;
+}
+
+/** \brief
+ * Convert a Real* pointer to a std::vector<double>,
+ * and reverse the order of the elements
+ * (used for compatibility with the openPMD API)
+ */
+auto getReversedVec(const amrex::Real *v) -> std::vector<double>
+{
+    // Convert the Real* v to an std::vector u
+    std::vector<double> u = {AMREX_D_DECL(static_cast<double>(v[0]), static_cast<double>(v[1]), static_cast<double>(v[2]))}; // NOLINT
+    // Reverse the order of elements, if v corresponds to the indices of a Fortran-order array (like an AMReX FArrayBox)
+    // but u is intended to be used with a C-order API (like openPMD)
+    std::reverse(u.begin(), u.end());
+    return u;
+}
+
+void SetupMeshComponent(openPMD::Mesh &mesh, amrex::Geometry &full_geom)
+{
+    amrex::Box const &global_box = full_geom.Domain();
+    auto global_size = getReversedVec(global_box.size());
+    std::vector<double> const grid_spacing = getReversedVec(full_geom.CellSize());
+    std::vector<double> const global_offset = getReversedVec(full_geom.ProbLo());
+
+    // Prepare the type of dataset that will be written
+    mesh.setDataOrder(openPMD::Mesh::DataOrder::C);
+    mesh.setGridSpacing(grid_spacing);
+    mesh.setGridGlobalOffset(global_offset);
+    mesh.setAttribute("fieldSmoothing", "none");
+
+    auto mesh_comp = mesh[openPMD::MeshRecordComponent::SCALAR];
+    auto const dataset = openPMD::Dataset(openPMD::determineDatatype<amrex::Real>(), global_size);
+    mesh_comp.resetDataset(dataset);
+    std::vector<double> const relativePosition{0.5, 0.5, 0.5}; // cell-centered only (for now)
+    mesh_comp.setPosition(relativePosition);
+}
+
+auto GetMeshComponentName(int meshLevel, std::string const &field_name) -> std::string
+{
+    std::string new_field_name = field_name;
+    if (meshLevel > 0) {
+        new_field_name += std::string("_lvl").append(std::to_string(meshLevel));
+    }
+    return new_field_name;
+}
+} // namespace detail
+//----------------------------------------------------------------------------------------
+//! \fn void quokka::OpenPMDOutput::WriteFile()
+//  \brief Write cell-centered MultiFab data for all levels using openPMD
+void WriteFile(const std::vector<std::string> &varnames, int const output_levels, amrex::Vector<const amrex::MultiFab *> &mf,
+               amrex::Vector<amrex::Geometry> &geom, const std::string &output_basename, amrex::Real const time, int const file_number)
+{
+    // open file
+    std::string const filename = output_basename + ".bp";
+    auto series = openPMD::Series(filename, openPMD::Access::CREATE, amrex::ParallelDescriptor::Communicator());
+    series.setSoftware("Quokka", "1.0");
+
+    auto series_iteration = series.iterations[file_number];
+    series_iteration.open();
+    series_iteration.setTime(time);
+    auto meshes = series_iteration.meshes;
+
+    // loop over levels up to output_levels
+    for (int lev = 0; lev < output_levels; lev++) {
+        amrex::Geometry full_geom = geom[lev];
+        amrex::Box const &global_box = full_geom.Domain();
+        int const ncomp = mf[lev]->nComp();
+
+        for (int icomp = 0; icomp < ncomp; icomp++) {
+            const std::string field_name = detail::GetMeshComponentName(lev, varnames[icomp]);
+            if (!meshes.contains(field_name)) {
+                auto mesh = meshes[field_name];
+                detail::SetupMeshComponent(mesh, full_geom);
+            }
+        }
+
+        // pass data pointers for each box to ADIOS
+        for (int icomp = 0; icomp < ncomp; icomp++) {
+            std::string const field_name = detail::GetMeshComponentName(lev, varnames[icomp]);
+            openPMD::MeshRecordComponent mesh = series_iteration.meshes[field_name][openPMD::MeshRecordComponent::SCALAR];
+
+            // Loop through the multifab, and store each box as a chunk in the openPMD file.
+            for (amrex::MFIter mfi(*mf[lev]); mfi.isValid(); ++mfi) {
+                amrex::FArrayBox const &fab = (*mf[lev])[mfi];
+                amrex::Box const &local_box = fab.box();
+
+                // Determine the offset and size of this chunk
+                amrex::IntVect const box_offset = local_box.smallEnd() - global_box.smallEnd();
+                auto chunk_offset = detail::getReversedVec(box_offset); // this overflows if ghost zones are used (!)
+                auto chunk_size = detail::getReversedVec(local_box.size());
+
+                // pass device pointer directly to ADIOS
+                amrex::Real const *local_data = fab.dataPtr(icomp);
+                mesh.storeChunkRaw(local_data, chunk_offset, chunk_size);
+            }
+        }
+
+        // flush this level to disk
+        series.flush();
+    }
+
+    // close file
+    series.close();
+}
+
+} // namespace quokka::OpenPMDOutput
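Reviewer note (not part of the patch): the series that `WriteFile()` writes can be spot-checked with the openPMD-api read interface. Below is a minimal sketch under a few assumptions: the file name `plt00100.bp` is a placeholder for whatever plotfile basename Quokka actually produces, the mesh names follow the `<varname>` / `<varname>_lvlN` convention from `GetMeshComponentName()`, and the reading build of openPMD-api has the ADIOS2 backend enabled (as configured in `src/CMakeLists.txt` above).

```cpp
#include <iostream>
#include <openPMD/openPMD.hpp>

int main()
{
    // "plt00100.bp" is a hypothetical output name; substitute the basename Quokka wrote.
    openPMD::Series series("plt00100.bp", openPMD::Access::READ_ONLY);

    for (auto &[index, iteration] : series.iterations) {
        std::cout << "iteration " << index << " written at t = " << iteration.time<double>() << "\n";

        // Each (variable, AMR level) pair is stored as a separate mesh record,
        // e.g. "gasDensity" for level 0 and "gasDensity_lvl1" for level 1.
        for (auto &[name, mesh] : iteration.meshes) {
            auto const extent = mesh[openPMD::MeshRecordComponent::SCALAR].getExtent();
            std::cout << "  mesh '" << name << "' extent:";
            for (auto const n : extent) {
                std::cout << " " << n;
            }
            std::cout << "\n";
        }
    }
    return 0;
}
```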
diff --git a/src/openPMD.hpp b/src/openPMD.hpp
new file mode 100644
index 000000000..2c6211b74
--- /dev/null
+++ b/src/openPMD.hpp
@@ -0,0 +1,40 @@
+#ifndef OPENPMD_HPP_ // NOLINT
+#define OPENPMD_HPP_
+//==============================================================================
+// TwoMomentRad - a radiation transport library for patch-based AMR codes
+// Copyright 2020 Benjamin Wibking.
+// Released under the MIT license. See LICENSE file included in the GitHub repo.
+//==============================================================================
+//! \file openPMD.hpp
+/// \brief openPMD I/O for snapshots
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+// AMReX headers
+#include "AMReX_Geometry.H"
+#include "AMReX_IntVect.H"
+#include "AMReX_MultiFab.H"
+
+// openPMD headers
+#include "openPMD/openPMD.hpp"
+
+namespace quokka::OpenPMDOutput
+{
+
+namespace detail
+{
+
+auto getReversedVec(const amrex::IntVect &v) -> std::vector<std::uint64_t>;
+auto getReversedVec(const amrex::Real *v) -> std::vector<double>;
+void SetupMeshComponent(openPMD::Mesh &mesh, amrex::Geometry &full_geom);
+auto GetMeshComponentName(int meshLevel, std::string const &field_name) -> std::string;
+
+} // namespace detail
+
+void WriteFile(const std::vector<std::string> &varnames, int output_levels, amrex::Vector<const amrex::MultiFab *> &mf,
+               amrex::Vector<amrex::Geometry> &geom, const std::string &output_basename, amrex::Real time, int file_number);
+
+} // namespace quokka::OpenPMDOutput
+
+#endif // OPENPMD_HPP_
\ No newline at end of file
diff --git a/src/simulation.hpp b/src/simulation.hpp
index b5503f9f6..adcfdc446 100644
--- a/src/simulation.hpp
+++ b/src/simulation.hpp
@@ -87,6 +87,10 @@ namespace filesystem = experimental::filesystem;
 #include "math_impl.hpp"
 #include "physics_info.hpp"

+#ifdef QUOKKA_USE_OPENPMD
+#include "openPMD.hpp"
+#endif
+
 #define USE_YAFLUXREGISTER

 #ifdef AMREX_USE_ASCENT
@@ -268,8 +272,8 @@ template <typename problem_t> class AMRSimulation : public amrex::AmrCore
     [[nodiscard]] auto PlotFileName(int lev) const -> std::string;
     [[nodiscard]] auto CustomPlotFileName(const char *base, int lev) const -> std::string;
     [[nodiscard]] auto GetPlotfileVarNames() const -> amrex::Vector<std::string>;
-    [[nodiscard]] auto PlotFileMF() -> amrex::Vector<amrex::MultiFab>;
-    [[nodiscard]] auto PlotFileMFAtLevel(int lev) -> amrex::MultiFab;
+    [[nodiscard]] auto PlotFileMF(int included_ghosts) -> amrex::Vector<amrex::MultiFab>;
+    [[nodiscard]] auto PlotFileMFAtLevel(int lev, int included_ghosts) -> amrex::MultiFab;
     void WriteMetadataFile(std::string const &MetadataFileName) const;
     void ReadMetadataFile(std::string const &chkfilename);
     void WriteStatisticsFile();
@@ -459,6 +463,14 @@ template <typename problem_t> void AMRSimulation<problem_t>::PerformanceHints()
            "128 (or greater) when running on GPUs, and 64 (or "
            "greater) when running on CPUs.\n";
     }
+
+#ifdef QUOKKA_USE_OPENPMD
+    // warning about face-centered variables and OpenPMD outputs
+    if constexpr (Physics_Indices<problem_t>::nvarTotal_fc > 0) {
+        amrex::Print() << "\n[Warning] [I/O] Plotfiles will ONLY contain cell-centered averages of face-centered variables!"
+                       << " Support for outputting face-centered variables for openPMD is not yet implemented.\n";
+    }
+#endif
 }

 template <typename problem_t> void AMRSimulation<problem_t>::readParameters()
@@ -1664,12 +1676,11 @@ void AMRSimulation<problem_t>::AverageFCToCC(amrex::MultiFab &mf_cc, const amrex
     amrex::Gpu::streamSynchronize();
 }

-template <typename problem_t> auto AMRSimulation<problem_t>::PlotFileMFAtLevel(int lev) -> amrex::MultiFab
+template <typename problem_t> auto AMRSimulation<problem_t>::PlotFileMFAtLevel(const int lev, const int included_ghosts) -> amrex::MultiFab
 {
     // Combine state_new_cc_[lev] and derived variables in a new MF
-    int comp = 0;
     const int ncomp_cc = state_new_cc_[lev].nComp();
-    const int nghost_cc = state_new_cc_[lev].nGrow(); // workaround Ascent bug
+    int comp = 0;
     int ncomp_per_dim_fc = 0;
     int ncomp_tot_fc = 0;
     int nghost_fc = 0;
@@ -1680,12 +1691,13 @@ template <typename problem_t> auto AMRSimulation<problem_t>::PlotFileMFAtLevel(i
     }
     const int ncomp_deriv = derivedNames_.size();
     const int ncomp_plotMF = ncomp_cc + ncomp_tot_fc + ncomp_deriv;
-    const int nghost_plotMF = nghost_cc;
-    amrex::MultiFab plotMF(grids[lev], dmap[lev], ncomp_plotMF, nghost_plotMF);
+    amrex::MultiFab plotMF(grids[lev], dmap[lev], ncomp_plotMF, included_ghosts);

-    // Fill ghost zones for state_new_cc_
-    fillBoundaryConditions(state_new_cc_[lev], state_new_cc_[lev], lev, tNew_[lev], quokka::centering::cc, quokka::direction::na, InterpHookNone,
-                           InterpHookNone, FillPatchType::fillpatch_function);
+    if (included_ghosts > 0) {
+        // Fill ghost zones for state_new_cc_
+        fillBoundaryConditions(state_new_cc_[lev], state_new_cc_[lev], lev, tNew_[lev], quokka::centering::cc, quokka::direction::na, InterpHookNone,
+                               InterpHookNone, FillPatchType::fillpatch_function);
+    }

     // Fill ghost zones for state_new_fc_
     if constexpr (Physics_Indices<problem_t>::nvarTotal_fc > 0) {
@@ -1697,7 +1709,7 @@ template <typename problem_t> auto AMRSimulation<problem_t>::PlotFileMFAtLevel(i

     // copy data from cell-centred state variables
     for (int i = 0; i < ncomp_cc; i++) {
-        amrex::MultiFab::Copy(plotMF, state_new_cc_[lev], i, comp, 1, nghost_cc);
+        amrex::MultiFab::Copy(plotMF, state_new_cc_[lev], i, comp, 1, included_ghosts);
         comp++;
     }

@@ -1719,11 +1731,11 @@ template <typename problem_t> auto AMRSimulation<problem_t>::PlotFileMFAtLevel(i
 }

 // put together an array of multifabs for writing
-template <typename problem_t> auto AMRSimulation<problem_t>::PlotFileMF() -> amrex::Vector<amrex::MultiFab>
+template <typename problem_t> auto AMRSimulation<problem_t>::PlotFileMF(const int included_ghosts) -> amrex::Vector<amrex::MultiFab>
 {
     amrex::Vector<amrex::MultiFab> r;
     for (int i = 0; i <= finest_level; ++i) {
-        r.push_back(PlotFileMFAtLevel(i));
+        r.push_back(PlotFileMFAtLevel(i, included_ghosts));
     }
     return r;
 }
@@ -1765,7 +1777,7 @@ template <typename problem_t> void AMRSimulation<problem_t>::RenderAscent()
     BL_PROFILE("AMRSimulation::RenderAscent()");

     // combine multifabs
-    amrex::Vector<amrex::MultiFab> mf = PlotFileMF();
+    amrex::Vector<amrex::MultiFab> mf = PlotFileMF(nghost_cc_);
     amrex::Vector<const amrex::MultiFab *> mf_ptr = amrex::GetVecOfConstPtrs(mf);
     amrex::Vector<std::string> varnames;
     varnames.insert(varnames.end(), componentNames_cc_.begin(), componentNames_cc_.end());
@@ -1819,15 +1831,18 @@ template <typename problem_t> void AMRSimulation<problem_t>::WritePlotFile()
 {
     BL_PROFILE("AMRSimulation::WritePlotFile()");

-#ifndef AMREX_USE_HDF5
     if (amrex::AsyncOut::UseAsyncOut()) {
         // ensure that we flush any plotfiles that are currently being written
         amrex::AsyncOut::Finish();
     }
-#endif

     // now construct output and submit to async write queue
-    amrex::Vector<amrex::MultiFab> mf = PlotFileMF();
+#ifdef QUOKKA_USE_OPENPMD
+    int included_ghosts = 0;
+#else
+    int included_ghosts = nghost_cc_;
+#endif
+    amrex::Vector<amrex::MultiFab> mf = PlotFileMF(included_ghosts);
     amrex::Vector<const amrex::MultiFab *> mf_ptr = amrex::GetVecOfConstPtrs(mf);
     const std::string &plotfilename = PlotFileName(istep[0]);

@@ -1836,8 +1851,8 @@ template <typename problem_t> void AMRSimulation<problem_t>::WritePlotFile()
     // write plotfile
     amrex::Print() << "Writing plotfile " << plotfilename << "\n";

-#ifdef AMREX_USE_HDF5
-    amrex::WriteMultiLevelPlotfileHDF5(plotfilename, finest_level + 1, mf_ptr, varnames, Geom(), tNew_[0], istep, refRatio());
+#ifdef QUOKKA_USE_OPENPMD
+    quokka::OpenPMDOutput::WriteFile(varnames, finest_level + 1, mf_ptr, Geom(), plotfilename, tNew_[0], istep[0]);
     WriteMetadataFile(plotfilename + ".yaml");
 #else
     amrex::WriteMultiLevelPlotfile(plotfilename, finest_level + 1, mf_ptr, varnames, Geom(), tNew_[0], istep, refRatio());
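Reviewer note on the `included_ghosts = 0` branch chosen under `QUOKKA_USE_OPENPMD`: `WriteFile()` computes each chunk offset as `local_box.smallEnd() - global_box.smallEnd()` and `detail::getReversedVec()` casts the result to `std::uint64_t`, so a plot MultiFab built with ghost cells would produce a negative offset that wraps around (the `(!)` comment in `openPMD.cpp`). A toy illustration of that wraparound, not project code:

```cpp
#include <cstdint>
#include <iostream>

int main()
{
    // A box aligned with the domain starts at i = 0; with 2 ghost cells it starts at i = -2.
    int const domain_lo = 0;
    int const box_lo_interior = 0;
    int const box_lo_with_ghosts = -2;

    // The same conversion detail::getReversedVec() applies to the box offset:
    auto offset_interior = static_cast<std::uint64_t>(box_lo_interior - domain_lo);       // 0, as expected
    auto offset_with_ghosts = static_cast<std::uint64_t>(box_lo_with_ghosts - domain_lo); // wraps to 18446744073709551614

    std::cout << "offset without ghost cells: " << offset_interior << "\n";
    std::cout << "offset with ghost cells:    " << offset_with_ghosts << "\n";
    return 0;
}
```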